diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 9699157c25c88ef1fd55c0b4eccb93ac1c6060a9..2f1e5b621d0ed28a63994b99c70f427a65b3e797 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -303,15 +303,6 @@ Who: Johannes Berg --------------------------- -What: CONFIG_NF_CT_ACCT -When: 2.6.29 -Why: Accounting can now be enabled/disabled without kernel recompilation. - Currently used only to set a default value for a feature that is also - controlled by a kernel/module/sysfs/sysctl parameter. -Who: Krzysztof Piotr Oledzki - ---------------------------- - What: sysfs ui for changing p4-clockmod parameters When: September 2009 Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt index 3ba0b945aaf862fc1c08a9c1a7f2ea04b0653486..f2430a7974e1655b3cdbb27b79eb801597ac6059 100644 --- a/Documentation/filesystems/nfs/nfsroot.txt +++ b/Documentation/filesystems/nfs/nfsroot.txt @@ -124,6 +124,8 @@ ip=:::::: Name of the client. May be supplied by autoconfiguration, but its absence will not trigger autoconfiguration. + If specified and DHCP is used, the user provided hostname will + be carried in the DHCP request to hopefully update DNS record. Default: Client IP address is used in ASCII notation. diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI index f172091fb7cd4c592980a36272b2a61a4db71b7e..309eb5ed942b138a13f0123de28d02efd2d0de47 100644 --- a/Documentation/isdn/INTERFACE.CAPI +++ b/Documentation/isdn/INTERFACE.CAPI @@ -113,12 +113,16 @@ char *driver_name int (*load_firmware)(struct capi_ctr *ctrlr, capiloaddata *ldata) (optional) pointer to a callback function for sending firmware and configuration data to the device + The function may return before the operation has completed. + Completion must be signalled by a call to capi_ctr_ready(). Return value: 0 on success, error code on error Called in process context. void (*reset_ctr)(struct capi_ctr *ctrlr) - (optional) pointer to a callback function for performing a reset on - the device, releasing all registered applications + (optional) pointer to a callback function for stopping the device, + releasing all registered applications + The function may return before the operation has completed. + Completion must be signalled by a call to capi_ctr_down(). Called in process context. void (*register_appl)(struct capi_ctr *ctrlr, u16 applid, diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset index e472df8423236d8a095d3763a95464ce13a0d334..ef3343eaa00246c83493f384ddfee46514471504 100644 --- a/Documentation/isdn/README.gigaset +++ b/Documentation/isdn/README.gigaset @@ -47,9 +47,9 @@ GigaSet 307x Device Driver 1.2. Software -------- - The driver works with ISDN4linux and so can be used with any software - which is able to use ISDN4linux for ISDN connections (voice or data). - Experimental Kernel CAPI support is available as a compilation option. + The driver works with the Kernel CAPI subsystem as well as the old + ISDN4Linux subsystem, so it can be used with any software which is able + to use CAPI 2.0 or ISDN4Linux for ISDN connections (voice or data). 
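[Editor's illustration, not part of the patch] A minimal sketch of the asynchronous completion contract added to INTERFACE.CAPI above: reset_ctr() may return before the operation has completed, and completion must then be signalled by a call to capi_ctr_down() (load_firmware() is analogous, signalling with capi_ctr_ready()). The foo_* names and the use of a workqueue to defer the work are assumptions for illustration only, not taken from the patch.

	/*
	 * Sketch of a hypothetical driver "foo": reset_ctr() only schedules
	 * the reset and returns; the deferred work signals completion.
	 * INIT_WORK(&fc->reset_work, foo_do_reset) is assumed to have been
	 * done at probe time.
	 */
	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/isdn/capilli.h>

	struct foo_ctrl {
		struct capi_ctr ctr;
		struct work_struct reset_work;
	};

	static void foo_do_reset(struct work_struct *work)
	{
		struct foo_ctrl *fc = container_of(work, struct foo_ctrl, reset_work);

		/* ... stop the device and release all registered applications ... */

		capi_ctr_down(&fc->ctr);	/* completion signalled here */
	}

	static void foo_reset_ctr(struct capi_ctr *ctrlr)
	{
		struct foo_ctrl *fc = container_of(ctrlr, struct foo_ctrl, ctr);

		schedule_work(&fc->reset_work);	/* may return before the reset completes */
	}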
There are some user space tools available at http://sourceforge.net/projects/gigaset307x/ @@ -152,61 +152,42 @@ GigaSet 307x Device Driver - GIGVER_FWBASE: retrieve the firmware version of the base Upon return, version[] is filled with the requested version information. -2.3. ISDN4linux - ---------- - This is the "normal" mode of operation. After loading the module you can - set up the ISDN system just as you'd do with any ISDN card supported by - the ISDN4Linux subsystem. Most distributions provide some configuration - utility. If not, you can use some HOWTOs like - http://www.linuxhaven.de/dlhp/HOWTO/DE-ISDN-HOWTO-5.html - If this doesn't work, because you have some device like SX100 where - debug output (see section 3.2.) shows something like this when dialing - CMD Received: ERROR - Available Params: 0 - Connection State: 0, Response: -1 - gigaset_process_response: resp_code -1 in ConState 0 ! - Timeout occurred - you probably need to use unimodem mode. (see section 2.5.) - -2.4. CAPI +2.3. CAPI ---- If the driver is compiled with CAPI support (kernel configuration option - GIGASET_CAPI, experimental) it can also be used with CAPI 2.0 kernel and - user space applications. For user space access, the module capi.ko must - be loaded. The capiinit command (included in the capi4k-utils package) - does this for you. - - The CAPI variant of the driver supports legacy ISDN4Linux applications - via the capidrv compatibility driver. The kernel module capidrv.ko must - be loaded explicitly with the command + GIGASET_CAPI) the devices will show up as CAPI controllers as soon as the + corresponding driver module is loaded, and can then be used with CAPI 2.0 + kernel and user space applications. For user space access, the module + capi.ko must be loaded. + + Legacy ISDN4Linux applications are supported via the capidrv + compatibility driver. The kernel module capidrv.ko must be loaded + explicitly with the command modprobe capidrv if needed, and cannot be unloaded again without unloading the driver first. (These are limitations of capidrv.) - The note about unimodem mode in the preceding section applies here, too. - -2.5. Unimodem mode - ------------- - This is needed for some devices [e.g. SX100] as they have problems with - the "normal" commands. + Most distributions handle loading and unloading of the various CAPI + modules automatically via the command capiinit(1) from the capi4k-utils + package or a similar mechanism. Note that capiinit(1) cannot unload the + Gigaset drivers because it doesn't support more than one module per + driver. - If you have installed the command line tool gigacontr, you can enter - unimodem mode using - gigacontr --mode unimodem - You can switch back using - gigacontr --mode isdn +2.4. ISDN4Linux + ---------- + If the driver is compiled without CAPI support (native ISDN4Linux + variant), it registers the device with the legacy ISDN4Linux subsystem + after loading the module. It can then be used with ISDN4Linux + applications only. Most distributions provide some configuration utility + for setting up that subsystem. Otherwise you can use some HOWTOs like + http://www.linuxhaven.de/dlhp/HOWTO/DE-ISDN-HOWTO-5.html - You can also put the driver directly into Unimodem mode when it's loaded, - by passing the module parameter startmode=0 to the hardware specific - module, e.g. 
- modprobe usb_gigaset startmode=0 - or by adding a line like - options usb_gigaset startmode=0 - to an appropriate module configuration file, like /etc/modprobe.d/gigaset - or /etc/modprobe.conf.local. +2.5. Unimodem mode + ------------- In this mode the device works like a modem connected to a serial port (the /dev/ttyGU0, ... mentioned above) which understands the commands + ATZ init, reset => OK or ERROR ATD @@ -234,6 +215,31 @@ GigaSet 307x Device Driver to an appropriate module configuration file, like /etc/modprobe.d/gigaset or /etc/modprobe.conf.local. + Unimodem mode is needed for making some devices [e.g. SX100] work which + do not support the regular Gigaset command set. If debug output (see + section 3.2.) shows something like this when dialing: + CMD Received: ERROR + Available Params: 0 + Connection State: 0, Response: -1 + gigaset_process_response: resp_code -1 in ConState 0 ! + Timeout occurred + then switching to unimodem mode may help. + + If you have installed the command line tool gigacontr, you can enter + unimodem mode using + gigacontr --mode unimodem + You can switch back using + gigacontr --mode isdn + + You can also put the driver directly into Unimodem mode when it's loaded, + by passing the module parameter startmode=0 to the hardware specific + module, e.g. + modprobe usb_gigaset startmode=0 + or by adding a line like + options usb_gigaset startmode=0 + to an appropriate module configuration file, like /etc/modprobe.d/gigaset + or /etc/modprobe.conf.local. + 2.6. Call-ID (CID) mode ------------------ Call-IDs are numbers used to tag commands to, and responses from, the @@ -263,7 +269,22 @@ GigaSet 307x Device Driver change its CID mode while the driver is loaded, eg. echo 0 > /sys/class/tty/ttyGU0/cidmode -2.7. Unregistered Wireless Devices (M101/M105) +2.7. Dialing Numbers + --------------- + The called party number provided by an application for dialing out must + be a public network number according to the local dialing plan, without + any dial prefix for getting an outside line. + + Internal calls can be made by providing an internal extension number + prefixed with "**" (two asterisks) as the called party number. So to dial + eg. the first registered DECT handset, give "**11" as the called party + number. Dialing "***" (three asterisks) calls all extensions + simultaneously (global call). + + This holds for both CAPI 2.0 and ISDN4Linux applications. Unimodem mode + does not support internal calls. + +2.8. Unregistered Wireless Devices (M101/M105) ----------------------------------------- The main purpose of the ser_gigaset and usb_gigaset drivers is to allow the M101 and M105 wireless devices to be used as ISDN devices for ISDN diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index b61f89fa01c193def3bc453f2422c4cc40ac06bb..d9239d5f3ad38ee2aca01e1c6cf25f4c1a6c82a5 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1598,8 +1598,7 @@ and is between 256 and 4096 characters. It is defined in the file [NETFILTER] Enable connection tracking flow accounting 0 to disable accounting 1 to enable accounting - Default value depends on CONFIG_NF_CT_ACCT that is - going to be removed in 2.6.29. + Default value is 0. nfsaddrs= [NFS] See Documentation/filesystems/nfs/nfsroot.txt. 
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200 index 80c728522c4c7a0482d1804689f87869d9c404c0..e4d3267071e41d0d1548aedfc2d352641220d95f 100644 --- a/Documentation/networking/README.ipw2200 +++ b/Documentation/networking/README.ipw2200 @@ -171,7 +171,7 @@ Where the supported parameter are: led Can be used to turn on experimental LED code. - 0 = Off, 1 = On. Default is 0. + 0 = Off, 1 = On. Default is 1. mode Can be used to set the default mode of the adapter. diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 61f516b135b415986217f130fcadf064a6bf5c83..d0914781830e9005aafef5e5a27db7bd998deec8 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -49,6 +49,7 @@ Table of Contents 3.3 Configuring Bonding Manually with Ifenslave 3.3.1 Configuring Multiple Bonds Manually 3.4 Configuring Bonding Manually via Sysfs +3.5 Overriding Configuration for Special Cases 4. Querying Bonding Configuration 4.1 Bonding Configuration @@ -1318,8 +1319,87 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval echo +eth2 > /sys/class/net/bond1/bonding/slaves echo +eth3 > /sys/class/net/bond1/bonding/slaves - -4. Querying Bonding Configuration +3.5 Overriding Configuration for Special Cases +---------------------------------------------- +When using the bonding driver, the physical port which transmits a frame is +typically selected by the bonding driver, and is not relevant to the user or +system administrator. The output port is simply selected using the policies of +the selected bonding mode. On occasion however, it is helpful to direct certain +classes of traffic to certain physical interfaces on output to implement +slightly more complex policies. For example, to reach a web server over a +bonded interface in which eth0 connects to a private network, while eth1 +connects via a public network, it may be desirous to bias the bond to send said +traffic over eth0 first, using eth1 only as a fall back, while all other traffic +can safely be sent over either interface. Such configurations may be achieved +using the traffic control utilities inherent in linux. + +By default the bonding driver is multiqueue aware and 16 queues are created +when the driver initializes (see Documentation/networking/multiqueue.txt +for details). If more or less queues are desired the module parameter +tx_queues can be used to change this value. There is no sysfs parameter +available as the allocation is done at module init time. + +The output of the file /proc/net/bonding/bondX has changed so the output Queue +ID is now printed for each slave: + +Bonding Mode: fault-tolerance (active-backup) +Primary Slave: None +Currently Active Slave: eth0 +MII Status: up +MII Polling Interval (ms): 0 +Up Delay (ms): 0 +Down Delay (ms): 0 + +Slave Interface: eth0 +MII Status: up +Link Failure Count: 0 +Permanent HW addr: 00:1a:a0:12:8f:cb +Slave queue ID: 0 + +Slave Interface: eth1 +MII Status: up +Link Failure Count: 0 +Permanent HW addr: 00:1a:a0:12:8f:cc +Slave queue ID: 2 + +The queue_id for a slave can be set using the command: + +# echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id + +Any interface that needs a queue_id set should set it with multiple calls +like the one above until proper priorities are set for all interfaces. On +distributions that allow configuration via initscripts, multiple 'queue_id' +arguments can be added to BONDING_OPTS to set all needed slave queues. 
+ +These queue id's can be used in conjunction with the tc utility to configure +a multiqueue qdisc and filters to bias certain traffic to transmit on certain +slave devices. For instance, say we wanted, in the above configuration to +force all traffic bound to 192.168.1.100 to use eth1 in the bond as its output +device. The following commands would accomplish this: + +# tc qdisc add dev bond0 handle 1 root multiq + +# tc filter add dev bond0 protocol ip parent 1: prio 1 u32 match ip dst \ + 192.168.1.100 action skbedit queue_mapping 2 + +These commands tell the kernel to attach a multiqueue queue discipline to the +bond0 interface and filter traffic enqueued to it, such that packets with a dst +ip of 192.168.1.100 have their output queue mapping value overwritten to 2. +This value is then passed into the driver, causing the normal output path +selection policy to be overridden, selecting instead qid 2, which maps to eth1. + +Note that qid values begin at 1. Qid 0 is reserved to initiate to the driver +that normal output policy selection should take place. One benefit to simply +leaving the qid for a slave to 0 is the multiqueue awareness in the bonding +driver that is now present. This awareness allows tc filters to be placed on +slave devices as well as bond devices and the bonding driver will simply act as +a pass-through for selecting output queues on the slave device rather than +output port selection. + +This feature first appeared in bonding driver version 3.7.0 and support for +output slave selection was limited to round-robin and active-backup modes. + +4 Querying Bonding Configuration ================================= 4.1 Bonding Configuration diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt new file mode 100644 index 0000000000000000000000000000000000000000..61d7c9247453e54693375d70b853d35541b3abb6 --- /dev/null +++ b/Documentation/networking/caif/spi_porting.txt @@ -0,0 +1,208 @@ +- CAIF SPI porting - + +- CAIF SPI basics: + +Running CAIF over SPI needs some extra setup, owing to the nature of SPI. +Two extra GPIOs have been added in order to negotiate the transfers + between the master and the slave. The minimum requirement for running +CAIF over SPI is a SPI slave chip and two GPIOs (more details below). +Please note that running as a slave implies that you need to keep up +with the master clock. An overrun or underrun event is fatal. + +- CAIF SPI framework: + +To make porting as easy as possible, the CAIF SPI has been divided in +two parts. The first part (called the interface part) deals with all +generic functionality such as length framing, SPI frame negotiation +and SPI frame delivery and transmission. The other part is the CAIF +SPI slave device part, which is the module that you have to write if +you want to run SPI CAIF on a new hardware. This part takes care of +the physical hardware, both with regard to SPI and to GPIOs. + +- Implementing a CAIF SPI device: + + - Functionality provided by the CAIF SPI slave device: + + In order to implement a SPI device you will, as a minimum, + need to implement the following + functions: + + int (*init_xfer) (struct cfspi_xfer * xfer, struct cfspi_dev *dev): + + This function is called by the CAIF SPI interface to give + you a chance to set up your hardware to be ready to receive + a stream of data from the master. 
The xfer structure contains + both physical and logical adresses, as well as the total length + of the transfer in both directions.The dev parameter can be used + to map to different CAIF SPI slave devices. + + void (*sig_xfer) (bool xfer, struct cfspi_dev *dev): + + This function is called by the CAIF SPI interface when the output + (SPI_INT) GPIO needs to change state. The boolean value of the xfer + variable indicates whether the GPIO should be asserted (HIGH) or + deasserted (LOW). The dev parameter can be used to map to different CAIF + SPI slave devices. + + - Functionality provided by the CAIF SPI interface: + + void (*ss_cb) (bool assert, struct cfspi_ifc *ifc); + + This function is called by the CAIF SPI slave device in order to + signal a change of state of the input GPIO (SS) to the interface. + Only active edges are mandatory to be reported. + This function can be called from IRQ context (recommended in order + not to introduce latency). The ifc parameter should be the pointer + returned from the platform probe function in the SPI device structure. + + void (*xfer_done_cb) (struct cfspi_ifc *ifc); + + This function is called by the CAIF SPI slave device in order to + report that a transfer is completed. This function should only be + called once both the transmission and the reception are completed. + This function can be called from IRQ context (recommended in order + not to introduce latency). The ifc parameter should be the pointer + returned from the platform probe function in the SPI device structure. + + - Connecting the bits and pieces: + + - Filling in the SPI slave device structure: + + Connect the necessary callback functions. + Indicate clock speed (used to calculate toggle delays). + Chose a suitable name (helps debugging if you use several CAIF + SPI slave devices). + Assign your private data (can be used to map to your structure). + + - Filling in the SPI slave platform device structure: + Add name of driver to connect to ("cfspi_sspi"). + Assign the SPI slave device structure as platform data. + +- Padding: + +In order to optimize throughput, a number of SPI padding options are provided. +Padding can be enabled independently for uplink and downlink transfers. +Padding can be enabled for the head, the tail and for the total frame size. +The padding needs to be correctly configured on both sides of the link. +The padding can be changed via module parameters in cfspi_sspi.c or via +the sysfs directory of the cfspi_sspi driver (before device registration). + +- CAIF SPI device template: + +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * License terms: GNU General Public License (GPL), version 2. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); + +struct sspi_struct { + struct cfspi_dev sdev; + struct cfspi_xfer *xfer; +}; + +static struct sspi_struct slave; +static struct platform_device slave_device; + +static irqreturn_t sspi_irq(int irq, void *arg) +{ + /* You only need to trigger on an edge to the active state of the + * SS signal. Once a edge is detected, the ss_cb() function should be + * called with the parameter assert set to true. It is OK + * (and even advised) to call the ss_cb() function in IRQ context in + * order not to add any delay. */ + + return IRQ_HANDLED; +} + +static void sspi_complete(void *context) +{ + /* Normally the DMA or the SPI framework will call you back + * in something similar to this. 
The only thing you need to + * do is to call the xfer_done_cb() function, providing the pointer + * to the CAIF SPI interface. It is OK to call this function + * from IRQ context. */ +} + +static int sspi_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev) +{ + /* Store transfer info. For a normal implementation you should + * set up your DMA here and make sure that you are ready to + * receive the data from the master SPI. */ + + struct sspi_struct *sspi = (struct sspi_struct *)dev->priv; + + sspi->xfer = xfer; + + return 0; +} + +void sspi_sig_xfer(bool xfer, struct cfspi_dev *dev) +{ + /* If xfer is true then you should assert the SPI_INT to indicate to + * the master that you are ready to recieve the data from the master + * SPI. If xfer is false then you should de-assert SPI_INT to indicate + * that the transfer is done. + */ + + struct sspi_struct *sspi = (struct sspi_struct *)dev->priv; +} + +static void sspi_release(struct device *dev) +{ + /* + * Here you should release your SPI device resources. + */ +} + +static int __init sspi_init(void) +{ + /* Here you should initialize your SPI device by providing the + * necessary functions, clock speed, name and private data. Once + * done, you can register your device with the + * platform_device_register() function. This function will return + * with the CAIF SPI interface initialized. This is probably also + * the place where you should set up your GPIOs, interrupts and SPI + * resources. */ + + int res = 0; + + /* Initialize slave device. */ + slave.sdev.init_xfer = sspi_init_xfer; + slave.sdev.sig_xfer = sspi_sig_xfer; + slave.sdev.clk_mhz = 13; + slave.sdev.priv = &slave; + slave.sdev.name = "spi_sspi"; + slave_device.dev.release = sspi_release; + + /* Initialize platform device. */ + slave_device.name = "cfspi_sspi"; + slave_device.dev.platform_data = &slave.sdev; + + /* Register platform device. */ + res = platform_device_register(&slave_device); + if (res) { + printk(KERN_WARNING "sspi_init: failed to register dev.\n"); + return -ENODEV; + } + + return res; +} + +static void __exit sspi_exit(void) +{ + platform_device_del(&slave_device); +} + +module_init(sspi_init); +module_exit(sspi_exit); diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index d0536b5a4e014afc6aa48f5d0c9a2f36ff91969a..f350c69b2bb4f807340416ffe7cf29cc8b02e662 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -903,7 +903,7 @@ arp_ignore - INTEGER arp_notify - BOOLEAN Define mode for notification of address and device changes. 0 - (default): do nothing - 1 - Generate gratuitous arp replies when device is brought up + 1 - Generate gratuitous arp requests when device is brought up or hardware address changes. arp_accept - BOOLEAN diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index 98f71a5cef004ef041c8f187cf007424080ec203..2546aa4dc23251a95bbe56440c123cb21fecf71e 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt @@ -493,6 +493,32 @@ The user can also use poll() to check if a buffer is available: pfd.events = POLLOUT; retval = poll(&pfd, 1, timeout); +------------------------------------------------------------------------------- ++ PACKET_TIMESTAMP +------------------------------------------------------------------------------- + +The PACKET_TIMESTAMP setting determines the source of the timestamp in +the packet meta information. 
If your NIC is capable of timestamping +packets in hardware, you can request those hardware timestamps to used. +Note: you may need to enable the generation of hardware timestamps with +SIOCSHWTSTAMP. + +PACKET_TIMESTAMP accepts the same integer bit field as +SO_TIMESTAMPING. However, only the SOF_TIMESTAMPING_SYS_HARDWARE +and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by +PACKET_TIMESTAMP. SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over +SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set. + + int req = 0; + req |= SOF_TIMESTAMPING_SYS_HARDWARE; + setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req)) + +If PACKET_TIMESTAMP is not set, a software timestamp generated inside +the networking stack is used (the behavior before this setting was added). + +See include/linux/net_tstamp.h and Documentation/networking/timestamping +for more information on hardware timestamps. + -------------------------------------------------------------------------------- + THANKS -------------------------------------------------------------------------------- diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt index 61bb645d50e061d7450f3f75d8805d2ca9308501..75e4fd708ccb3804904515648e6bf9de7a7a39ae 100644 --- a/Documentation/networking/pktgen.txt +++ b/Documentation/networking/pktgen.txt @@ -151,6 +151,8 @@ Examples: pgset stop aborts injection. Also, ^C aborts generator. + pgset "rate 300M" set rate to 300 Mb/s + pgset "ratep 1000000" set rate to 1Mpps Example scripts =============== @@ -241,6 +243,9 @@ src6 flows flowlen +rate +ratep + References: ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/ ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/ diff --git a/MAINTAINERS b/MAINTAINERS index 33606bb91f1f77e27906475a7725c0f9df02e2ac..7c3b67c34e52a4d5a98df91c60ca9183f6f11f4f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -313,11 +313,9 @@ S: Maintained F: drivers/hwmon/adm1029.c ADM8211 WIRELESS DRIVER -M: Michael Wu L: linux-wireless@vger.kernel.org W: http://linuxwireless.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git -S: Maintained +S: Orphan F: drivers/net/wireless/adm8211.* ADT746X FAN DRIVER @@ -1369,7 +1367,7 @@ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER M: Eilon Greenstein L: netdev@vger.kernel.org S: Supported -F: drivers/net/bnx2x* +F: drivers/net/bnx2x/ BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Matt Carlson @@ -1771,6 +1769,13 @@ W: http://www.openfabrics.org S: Supported F: drivers/infiniband/hw/cxgb4/ +CXGB4VF ETHERNET DRIVER (CXGB4VF) +M: Casey Leedom +L: netdev@vger.kernel.org +W: http://www.chelsio.com +S: Supported +F: drivers/net/cxgb4vf/ + CYBERPRO FB DRIVER M: Russell King L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -3686,7 +3691,7 @@ F: include/linux/mv643xx.h MARVELL MWL8K WIRELESS DRIVER M: Lennert Buytenhek L: linux-wireless@vger.kernel.org -S: Maintained +S: Odd Fixes F: drivers/net/wireless/mwl8k.c MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER @@ -3915,17 +3920,19 @@ L: netem@lists.linux-foundation.org S: Maintained F: net/sched/sch_netem.c -NETERION (S2IO) 10GbE DRIVER (xframe/vxge) -M: Ramkrishna Vepa -M: Rastapur Santosh -M: Sivakumar Subramani -M: Sreenivasa Honnur +NETERION 10GbE DRIVERS (s2io/vxge) +M: Ramkrishna Vepa +M: Sivakumar Subramani +M: Sreenivasa Honnur +M: Jon Mason L: netdev@vger.kernel.org W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous W: 
http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous S: Supported F: Documentation/networking/s2io.txt F: drivers/net/s2io* +F: Documentation/networking/vxge.txt +F: drivers/net/vxge/ NETFILTER/IPTABLES/IPCHAINS P: Rusty Russell @@ -4272,10 +4279,9 @@ F: include/scsi/osd_* F: fs/exofs/ P54 WIRELESS DRIVER -M: Michael Wu +M: Christian Lamparter L: linux-wireless@vger.kernel.org -W: http://prism54.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git +W: http://wireless.kernel.org/en/users/Drivers/p54 S: Maintained F: drivers/net/wireless/p54/ @@ -4537,7 +4543,7 @@ PRISM54 WIRELESS DRIVER M: "Luis R. Rodriguez" L: linux-wireless@vger.kernel.org W: http://prism54.org -S: Maintained +S: Obsolete F: drivers/net/wireless/prism54/ PROMISE DC4030 CACHING DISK CONTROLLER DRIVER @@ -4733,9 +4739,8 @@ S: Maintained F: drivers/rapidio/ RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER -M: Corey Thomas L: linux-wireless@vger.kernel.org -S: Maintained +S: Orphan F: drivers/net/wireless/ray* RCUTORTURE MODULE @@ -6068,10 +6073,9 @@ F: Documentation/video4linux/zc0301.txt F: drivers/media/video/zc0301/ USB ZD1201 DRIVER -M: Jeroen Vreeken -L: linux-usb@vger.kernel.org +L: linux-wireless@vger.kernel.org W: http://linux-lc100020.sourceforge.net -S: Maintained +S: Orphan F: drivers/net/wireless/zd1201.* USB ZR364XX DRIVER @@ -6259,14 +6263,6 @@ F: Documentation/watchdog/ F: drivers/watchdog/ F: include/linux/watchdog.h -WAVELAN NETWORK DRIVER & WIRELESS EXTENSIONS -M: Jean Tourrilhes -L: linux-wireless@vger.kernel.org -W: http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/ -S: Maintained -F: Documentation/networking/wavelan.txt -F: drivers/staging/wavelan/ - WD7000 SCSI DRIVER M: Miroslav Zagorac L: linux-scsi@vger.kernel.org diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h index 48c4f0335e3f51b74413a93783da6f1fb8f0970d..81e1f7d5b4cbd50e683d0824daf6ccf569d1bcf0 100644 --- a/arch/microblaze/include/asm/system.h +++ b/arch/microblaze/include/asm/system.h @@ -101,10 +101,7 @@ extern struct dentry *of_debugfs_root; * MicroBlaze doesn't handle unaligned accesses in hardware. * * Based on this we force the IP header alignment in network drivers. - * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining - * cacheline alignment of buffers. */ #define NET_IP_ALIGN 2 -#define NET_SKB_PAD L1_CACHE_BYTES #endif /* _ASM_MICROBLAZE_SYSTEM_H */ diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index a6297c67c3d6ab4e0e4a81246342dbf282dedb27..6c294acac848145d868d9f936c5df5ca879b1920 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -515,11 +515,8 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, * powers of 2 writes until it reaches sufficient alignment). * * Based on this we disable the IP header alignment in network drivers. - * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining - * cacheline alignment of buffers. 
*/ #define NET_IP_ALIGN 0 -#define NET_SKB_PAD L1_CACHE_BYTES #define cmpxchg64(ptr, o, n) \ ({ \ diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index 9a9586f4103f5d4bbea2a1f5d5a0c27b1453e5f4..f02e89ce4df1096cc95deabacae29486734648c4 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -85,7 +85,8 @@ static void appldata_get_net_sum_data(void *data) rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { - const struct net_device_stats *stats = dev_get_stats(dev); + struct rtnl_link_stats64 temp; + const struct net_device_stats *stats = dev_get_stats(dev, &temp); rx_packets += stats->rx_packets; tx_packets += stats->tx_packets; diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index f05372694233a55734500b8aa3578a0338f00d43..2ab233ba32c1564f8323884017108b0d53978366 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -25,11 +25,6 @@ #include "net_kern.h" #include "net_user.h" -static inline void set_ether_mac(struct net_device *dev, unsigned char *addr) -{ - memcpy(dev->dev_addr, addr, ETH_ALEN); -} - #define DRIVER_NAME "uml-netdev" static DEFINE_SPINLOCK(opened_lock); @@ -266,7 +261,7 @@ static int uml_net_set_mac(struct net_device *dev, void *addr) struct sockaddr *hwaddr = addr; spin_lock_irq(&lp->lock); - set_ether_mac(dev, hwaddr->sa_data); + eth_mac_addr(dev, hwaddr->sa_data); spin_unlock_irq(&lp->lock); return 0; @@ -380,7 +375,6 @@ static const struct net_device_ops uml_netdev_ops = { .ndo_tx_timeout = uml_net_tx_timeout, .ndo_set_mac_address = uml_net_set_mac, .ndo_change_mtu = uml_net_change_mtu, - .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -478,7 +472,7 @@ static void eth_configure(int n, void *init, char *mac, ((*transport->user->init)(&lp->user, dev) != 0)) goto out_unregister; - set_ether_mac(dev, device->mac); + eth_mac_addr(dev, device->mac); dev->mtu = transport->user->mtu; dev->netdev_ops = ¨_netdev_ops; dev->ethtool_ops = ¨_net_ethtool_ops; diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index e7f4d33c55edbb80b8ec52d9793a45d3c8854519..33ecc3ea8782add8f88e68405d227ee4af700134 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -457,4 +457,11 @@ static __always_inline void rdtsc_barrier(void) alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); } +/* + * We handle most unaligned accesses in hardware. On the other hand + * unaligned DMA can be quite expensive on some Nehalem processors. + * + * Based on this we disable the IP header alignment in network drivers. 
+ */ +#define NET_IP_ALIGN 0 #endif /* _ASM_X86_SYSTEM_H */ diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index f1a0a00b3b07c8f8563f1cbac1304f9fd8a5f5b3..be7461c9a87e40ed0ce53590d59979dac032b4fa 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -177,7 +177,7 @@ config ATM_ZATM_DEBUG config ATM_NICSTAR tristate "IDT 77201 (NICStAR) (ForeRunnerLE)" - depends on PCI && !64BIT && VIRT_TO_BUS + depends on PCI help The NICStAR chipset family is used in a large number of ATM NICs for 25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index 6d44f07b69f8fc656b75397c26552f4170b7366d..46b94762125b0b179792948cbd47be8a9bc8448f 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -40,6 +40,42 @@ struct adummy_dev { static LIST_HEAD(adummy_devs); +static ssize_t __set_signal(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct atm_dev *atm_dev = container_of(dev, struct atm_dev, class_dev); + int signal; + + if (sscanf(buf, "%d", &signal) == 1) { + + if (signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND) + signal = ATM_PHY_SIG_UNKNOWN; + + atm_dev_signal_change(atm_dev, signal); + return 1; + } + return -EINVAL; +} + +static ssize_t __show_signal(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct atm_dev *atm_dev = container_of(dev, struct atm_dev, class_dev); + return sprintf(buf, "%d\n", atm_dev->signal); +} +static DEVICE_ATTR(signal, 0644, __show_signal, __set_signal); + +static struct attribute *adummy_attrs[] = { + &dev_attr_signal.attr, + NULL +}; + +static struct attribute_group adummy_group_attrs = { + .name = NULL, /* We want them in dev's root folder */ + .attrs = adummy_attrs +}; + static int __init adummy_start(struct atm_dev *dev) { @@ -128,6 +164,9 @@ static int __init adummy_init(void) adummy_dev->atm_dev = atm_dev; atm_dev->dev_data = adummy_dev; + if (sysfs_create_group(&atm_dev->class_dev.kobj, &adummy_group_attrs)) + dev_err(&atm_dev->class_dev, "Could not register attrs for adummy\n"); + if (adummy_start(atm_dev)) { printk(KERN_ERR DEV_LABEL ": adummy_start() failed\n"); err = -ENODEV; diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 9d18644c897e9f43ef6d8f7cd93d90c237d98322..a33896a482e6eb0e0dd964649b5b6d7d90e64858 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2371,10 +2371,8 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); /********** module entry **********/ static struct pci_device_id amb_pci_tbl[] = { - { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 }, - { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 }, + { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 }, + { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 }, { 0, } }; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 90a5a7cac740ed195c9a73b62d3288ec3db697fe..80f9f3659e4d83f246bafb936c2490ff15455c4a 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2269,10 +2269,8 @@ static int __devinit eni_init_one(struct pci_dev *pci_dev, static struct pci_device_id eni_pci_tbl[] = { - { PCI_VENDOR_ID_EF, PCI_DEVICE_ID_EF_ATM_FPGA, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 /* FPGA */ }, - { PCI_VENDOR_ID_EF, PCI_DEVICE_ID_EF_ATM_ASIC, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 1 /* ASIC */ }, + { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ }, + { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* 
ASIC */ }, { 0, } }; MODULE_DEVICE_TABLE(pci,eni_pci_tbl); diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 6e600afd06ae6d0fbbaeb96d7aa02093954771b5..8717809787fb7af21069dd726e11abbdcff70ee5 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -2027,10 +2027,8 @@ static void __devexit firestream_remove_one (struct pci_dev *pdev) } static struct pci_device_id firestream_pci_tbl[] = { - { PCI_VENDOR_ID_FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, FS_IS50}, - { PCI_VENDOR_ID_FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, FS_IS155}, + { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50}, + { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155}, { 0, } }; diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 56c2e99e458fd7ea85e25f4ee3d5b7e0ed9aa261..801e8b6e9d1fee08dd1b3d091ff3739193ee98cc 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -67,6 +67,7 @@ #include #include #include +#include #include #include #include @@ -778,61 +779,39 @@ he_init_cs_block_rcm(struct he_dev *he_dev) static int __devinit he_init_group(struct he_dev *he_dev, int group) { + struct he_buff *heb, *next; + dma_addr_t mapping; int i; - /* small buffer pool */ - he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev, - CONFIG_RBPS_BUFSIZE, 8, 0); - if (he_dev->rbps_pool == NULL) { - hprintk("unable to create rbps pages\n"); + he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); + he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); + he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); + he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), + G0_RBPS_BS + (group * 32)); + + /* bitmap table */ + he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE) + * sizeof(unsigned long), GFP_KERNEL); + if (!he_dev->rbpl_table) { + hprintk("unable to allocate rbpl bitmap table\n"); return -ENOMEM; } + bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE); - he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev, - CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys); - if (he_dev->rbps_base == NULL) { - hprintk("failed to alloc rbps_base\n"); - goto out_destroy_rbps_pool; + /* rbpl_virt 64-bit pointers */ + he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE + * sizeof(struct he_buff *), GFP_KERNEL); + if (!he_dev->rbpl_virt) { + hprintk("unable to allocate rbpl virt table\n"); + goto out_free_rbpl_table; } - memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp)); - he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL); - if (he_dev->rbps_virt == NULL) { - hprintk("failed to alloc rbps_virt\n"); - goto out_free_rbps_base; - } - - for (i = 0; i < CONFIG_RBPS_SIZE; ++i) { - dma_addr_t dma_handle; - void *cpuaddr; - - cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle); - if (cpuaddr == NULL) - goto out_free_rbps_virt; - - he_dev->rbps_virt[i].virt = cpuaddr; - he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF); - he_dev->rbps_base[i].phys = dma_handle; - - } - he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1]; - - he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32)); - he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), - G0_RBPS_T + (group * 32)); - he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4, - G0_RBPS_BS + (group * 32)); - he_writel(he_dev, - RBP_THRESH(CONFIG_RBPS_THRESH) | - RBP_QSIZE(CONFIG_RBPS_SIZE - 1) | - RBP_INT_ENB, - G0_RBPS_QI + (group * 32)); /* large buffer pool */ he_dev->rbpl_pool = 
pci_pool_create("rbpl", he_dev->pci_dev, - CONFIG_RBPL_BUFSIZE, 8, 0); + CONFIG_RBPL_BUFSIZE, 64, 0); if (he_dev->rbpl_pool == NULL) { hprintk("unable to create rbpl pool\n"); - goto out_free_rbps_virt; + goto out_free_rbpl_virt; } he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev, @@ -842,30 +821,29 @@ he_init_group(struct he_dev *he_dev, int group) goto out_destroy_rbpl_pool; } memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp)); - he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL); - if (he_dev->rbpl_virt == NULL) { - hprintk("failed to alloc rbpl_virt\n"); - goto out_free_rbpl_base; - } + + INIT_LIST_HEAD(&he_dev->rbpl_outstanding); for (i = 0; i < CONFIG_RBPL_SIZE; ++i) { - dma_addr_t dma_handle; - void *cpuaddr; - cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle); - if (cpuaddr == NULL) - goto out_free_rbpl_virt; + heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping); + if (!heb) + goto out_free_rbpl; + heb->mapping = mapping; + list_add(&heb->entry, &he_dev->rbpl_outstanding); - he_dev->rbpl_virt[i].virt = cpuaddr; - he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF); - he_dev->rbpl_base[i].phys = dma_handle; + set_bit(i, he_dev->rbpl_table); + he_dev->rbpl_virt[i] = heb; + he_dev->rbpl_hint = i + 1; + he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET; + he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data); } he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1]; he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32)); he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T + (group * 32)); - he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4, + he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4, G0_RBPL_BS + (group * 32)); he_writel(he_dev, RBP_THRESH(CONFIG_RBPL_THRESH) | @@ -879,7 +857,7 @@ he_init_group(struct he_dev *he_dev, int group) CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys); if (he_dev->rbrq_base == NULL) { hprintk("failed to allocate rbrq\n"); - goto out_free_rbpl_virt; + goto out_free_rbpl; } memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq)); @@ -920,33 +898,20 @@ he_init_group(struct he_dev *he_dev, int group) pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), he_dev->rbrq_base, he_dev->rbrq_phys); - i = CONFIG_RBPL_SIZE; -out_free_rbpl_virt: - while (i--) - pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt, - he_dev->rbpl_base[i].phys); - kfree(he_dev->rbpl_virt); +out_free_rbpl: + list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) + pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); -out_free_rbpl_base: pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); out_destroy_rbpl_pool: pci_pool_destroy(he_dev->rbpl_pool); +out_free_rbpl_virt: + kfree(he_dev->rbpl_virt); +out_free_rbpl_table: + kfree(he_dev->rbpl_table); - i = CONFIG_RBPS_SIZE; -out_free_rbps_virt: - while (i--) - pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt, - he_dev->rbps_base[i].phys); - kfree(he_dev->rbps_virt); - -out_free_rbps_base: - pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE * - sizeof(struct he_rbp), he_dev->rbps_base, - he_dev->rbps_phys); -out_destroy_rbps_pool: - pci_pool_destroy(he_dev->rbps_pool); return -ENOMEM; } @@ -1002,7 +967,8 @@ he_init_irq(struct he_dev *he_dev) he_writel(he_dev, 0x0, GRP_54_MAP); he_writel(he_dev, 0x0, GRP_76_MAP); - if 
(request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) { + if (request_irq(he_dev->pci_dev->irq, + he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) { hprintk("irq %d already in use\n", he_dev->pci_dev->irq); return -EINVAL; } @@ -1576,9 +1542,10 @@ he_start(struct atm_dev *dev) static void he_stop(struct he_dev *he_dev) { - u16 command; - u32 gen_cntl_0, reg; + struct he_buff *heb, *next; struct pci_dev *pci_dev; + u32 gen_cntl_0, reg; + u16 command; pci_dev = he_dev->pci_dev; @@ -1619,37 +1586,19 @@ he_stop(struct he_dev *he_dev) he_dev->hsp, he_dev->hsp_phys); if (he_dev->rbpl_base) { - int i; - - for (i = 0; i < CONFIG_RBPL_SIZE; ++i) { - void *cpuaddr = he_dev->rbpl_virt[i].virt; - dma_addr_t dma_handle = he_dev->rbpl_base[i].phys; + list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) + pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); - pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle); - } pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); } + kfree(he_dev->rbpl_virt); + kfree(he_dev->rbpl_table); + if (he_dev->rbpl_pool) pci_pool_destroy(he_dev->rbpl_pool); - if (he_dev->rbps_base) { - int i; - - for (i = 0; i < CONFIG_RBPS_SIZE; ++i) { - void *cpuaddr = he_dev->rbps_virt[i].virt; - dma_addr_t dma_handle = he_dev->rbps_base[i].phys; - - pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle); - } - pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE - * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys); - } - - if (he_dev->rbps_pool) - pci_pool_destroy(he_dev->rbps_pool); - if (he_dev->rbrq_base) pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), he_dev->rbrq_base, he_dev->rbrq_phys); @@ -1679,13 +1628,13 @@ static struct he_tpd * __alloc_tpd(struct he_dev *he_dev) { struct he_tpd *tpd; - dma_addr_t dma_handle; + dma_addr_t mapping; - tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle); + tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping); if (tpd == NULL) return NULL; - tpd->status = TPD_ADDR(dma_handle); + tpd->status = TPD_ADDR(mapping); tpd->reserved = 0; tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; @@ -1714,13 +1663,12 @@ he_service_rbrq(struct he_dev *he_dev, int group) struct he_rbrq *rbrq_tail = (struct he_rbrq *) ((unsigned long)he_dev->rbrq_base | he_dev->hsp->group[group].rbrq_tail); - struct he_rbp *rbp = NULL; unsigned cid, lastcid = -1; - unsigned buf_len = 0; struct sk_buff *skb; struct atm_vcc *vcc = NULL; struct he_vcc *he_vcc; - struct he_iovec *iov; + struct he_buff *heb, *next; + int i; int pdus_assembled = 0; int updated = 0; @@ -1740,44 +1688,35 @@ he_service_rbrq(struct he_dev *he_dev, int group) RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "", RBRQ_HBUF_ERR(he_dev->rbrq_head) ? 
" HBUF_ERR" : ""); - if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF) - rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))]; - else - rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))]; - - buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; - cid = RBRQ_CID(he_dev->rbrq_head); + i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET; + heb = he_dev->rbpl_virt[i]; + cid = RBRQ_CID(he_dev->rbrq_head); if (cid != lastcid) vcc = __find_vcc(he_dev, cid); lastcid = cid; - if (vcc == NULL) { - hprintk("vcc == NULL (cid 0x%x)\n", cid); - if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) - rbp->status &= ~RBP_LOANED; + if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) { + hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid); + if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) { + clear_bit(i, he_dev->rbpl_table); + list_del(&heb->entry); + pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); + } goto next_rbrq_entry; } - he_vcc = HE_VCC(vcc); - if (he_vcc == NULL) { - hprintk("he_vcc == NULL (cid 0x%x)\n", cid); - if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) - rbp->status &= ~RBP_LOANED; - goto next_rbrq_entry; - } - if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { hprintk("HBUF_ERR! (cid 0x%x)\n", cid); atomic_inc(&vcc->stats->rx_drop); goto return_host_buffers; } - he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head); - he_vcc->iov_tail->iov_len = buf_len; - he_vcc->pdu_len += buf_len; - ++he_vcc->iov_tail; + heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4; + clear_bit(i, he_dev->rbpl_table); + list_move_tail(&heb->entry, &he_vcc->buffers); + he_vcc->pdu_len += heb->len; if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) { lastcid = -1; @@ -1786,12 +1725,6 @@ he_service_rbrq(struct he_dev *he_dev, int group) goto return_host_buffers; } -#ifdef notdef - if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) { - hprintk("iovec full! 
cid 0x%x\n", cid); - goto return_host_buffers; - } -#endif if (!RBRQ_END_PDU(he_dev->rbrq_head)) goto next_rbrq_entry; @@ -1819,15 +1752,8 @@ he_service_rbrq(struct he_dev *he_dev, int group) __net_timestamp(skb); - for (iov = he_vcc->iov_head; - iov < he_vcc->iov_tail; ++iov) { - if (iov->iov_base & RBP_SMALLBUF) - memcpy(skb_put(skb, iov->iov_len), - he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len); - else - memcpy(skb_put(skb, iov->iov_len), - he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len); - } + list_for_each_entry(heb, &he_vcc->buffers, entry) + memcpy(skb_put(skb, heb->len), &heb->data, heb->len); switch (vcc->qos.aal) { case ATM_AAL0: @@ -1867,17 +1793,9 @@ he_service_rbrq(struct he_dev *he_dev, int group) return_host_buffers: ++pdus_assembled; - for (iov = he_vcc->iov_head; - iov < he_vcc->iov_tail; ++iov) { - if (iov->iov_base & RBP_SMALLBUF) - rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)]; - else - rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)]; - - rbp->status &= ~RBP_LOANED; - } - - he_vcc->iov_tail = he_vcc->iov_head; + list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry) + pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); + INIT_LIST_HEAD(&he_vcc->buffers); he_vcc->pdu_len = 0; next_rbrq_entry: @@ -1978,59 +1896,51 @@ he_service_tbrq(struct he_dev *he_dev, int group) } } - static void he_service_rbpl(struct he_dev *he_dev, int group) { - struct he_rbp *newtail; + struct he_rbp *new_tail; struct he_rbp *rbpl_head; + struct he_buff *heb; + dma_addr_t mapping; + int i; int moved = 0; rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | RBPL_MASK(he_readl(he_dev, G0_RBPL_S))); for (;;) { - newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | + new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base | RBPL_MASK(he_dev->rbpl_tail+1)); /* table 3.42 -- rbpl_tail should never be set to rbpl_head */ - if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED)) + if (new_tail == rbpl_head) break; - newtail->status |= RBP_LOANED; - he_dev->rbpl_tail = newtail; - ++moved; - } - - if (moved) - he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); -} - -static void -he_service_rbps(struct he_dev *he_dev, int group) -{ - struct he_rbp *newtail; - struct he_rbp *rbps_head; - int moved = 0; - - rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base | - RBPS_MASK(he_readl(he_dev, G0_RBPS_S))); - - for (;;) { - newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base | - RBPS_MASK(he_dev->rbps_tail+1)); + i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint); + if (i > (RBPL_TABLE_SIZE - 1)) { + i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE); + if (i > (RBPL_TABLE_SIZE - 1)) + break; + } + he_dev->rbpl_hint = i + 1; - /* table 3.42 -- rbps_tail should never be set to rbps_head */ - if ((newtail == rbps_head) || (newtail->status & RBP_LOANED)) + heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping); + if (!heb) break; - - newtail->status |= RBP_LOANED; - he_dev->rbps_tail = newtail; + heb->mapping = mapping; + list_add(&heb->entry, &he_dev->rbpl_outstanding); + he_dev->rbpl_virt[i] = heb; + set_bit(i, he_dev->rbpl_table); + new_tail->idx = i << RBP_IDX_OFFSET; + new_tail->phys = mapping + offsetof(struct he_buff, data); + + he_dev->rbpl_tail = new_tail; ++moved; } if (moved) - he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T); + he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T); } static void @@ -2055,10 +1965,8 @@ 
he_tasklet(unsigned long data) HPRINTK("rbrq%d threshold\n", group); /* fall through */ case ITYPE_RBRQ_TIMER: - if (he_service_rbrq(he_dev, group)) { + if (he_service_rbrq(he_dev, group)) he_service_rbpl(he_dev, group); - he_service_rbps(he_dev, group); - } break; case ITYPE_TBRQ_THRESH: HPRINTK("tbrq%d threshold\n", group); @@ -2070,7 +1978,7 @@ he_tasklet(unsigned long data) he_service_rbpl(he_dev, group); break; case ITYPE_RBPS_THRESH: - he_service_rbps(he_dev, group); + /* shouldn't happen unless small buffers enabled */ break; case ITYPE_PHY: HPRINTK("phy interrupt\n"); @@ -2098,7 +2006,6 @@ he_tasklet(unsigned long data) he_service_rbrq(he_dev, 0); he_service_rbpl(he_dev, 0); - he_service_rbps(he_dev, 0); he_service_tbrq(he_dev, 0); break; default: @@ -2252,7 +2159,7 @@ he_open(struct atm_vcc *vcc) return -ENOMEM; } - he_vcc->iov_tail = he_vcc->iov_head; + INIT_LIST_HEAD(&he_vcc->buffers); he_vcc->pdu_len = 0; he_vcc->rc_index = -1; @@ -2406,8 +2313,8 @@ he_open(struct atm_vcc *vcc) goto open_failed; } - rsr1 = RSR1_GROUP(0); - rsr4 = RSR4_GROUP(0); + rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY; + rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY; rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0; @@ -2963,8 +2870,7 @@ module_param(sdh, bool, 0); MODULE_PARM_DESC(sdh, "use SDH framing (default 0)"); static struct pci_device_id he_pci_tbl[] = { - { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 }, + { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 }, { 0, } }; diff --git a/drivers/atm/he.h b/drivers/atm/he.h index c2983e0d4ec1c9b40889dda7e169779888a0d30a..110a27d2ecfc5b4e4aa9a9a6e8762117be73db8f 100644 --- a/drivers/atm/he.h +++ b/drivers/atm/he.h @@ -67,11 +67,6 @@ #define CONFIG_RBPL_BUFSIZE 4096 #define RBPL_MASK(x) (((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1)) -#define CONFIG_RBPS_SIZE 1024 -#define CONFIG_RBPS_THRESH 64 -#define CONFIG_RBPS_BUFSIZE 128 -#define RBPS_MASK(x) (((unsigned long)(x))&((CONFIG_RBPS_SIZE<<3)-1)) - /* 5.1.3 initialize connection memory */ #define CONFIG_RSRA 0x00000 @@ -203,36 +198,37 @@ struct he_hsp { } group[HE_NUM_GROUPS]; }; -/* figure 2.9 receive buffer pools */ +/* + * figure 2.9 receive buffer pools + * + * since a virtual address might be more than 32 bits, we store an index + * in the virt member of he_rbp. NOTE: the lower six bits in the rbrq + * addr member are used for buffer status further limiting us to 26 bits. + */ struct he_rbp { volatile u32 phys; - volatile u32 status; + volatile u32 idx; /* virt */ }; -/* NOTE: it is suggested that virt be the virtual address of the host - buffer. on a 64-bit machine, this would not work. Instead, we - store the real virtual address in another list, and store an index - (and buffer status) in the virt member. -*/ +#define RBP_IDX_OFFSET 6 -#define RBP_INDEX_OFF 6 -#define RBP_INDEX(x) (((long)(x) >> RBP_INDEX_OFF) & 0xffff) -#define RBP_LOANED 0x80000000 -#define RBP_SMALLBUF 0x40000000 +/* + * the he dma engine will try to hold an extra 16 buffers in its local + * caches. and add a couple buffers for safety. 
+ */ -struct he_virt { - void *virt; -}; +#define RBPL_TABLE_SIZE (CONFIG_RBPL_SIZE + 16 + 2) -#define RBPL_ALIGNMENT CONFIG_RBPL_SIZE -#define RBPS_ALIGNMENT CONFIG_RBPS_SIZE +struct he_buff { + struct list_head entry; + dma_addr_t mapping; + unsigned long len; + u8 data[]; +}; #ifdef notyet struct he_group { - u32 rpbs_size, rpbs_qsize; - struct he_rbp rbps_ba; - u32 rpbl_size, rpbl_qsize; struct he_rpb_entry *rbpl_ba; }; @@ -297,18 +293,15 @@ struct he_dev { struct he_rbrq *rbrq_base, *rbrq_head; int rbrq_peak; + struct he_buff **rbpl_virt; + unsigned long *rbpl_table; + unsigned long rbpl_hint; struct pci_pool *rbpl_pool; dma_addr_t rbpl_phys; struct he_rbp *rbpl_base, *rbpl_tail; - struct he_virt *rbpl_virt; + struct list_head rbpl_outstanding; int rbpl_peak; - struct pci_pool *rbps_pool; - dma_addr_t rbps_phys; - struct he_rbp *rbps_base, *rbps_tail; - struct he_virt *rbps_virt; - int rbps_peak; - dma_addr_t tbrq_phys; struct he_tbrq *tbrq_base, *tbrq_head; int tbrq_peak; @@ -321,20 +314,12 @@ struct he_dev { struct he_dev *next; }; -struct he_iovec -{ - u32 iov_base; - u32 iov_len; -}; - #define HE_MAXIOV 20 struct he_vcc { - struct he_iovec iov_head[HE_MAXIOV]; - struct he_iovec *iov_tail; + struct list_head buffers; int pdu_len; - int rc_index; wait_queue_head_t rx_waitq; diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index dab5cf5274fb4d6c7a37915f9d2d7e2f86d546e8..bca9cb89a118b02525fbf68ad7071e755f544c5c 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -126,7 +126,7 @@ static void idt77105_restart_timer_func(unsigned long dummy) istat = GET(ISTAT); /* side effect: clears all interrupt status bits */ if (istat & IDT77105_ISTAT_GOODSIG) { /* Found signal again */ - dev->signal = ATM_PHY_SIG_FOUND; + atm_dev_signal_change(dev, ATM_PHY_SIG_FOUND); printk(KERN_NOTICE "%s(itf %d): signal detected again\n", dev->type,dev->number); /* flush the receive FIFO */ @@ -222,7 +222,7 @@ static void idt77105_int(struct atm_dev *dev) /* Rx Signal Condition Change - line went up or down */ if (istat & IDT77105_ISTAT_GOODSIG) { /* signal detected again */ /* This should not happen (restart timer does it) but JIC */ - dev->signal = ATM_PHY_SIG_FOUND; + atm_dev_signal_change(dev, ATM_PHY_SIG_FOUND); } else { /* signal lost */ /* * Disable interrupts and stop all transmission and @@ -235,7 +235,7 @@ static void idt77105_int(struct atm_dev *dev) IDT77105_MCR_DRIC| IDT77105_MCR_HALTTX ) & ~IDT77105_MCR_EIP, MCR); - dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(dev, ATM_PHY_SIG_LOST); printk(KERN_NOTICE "%s(itf %d): signal lost\n", dev->type,dev->number); } @@ -272,8 +272,9 @@ static int idt77105_start(struct atm_dev *dev) memset(&PRIV(dev)->stats,0,sizeof(struct idt77105_stats)); /* initialise dev->signal from Good Signal Bit */ - dev->signal = GET(ISTAT) & IDT77105_ISTAT_GOODSIG ? ATM_PHY_SIG_FOUND : - ATM_PHY_SIG_LOST; + atm_dev_signal_change(dev, + GET(ISTAT) & IDT77105_ISTAT_GOODSIG ? + ATM_PHY_SIG_FOUND : ATM_PHY_SIG_LOST); if (dev->signal == ATM_PHY_SIG_LOST) printk(KERN_WARNING "%s(itf %d): no signal\n",dev->type, dev->number); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 98657a6a330d00a5ad8f1489e34784cfa3085a37..1679cbf0c5840ace7e8e47f682601f7d35a1273a 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3364,7 +3364,7 @@ init_card(struct atm_dev *dev) writel(SAR_STAT_TMROF, SAR_REG_STAT); } IPRINTK("%s: Request IRQ ... 
", card->name); - if (request_irq(pcidev->irq, idt77252_interrupt, IRQF_DISABLED|IRQF_SHARED, + if (request_irq(pcidev->irq, idt77252_interrupt, IRQF_SHARED, card->name, card) != 0) { printk("%s: can't allocate IRQ.\n", card->name); deinit_card(card); @@ -3779,8 +3779,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) static struct pci_device_id idt77252_pci_tbl[] = { - { PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77252, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 }, { 0, } }; diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index b7473a6110a7600b8bbfdbab35a3289618cd12c7..2f3516b7f118cd59e7b9156ba1b004a6caee6c86 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1,5 +1,4 @@ -/****************************************************************************** - * +/* * nicstar.c * * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. @@ -16,12 +15,10 @@ * * * (C) INESC 1999 - * - * - ******************************************************************************/ - + */ -/**** IMPORTANT INFORMATION *************************************************** +/* + * IMPORTANT INFORMATION * * There are currently three types of spinlocks: * @@ -31,9 +28,9 @@ * * These must NEVER be grabbed in reverse order. * - ******************************************************************************/ + */ -/* Header files ***************************************************************/ +/* Header files */ #include #include @@ -41,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -61,16 +60,11 @@ #include "idt77105.h" #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ -#if BITS_PER_LONG != 32 -# error FIXME: this driver requires a 32-bit platform -#endif - -/* Additional code ************************************************************/ +/* Additional code */ #include "nicstarmac.c" - -/* Configurable parameters ****************************************************/ +/* Configurable parameters */ #undef PHY_LOOPBACK #undef TX_DEBUG @@ -78,11 +72,10 @@ #undef GENERAL_DEBUG #undef EXTRA_DEBUG -#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know - you're going to use only raw ATM */ +#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know + you're going to use only raw ATM */ - -/* Do not touch these *********************************************************/ +/* Do not touch these */ #ifdef TX_DEBUG #define TXPRINTK(args...) printk(args) @@ -108,2908 +101,2773 @@ #define XPRINTK(args...) 
#endif /* EXTRA_DEBUG */ - -/* Macros *********************************************************************/ +/* Macros */ #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) #define NS_DELAY mdelay(1) -#define ALIGN_BUS_ADDR(addr, alignment) \ - ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1)) -#define ALIGN_ADDRESS(addr, alignment) \ - bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment)) - -#undef CEIL +#define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) #ifndef ATM_SKB #define ATM_SKB(s) (&(s)->atm) #endif +#define scq_virt_to_bus(scq, p) \ + (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) -/* Function declarations ******************************************************/ +/* Function declarations */ -static u32 ns_read_sram(ns_dev *card, u32 sram_address); -static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count); +static u32 ns_read_sram(ns_dev * card, u32 sram_address); +static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, + int count); static int __devinit ns_init_card(int i, struct pci_dev *pcidev); -static void __devinit ns_init_card_error(ns_dev *card, int error); -static scq_info *get_scq(int size, u32 scd); -static void free_scq(scq_info *scq, struct atm_vcc *vcc); +static void __devinit ns_init_card_error(ns_dev * card, int error); +static scq_info *get_scq(ns_dev *card, int size, u32 scd); +static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); static void push_rxbufs(ns_dev *, struct sk_buff *); static irqreturn_t ns_irq_handler(int irq, void *dev_id); static int ns_open(struct atm_vcc *vcc); static void ns_close(struct atm_vcc *vcc); -static void fill_tst(ns_dev *card, int n, vc_map *vc); +static void fill_tst(ns_dev * card, int n, vc_map * vc); static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); -static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, - struct sk_buff *skb); -static void process_tsq(ns_dev *card); -static void drain_scq(ns_dev *card, scq_info *scq, int pos); -static void process_rsq(ns_dev *card); -static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe); +static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, + struct sk_buff *skb); +static void process_tsq(ns_dev * card); +static void drain_scq(ns_dev * card, scq_info * scq, int pos); +static void process_rsq(ns_dev * card); +static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); #ifdef NS_USE_DESTRUCTORS static void ns_sb_destructor(struct sk_buff *sb); static void ns_lb_destructor(struct sk_buff *lb); static void ns_hb_destructor(struct sk_buff *hb); #endif /* NS_USE_DESTRUCTORS */ -static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb); -static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count); -static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb); -static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb); -static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb); -static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page); -static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); -static void which_list(ns_dev *card, struct sk_buff *skb); +static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); +static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); +static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); +static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); +static void 
dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); +static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); +static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); +#ifdef EXTRA_DEBUG +static void which_list(ns_dev * card, struct sk_buff *skb); +#endif static void ns_poll(unsigned long arg); static int ns_parse_mac(char *mac, unsigned char *esi); -static short ns_h2i(char c); static void ns_phy_put(struct atm_dev *dev, unsigned char value, - unsigned long addr); + unsigned long addr); static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); - - -/* Global variables ***********************************************************/ +/* Global variables */ static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; -static struct atmdev_ops atm_ops = -{ - .open = ns_open, - .close = ns_close, - .ioctl = ns_ioctl, - .send = ns_send, - .phy_put = ns_phy_put, - .phy_get = ns_phy_get, - .proc_read = ns_proc_read, - .owner = THIS_MODULE, +static struct atmdev_ops atm_ops = { + .open = ns_open, + .close = ns_close, + .ioctl = ns_ioctl, + .send = ns_send, + .phy_put = ns_phy_put, + .phy_get = ns_phy_get, + .proc_read = ns_proc_read, + .owner = THIS_MODULE, }; + static struct timer_list ns_timer; static char *mac[NS_MAX_CARDS]; module_param_array(mac, charp, NULL, 0); MODULE_LICENSE("GPL"); - -/* Functions*******************************************************************/ +/* Functions */ static int __devinit nicstar_init_one(struct pci_dev *pcidev, const struct pci_device_id *ent) { - static int index = -1; - unsigned int error; + static int index = -1; + unsigned int error; - index++; - cards[index] = NULL; + index++; + cards[index] = NULL; - error = ns_init_card(index, pcidev); - if (error) { - cards[index--] = NULL; /* don't increment index */ - goto err_out; - } + error = ns_init_card(index, pcidev); + if (error) { + cards[index--] = NULL; /* don't increment index */ + goto err_out; + } - return 0; + return 0; err_out: - return -ENODEV; + return -ENODEV; } - - static void __devexit nicstar_remove_one(struct pci_dev *pcidev) { - int i, j; - ns_dev *card = pci_get_drvdata(pcidev); - struct sk_buff *hb; - struct sk_buff *iovb; - struct sk_buff *lb; - struct sk_buff *sb; - - i = card->index; - - if (cards[i] == NULL) - return; - - if (card->atmdev->phy && card->atmdev->phy->stop) - card->atmdev->phy->stop(card->atmdev); - - /* Stop everything */ - writel(0x00000000, card->membase + CFG); - - /* De-register device */ - atm_dev_deregister(card->atmdev); - - /* Disable PCI device */ - pci_disable_device(pcidev); - - /* Free up resources */ - j = 0; - PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); - while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) - { - dev_kfree_skb_any(hb); - j++; - } - PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); - j = 0; - PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); - while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) - { - dev_kfree_skb_any(iovb); - j++; - } - PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); - while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) - dev_kfree_skb_any(lb); - while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) - dev_kfree_skb_any(sb); - free_scq(card->scq0, NULL); - for (j = 0; j < NS_FRSCD_NUM; j++) - { - if (card->scd2vc[j] != NULL) - free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); - } - kfree(card->rsq.org); - kfree(card->tsq.org); - free_irq(card->pcidev->irq, card); - 
iounmap(card->membase); - kfree(card); + int i, j; + ns_dev *card = pci_get_drvdata(pcidev); + struct sk_buff *hb; + struct sk_buff *iovb; + struct sk_buff *lb; + struct sk_buff *sb; + + i = card->index; + + if (cards[i] == NULL) + return; + + if (card->atmdev->phy && card->atmdev->phy->stop) + card->atmdev->phy->stop(card->atmdev); + + /* Stop everything */ + writel(0x00000000, card->membase + CFG); + + /* De-register device */ + atm_dev_deregister(card->atmdev); + + /* Disable PCI device */ + pci_disable_device(pcidev); + + /* Free up resources */ + j = 0; + PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); + while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { + dev_kfree_skb_any(hb); + j++; + } + PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); + j = 0; + PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, + card->iovpool.count); + while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { + dev_kfree_skb_any(iovb); + j++; + } + PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); + while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) + dev_kfree_skb_any(lb); + while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) + dev_kfree_skb_any(sb); + free_scq(card, card->scq0, NULL); + for (j = 0; j < NS_FRSCD_NUM; j++) { + if (card->scd2vc[j] != NULL) + free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); + } + idr_remove_all(&card->idr); + idr_destroy(&card->idr); + pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, + card->rsq.org, card->rsq.dma); + pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, + card->tsq.org, card->tsq.dma); + free_irq(card->pcidev->irq, card); + iounmap(card->membase); + kfree(card); } - - -static struct pci_device_id nicstar_pci_tbl[] __devinitdata = -{ - {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, +static struct pci_device_id nicstar_pci_tbl[] __devinitdata = { + { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; -MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); - +MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); static struct pci_driver nicstar_driver = { - .name = "nicstar", - .id_table = nicstar_pci_tbl, - .probe = nicstar_init_one, - .remove = __devexit_p(nicstar_remove_one), + .name = "nicstar", + .id_table = nicstar_pci_tbl, + .probe = nicstar_init_one, + .remove = __devexit_p(nicstar_remove_one), }; - - static int __init nicstar_init(void) { - unsigned error = 0; /* Initialized to remove compile warning */ + unsigned error = 0; /* Initialized to remove compile warning */ + + XPRINTK("nicstar: nicstar_init() called.\n"); - XPRINTK("nicstar: nicstar_init() called.\n"); + error = pci_register_driver(&nicstar_driver); - error = pci_register_driver(&nicstar_driver); - - TXPRINTK("nicstar: TX debug enabled.\n"); - RXPRINTK("nicstar: RX debug enabled.\n"); - PRINTK("nicstar: General debug enabled.\n"); + TXPRINTK("nicstar: TX debug enabled.\n"); + RXPRINTK("nicstar: RX debug enabled.\n"); + PRINTK("nicstar: General debug enabled.\n"); #ifdef PHY_LOOPBACK - printk("nicstar: using PHY loopback.\n"); + printk("nicstar: using PHY loopback.\n"); #endif /* PHY_LOOPBACK */ - XPRINTK("nicstar: nicstar_init() returned.\n"); - - if (!error) { - init_timer(&ns_timer); - ns_timer.expires = jiffies + NS_POLL_PERIOD; - ns_timer.data = 0UL; - ns_timer.function = ns_poll; - add_timer(&ns_timer); - } - - return error; -} + XPRINTK("nicstar: nicstar_init() returned.\n"); + if (!error) { + init_timer(&ns_timer); + ns_timer.expires = jiffies + 
NS_POLL_PERIOD; + ns_timer.data = 0UL; + ns_timer.function = ns_poll; + add_timer(&ns_timer); + } + return error; +} static void __exit nicstar_cleanup(void) { - XPRINTK("nicstar: nicstar_cleanup() called.\n"); + XPRINTK("nicstar: nicstar_cleanup() called.\n"); - del_timer(&ns_timer); + del_timer(&ns_timer); - pci_unregister_driver(&nicstar_driver); + pci_unregister_driver(&nicstar_driver); - XPRINTK("nicstar: nicstar_cleanup() returned.\n"); + XPRINTK("nicstar: nicstar_cleanup() returned.\n"); } - - -static u32 ns_read_sram(ns_dev *card, u32 sram_address) +static u32 ns_read_sram(ns_dev * card, u32 sram_address) { - unsigned long flags; - u32 data; - sram_address <<= 2; - sram_address &= 0x0007FFFC; /* address must be dword aligned */ - sram_address |= 0x50000000; /* SRAM read command */ - spin_lock_irqsave(&card->res_lock, flags); - while (CMD_BUSY(card)); - writel(sram_address, card->membase + CMD); - while (CMD_BUSY(card)); - data = readl(card->membase + DR0); - spin_unlock_irqrestore(&card->res_lock, flags); - return data; + unsigned long flags; + u32 data; + sram_address <<= 2; + sram_address &= 0x0007FFFC; /* address must be dword aligned */ + sram_address |= 0x50000000; /* SRAM read command */ + spin_lock_irqsave(&card->res_lock, flags); + while (CMD_BUSY(card)) ; + writel(sram_address, card->membase + CMD); + while (CMD_BUSY(card)) ; + data = readl(card->membase + DR0); + spin_unlock_irqrestore(&card->res_lock, flags); + return data; } - - -static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count) +static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, + int count) { - unsigned long flags; - int i, c; - count--; /* count range now is 0..3 instead of 1..4 */ - c = count; - c <<= 2; /* to use increments of 4 */ - spin_lock_irqsave(&card->res_lock, flags); - while (CMD_BUSY(card)); - for (i = 0; i <= c; i += 4) - writel(*(value++), card->membase + i); - /* Note: DR# registers are the first 4 dwords in nicstar's memspace, - so card->membase + DR0 == card->membase */ - sram_address <<= 2; - sram_address &= 0x0007FFFC; - sram_address |= (0x40000000 | count); - writel(sram_address, card->membase + CMD); - spin_unlock_irqrestore(&card->res_lock, flags); + unsigned long flags; + int i, c; + count--; /* count range now is 0..3 instead of 1..4 */ + c = count; + c <<= 2; /* to use increments of 4 */ + spin_lock_irqsave(&card->res_lock, flags); + while (CMD_BUSY(card)) ; + for (i = 0; i <= c; i += 4) + writel(*(value++), card->membase + i); + /* Note: DR# registers are the first 4 dwords in nicstar's memspace, + so card->membase + DR0 == card->membase */ + sram_address <<= 2; + sram_address &= 0x0007FFFC; + sram_address |= (0x40000000 | count); + writel(sram_address, card->membase + CMD); + spin_unlock_irqrestore(&card->res_lock, flags); } - static int __devinit ns_init_card(int i, struct pci_dev *pcidev) { - int j; - struct ns_dev *card = NULL; - unsigned char pci_latency; - unsigned error; - u32 data; - u32 u32d[4]; - u32 ns_cfg_rctsize; - int bcount; - unsigned long membase; - - error = 0; - - if (pci_enable_device(pcidev)) - { - printk("nicstar%d: can't enable PCI device\n", i); - error = 2; - ns_init_card_error(card, error); - return error; - } - - if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) - { - printk("nicstar%d: can't allocate memory for device structure.\n", i); - error = 2; - ns_init_card_error(card, error); - return error; - } - cards[i] = card; - spin_lock_init(&card->int_lock); - spin_lock_init(&card->res_lock); - - 
pci_set_drvdata(pcidev, card); - - card->index = i; - card->atmdev = NULL; - card->pcidev = pcidev; - membase = pci_resource_start(pcidev, 1); - card->membase = ioremap(membase, NS_IOREMAP_SIZE); - if (!card->membase) - { - printk("nicstar%d: can't ioremap() membase.\n",i); - error = 3; - ns_init_card_error(card, error); - return error; - } - PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase); - - pci_set_master(pcidev); - - if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) - { - printk("nicstar%d: can't read PCI latency timer.\n", i); - error = 6; - ns_init_card_error(card, error); - return error; - } + int j; + struct ns_dev *card = NULL; + unsigned char pci_latency; + unsigned error; + u32 data; + u32 u32d[4]; + u32 ns_cfg_rctsize; + int bcount; + unsigned long membase; + + error = 0; + + if (pci_enable_device(pcidev)) { + printk("nicstar%d: can't enable PCI device\n", i); + error = 2; + ns_init_card_error(card, error); + return error; + } + if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) { + printk(KERN_WARNING + "nicstar%d: No suitable DMA available.\n", i); + error = 2; + ns_init_card_error(card, error); + return error; + } + + if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) { + printk + ("nicstar%d: can't allocate memory for device structure.\n", + i); + error = 2; + ns_init_card_error(card, error); + return error; + } + cards[i] = card; + spin_lock_init(&card->int_lock); + spin_lock_init(&card->res_lock); + + pci_set_drvdata(pcidev, card); + + card->index = i; + card->atmdev = NULL; + card->pcidev = pcidev; + membase = pci_resource_start(pcidev, 1); + card->membase = ioremap(membase, NS_IOREMAP_SIZE); + if (!card->membase) { + printk("nicstar%d: can't ioremap() membase.\n", i); + error = 3; + ns_init_card_error(card, error); + return error; + } + PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); + + pci_set_master(pcidev); + + if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { + printk("nicstar%d: can't read PCI latency timer.\n", i); + error = 6; + ns_init_card_error(card, error); + return error; + } #ifdef NS_PCI_LATENCY - if (pci_latency < NS_PCI_LATENCY) - { - PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); - for (j = 1; j < 4; j++) - { - if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) - break; - } - if (j == 4) - { - printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); - error = 7; - ns_init_card_error(card, error); - return error; - } - } + if (pci_latency < NS_PCI_LATENCY) { + PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, + NS_PCI_LATENCY); + for (j = 1; j < 4; j++) { + if (pci_write_config_byte + (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) + break; + } + if (j == 4) { + printk + ("nicstar%d: can't set PCI latency timer to %d.\n", + i, NS_PCI_LATENCY); + error = 7; + ns_init_card_error(card, error); + return error; + } + } #endif /* NS_PCI_LATENCY */ - - /* Clear timer overflow */ - data = readl(card->membase + STAT); - if (data & NS_STAT_TMROF) - writel(NS_STAT_TMROF, card->membase + STAT); - - /* Software reset */ - writel(NS_CFG_SWRST, card->membase + CFG); - NS_DELAY; - writel(0x00000000, card->membase + CFG); - - /* PHY reset */ - writel(0x00000008, card->membase + GP); - NS_DELAY; - writel(0x00000001, card->membase + GP); - NS_DELAY; - while (CMD_BUSY(card)); - writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* 
Sync UTOPIA with SAR clock */ - NS_DELAY; - - /* Detect PHY type */ - while (CMD_BUSY(card)); - writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); - while (CMD_BUSY(card)); - data = readl(card->membase + DR0); - switch(data) { - case 0x00000009: - printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); - card->max_pcr = ATM_25_PCR; - while(CMD_BUSY(card)); - writel(0x00000008, card->membase + DR0); - writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); - /* Clear an eventual pending interrupt */ - writel(NS_STAT_SFBQF, card->membase + STAT); + + /* Clear timer overflow */ + data = readl(card->membase + STAT); + if (data & NS_STAT_TMROF) + writel(NS_STAT_TMROF, card->membase + STAT); + + /* Software reset */ + writel(NS_CFG_SWRST, card->membase + CFG); + NS_DELAY; + writel(0x00000000, card->membase + CFG); + + /* PHY reset */ + writel(0x00000008, card->membase + GP); + NS_DELAY; + writel(0x00000001, card->membase + GP); + NS_DELAY; + while (CMD_BUSY(card)) ; + writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ + NS_DELAY; + + /* Detect PHY type */ + while (CMD_BUSY(card)) ; + writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); + while (CMD_BUSY(card)) ; + data = readl(card->membase + DR0); + switch (data) { + case 0x00000009: + printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); + card->max_pcr = ATM_25_PCR; + while (CMD_BUSY(card)) ; + writel(0x00000008, card->membase + DR0); + writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); + /* Clear an eventual pending interrupt */ + writel(NS_STAT_SFBQF, card->membase + STAT); #ifdef PHY_LOOPBACK - while(CMD_BUSY(card)); - writel(0x00000022, card->membase + DR0); - writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); + while (CMD_BUSY(card)) ; + writel(0x00000022, card->membase + DR0); + writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); #endif /* PHY_LOOPBACK */ - break; - case 0x00000030: - case 0x00000031: - printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); - card->max_pcr = ATM_OC3_PCR; + break; + case 0x00000030: + case 0x00000031: + printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); + card->max_pcr = ATM_OC3_PCR; #ifdef PHY_LOOPBACK - while(CMD_BUSY(card)); - writel(0x00000002, card->membase + DR0); - writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); + while (CMD_BUSY(card)) ; + writel(0x00000002, card->membase + DR0); + writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); #endif /* PHY_LOOPBACK */ - break; - default: - printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); - error = 8; - ns_init_card_error(card, error); - return error; - } - writel(0x00000000, card->membase + GP); - - /* Determine SRAM size */ - data = 0x76543210; - ns_write_sram(card, 0x1C003, &data, 1); - data = 0x89ABCDEF; - ns_write_sram(card, 0x14003, &data, 1); - if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && - ns_read_sram(card, 0x1C003) == 0x76543210) - card->sram_size = 128; - else - card->sram_size = 32; - PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); - - card->rct_size = NS_MAX_RCTSIZE; + break; + default: + printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); + error = 8; + ns_init_card_error(card, error); + return error; + } + writel(0x00000000, card->membase + GP); + + /* Determine SRAM size */ + data = 0x76543210; + ns_write_sram(card, 0x1C003, &data, 1); + data = 0x89ABCDEF; + ns_write_sram(card, 0x14003, &data, 1); + if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && + 
ns_read_sram(card, 0x1C003) == 0x76543210) + card->sram_size = 128; + else + card->sram_size = 32; + PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); + + card->rct_size = NS_MAX_RCTSIZE; #if (NS_MAX_RCTSIZE == 4096) - if (card->sram_size == 128) - printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); + if (card->sram_size == 128) + printk + ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", + i); #elif (NS_MAX_RCTSIZE == 16384) - if (card->sram_size == 32) - { - printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i); - card->rct_size = 4096; - } + if (card->sram_size == 32) { + printk + ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", + i); + card->rct_size = 4096; + } #else #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c #endif - card->vpibits = NS_VPIBITS; - if (card->rct_size == 4096) - card->vcibits = 12 - NS_VPIBITS; - else /* card->rct_size == 16384 */ - card->vcibits = 14 - NS_VPIBITS; - - /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ - if (mac[i] == NULL) - nicstar_init_eprom(card->membase); - - /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ - writel(0x00000000, card->membase + VPM); - - /* Initialize TSQ */ - card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL); - if (card->tsq.org == NULL) - { - printk("nicstar%d: can't allocate TSQ.\n", i); - error = 10; - ns_init_card_error(card, error); - return error; - } - card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT); - card->tsq.next = card->tsq.base; - card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); - for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) - ns_tsi_init(card->tsq.base + j); - writel(0x00000000, card->membase + TSQH); - writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB); - PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base, - (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB)); - - /* Initialize RSQ */ - card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL); - if (card->rsq.org == NULL) - { - printk("nicstar%d: can't allocate RSQ.\n", i); - error = 11; - ns_init_card_error(card, error); - return error; - } - card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT); - card->rsq.next = card->rsq.base; - card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); - for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) - ns_rsqe_init(card->rsq.base + j); - writel(0x00000000, card->membase + RSQH); - writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB); - PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base); - - /* Initialize SCQ0, the only VBR SCQ used */ - card->scq1 = NULL; - card->scq2 = NULL; - card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0); - if (card->scq0 == NULL) - { - printk("nicstar%d: can't get SCQ0.\n", i); - error = 12; - ns_init_card_error(card, error); - return error; - } - u32d[0] = (u32) virt_to_bus(card->scq0->base); - u32d[1] = (u32) 0x00000000; - u32d[2] = (u32) 0xffffffff; - u32d[3] = (u32) 0x00000000; - ns_write_sram(card, NS_VRSCD0, u32d, 4); - ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ - ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... 
*/ - card->scq0->scd = NS_VRSCD0; - PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base); - - /* Initialize TSTs */ - card->tst_addr = NS_TST0; - card->tst_free_entries = NS_TST_NUM_ENTRIES; - data = NS_TST_OPCODE_VARIABLE; - for (j = 0; j < NS_TST_NUM_ENTRIES; j++) - ns_write_sram(card, NS_TST0 + j, &data, 1); - data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); - ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); - for (j = 0; j < NS_TST_NUM_ENTRIES; j++) - ns_write_sram(card, NS_TST1 + j, &data, 1); - data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); - ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); - for (j = 0; j < NS_TST_NUM_ENTRIES; j++) - card->tste2vc[j] = NULL; - writel(NS_TST0 << 2, card->membase + TSTB); - - - /* Initialize RCT. AAL type is set on opening the VC. */ + card->vpibits = NS_VPIBITS; + if (card->rct_size == 4096) + card->vcibits = 12 - NS_VPIBITS; + else /* card->rct_size == 16384 */ + card->vcibits = 14 - NS_VPIBITS; + + /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ + if (mac[i] == NULL) + nicstar_init_eprom(card->membase); + + /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ + writel(0x00000000, card->membase + VPM); + + /* Initialize TSQ */ + card->tsq.org = pci_alloc_consistent(card->pcidev, + NS_TSQSIZE + NS_TSQ_ALIGNMENT, + &card->tsq.dma); + if (card->tsq.org == NULL) { + printk("nicstar%d: can't allocate TSQ.\n", i); + error = 10; + ns_init_card_error(card, error); + return error; + } + card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); + card->tsq.next = card->tsq.base; + card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); + for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) + ns_tsi_init(card->tsq.base + j); + writel(0x00000000, card->membase + TSQH); + writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); + PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); + + /* Initialize RSQ */ + card->rsq.org = pci_alloc_consistent(card->pcidev, + NS_RSQSIZE + NS_RSQ_ALIGNMENT, + &card->rsq.dma); + if (card->rsq.org == NULL) { + printk("nicstar%d: can't allocate RSQ.\n", i); + error = 11; + ns_init_card_error(card, error); + return error; + } + card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); + card->rsq.next = card->rsq.base; + card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); + for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) + ns_rsqe_init(card->rsq.base + j); + writel(0x00000000, card->membase + RSQH); + writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); + PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); + + /* Initialize SCQ0, the only VBR SCQ used */ + card->scq1 = NULL; + card->scq2 = NULL; + card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); + if (card->scq0 == NULL) { + printk("nicstar%d: can't get SCQ0.\n", i); + error = 12; + ns_init_card_error(card, error); + return error; + } + u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); + u32d[1] = (u32) 0x00000000; + u32d[2] = (u32) 0xffffffff; + u32d[3] = (u32) 0x00000000; + ns_write_sram(card, NS_VRSCD0, u32d, 4); + ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ + ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... 
*/ + card->scq0->scd = NS_VRSCD0; + PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); + + /* Initialize TSTs */ + card->tst_addr = NS_TST0; + card->tst_free_entries = NS_TST_NUM_ENTRIES; + data = NS_TST_OPCODE_VARIABLE; + for (j = 0; j < NS_TST_NUM_ENTRIES; j++) + ns_write_sram(card, NS_TST0 + j, &data, 1); + data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); + ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); + for (j = 0; j < NS_TST_NUM_ENTRIES; j++) + ns_write_sram(card, NS_TST1 + j, &data, 1); + data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); + ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); + for (j = 0; j < NS_TST_NUM_ENTRIES; j++) + card->tste2vc[j] = NULL; + writel(NS_TST0 << 2, card->membase + TSTB); + + /* Initialize RCT. AAL type is set on opening the VC. */ #ifdef RCQ_SUPPORT - u32d[0] = NS_RCTE_RAWCELLINTEN; + u32d[0] = NS_RCTE_RAWCELLINTEN; #else - u32d[0] = 0x00000000; + u32d[0] = 0x00000000; #endif /* RCQ_SUPPORT */ - u32d[1] = 0x00000000; - u32d[2] = 0x00000000; - u32d[3] = 0xFFFFFFFF; - for (j = 0; j < card->rct_size; j++) - ns_write_sram(card, j * 4, u32d, 4); - - memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); - - for (j = 0; j < NS_FRSCD_NUM; j++) - card->scd2vc[j] = NULL; - - /* Initialize buffer levels */ - card->sbnr.min = MIN_SB; - card->sbnr.init = NUM_SB; - card->sbnr.max = MAX_SB; - card->lbnr.min = MIN_LB; - card->lbnr.init = NUM_LB; - card->lbnr.max = MAX_LB; - card->iovnr.min = MIN_IOVB; - card->iovnr.init = NUM_IOVB; - card->iovnr.max = MAX_IOVB; - card->hbnr.min = MIN_HB; - card->hbnr.init = NUM_HB; - card->hbnr.max = MAX_HB; - - card->sm_handle = 0x00000000; - card->sm_addr = 0x00000000; - card->lg_handle = 0x00000000; - card->lg_addr = 0x00000000; - - card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ - - /* Pre-allocate some huge buffers */ - skb_queue_head_init(&card->hbpool.queue); - card->hbpool.count = 0; - for (j = 0; j < NUM_HB; j++) - { - struct sk_buff *hb; - hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); - if (hb == NULL) - { - printk("nicstar%d: can't allocate %dth of %d huge buffers.\n", - i, j, NUM_HB); - error = 13; - ns_init_card_error(card, error); - return error; - } - NS_SKB_CB(hb)->buf_type = BUF_NONE; - skb_queue_tail(&card->hbpool.queue, hb); - card->hbpool.count++; - } - - - /* Allocate large buffers */ - skb_queue_head_init(&card->lbpool.queue); - card->lbpool.count = 0; /* Not used */ - for (j = 0; j < NUM_LB; j++) - { - struct sk_buff *lb; - lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); - if (lb == NULL) - { - printk("nicstar%d: can't allocate %dth of %d large buffers.\n", - i, j, NUM_LB); - error = 14; - ns_init_card_error(card, error); - return error; - } - NS_SKB_CB(lb)->buf_type = BUF_LG; - skb_queue_tail(&card->lbpool.queue, lb); - skb_reserve(lb, NS_SMBUFSIZE); - push_rxbufs(card, lb); - /* Due to the implementation of push_rxbufs() this is 1, not 0 */ - if (j == 1) - { - card->rcbuf = lb; - card->rawch = (u32) virt_to_bus(lb->data); - } - } - /* Test for strange behaviour which leads to crashes */ - if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) - { - printk("nicstar%d: Strange... 
Just allocated %d large buffers and lfbqc = %d.\n", - i, j, bcount); - error = 14; - ns_init_card_error(card, error); - return error; - } - - - /* Allocate small buffers */ - skb_queue_head_init(&card->sbpool.queue); - card->sbpool.count = 0; /* Not used */ - for (j = 0; j < NUM_SB; j++) - { - struct sk_buff *sb; - sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); - if (sb == NULL) - { - printk("nicstar%d: can't allocate %dth of %d small buffers.\n", - i, j, NUM_SB); - error = 15; - ns_init_card_error(card, error); - return error; - } - NS_SKB_CB(sb)->buf_type = BUF_SM; - skb_queue_tail(&card->sbpool.queue, sb); - skb_reserve(sb, NS_AAL0_HEADER); - push_rxbufs(card, sb); - } - /* Test for strange behaviour which leads to crashes */ - if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) - { - printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", - i, j, bcount); - error = 15; - ns_init_card_error(card, error); - return error; - } - - - /* Allocate iovec buffers */ - skb_queue_head_init(&card->iovpool.queue); - card->iovpool.count = 0; - for (j = 0; j < NUM_IOVB; j++) - { - struct sk_buff *iovb; - iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); - if (iovb == NULL) - { - printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n", - i, j, NUM_IOVB); - error = 16; - ns_init_card_error(card, error); - return error; - } - NS_SKB_CB(iovb)->buf_type = BUF_NONE; - skb_queue_tail(&card->iovpool.queue, iovb); - card->iovpool.count++; - } - - /* Configure NICStAR */ - if (card->rct_size == 4096) - ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; - else /* (card->rct_size == 16384) */ - ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; - - card->efbie = 1; - - card->intcnt = 0; - if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) - { - printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); - error = 9; - ns_init_card_error(card, error); - return error; - } - - /* Register device */ - card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); - if (card->atmdev == NULL) - { - printk("nicstar%d: can't register device.\n", i); - error = 17; - ns_init_card_error(card, error); - return error; - } - - if (ns_parse_mac(mac[i], card->atmdev->esi)) { - nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, - card->atmdev->esi, 6); - if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) { - nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, - card->atmdev->esi, 6); - } - } - - printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); - - card->atmdev->dev_data = card; - card->atmdev->ci_range.vpi_bits = card->vpibits; - card->atmdev->ci_range.vci_bits = card->vcibits; - card->atmdev->link_rate = card->max_pcr; - card->atmdev->phy = NULL; + u32d[1] = 0x00000000; + u32d[2] = 0x00000000; + u32d[3] = 0xFFFFFFFF; + for (j = 0; j < card->rct_size; j++) + ns_write_sram(card, j * 4, u32d, 4); + + memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); + + for (j = 0; j < NS_FRSCD_NUM; j++) + card->scd2vc[j] = NULL; + + /* Initialize buffer levels */ + card->sbnr.min = MIN_SB; + card->sbnr.init = NUM_SB; + card->sbnr.max = MAX_SB; + card->lbnr.min = MIN_LB; + card->lbnr.init = NUM_LB; + card->lbnr.max = MAX_LB; + card->iovnr.min = MIN_IOVB; + card->iovnr.init = NUM_IOVB; + card->iovnr.max = MAX_IOVB; + card->hbnr.min = MIN_HB; + card->hbnr.init = NUM_HB; + card->hbnr.max = MAX_HB; + + card->sm_handle = 0x00000000; + card->sm_addr = 0x00000000; + card->lg_handle = 
0x00000000; + card->lg_addr = 0x00000000; + + card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ + + idr_init(&card->idr); + + /* Pre-allocate some huge buffers */ + skb_queue_head_init(&card->hbpool.queue); + card->hbpool.count = 0; + for (j = 0; j < NUM_HB; j++) { + struct sk_buff *hb; + hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); + if (hb == NULL) { + printk + ("nicstar%d: can't allocate %dth of %d huge buffers.\n", + i, j, NUM_HB); + error = 13; + ns_init_card_error(card, error); + return error; + } + NS_PRV_BUFTYPE(hb) = BUF_NONE; + skb_queue_tail(&card->hbpool.queue, hb); + card->hbpool.count++; + } + + /* Allocate large buffers */ + skb_queue_head_init(&card->lbpool.queue); + card->lbpool.count = 0; /* Not used */ + for (j = 0; j < NUM_LB; j++) { + struct sk_buff *lb; + lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); + if (lb == NULL) { + printk + ("nicstar%d: can't allocate %dth of %d large buffers.\n", + i, j, NUM_LB); + error = 14; + ns_init_card_error(card, error); + return error; + } + NS_PRV_BUFTYPE(lb) = BUF_LG; + skb_queue_tail(&card->lbpool.queue, lb); + skb_reserve(lb, NS_SMBUFSIZE); + push_rxbufs(card, lb); + /* Due to the implementation of push_rxbufs() this is 1, not 0 */ + if (j == 1) { + card->rcbuf = lb; + card->rawcell = (struct ns_rcqe *) lb->data; + card->rawch = NS_PRV_DMA(lb); + } + } + /* Test for strange behaviour which leads to crashes */ + if ((bcount = + ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { + printk + ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", + i, j, bcount); + error = 14; + ns_init_card_error(card, error); + return error; + } + + /* Allocate small buffers */ + skb_queue_head_init(&card->sbpool.queue); + card->sbpool.count = 0; /* Not used */ + for (j = 0; j < NUM_SB; j++) { + struct sk_buff *sb; + sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); + if (sb == NULL) { + printk + ("nicstar%d: can't allocate %dth of %d small buffers.\n", + i, j, NUM_SB); + error = 15; + ns_init_card_error(card, error); + return error; + } + NS_PRV_BUFTYPE(sb) = BUF_SM; + skb_queue_tail(&card->sbpool.queue, sb); + skb_reserve(sb, NS_AAL0_HEADER); + push_rxbufs(card, sb); + } + /* Test for strange behaviour which leads to crashes */ + if ((bcount = + ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { + printk + ("nicstar%d: Strange... 
Just allocated %d small buffers and sfbqc = %d.\n", + i, j, bcount); + error = 15; + ns_init_card_error(card, error); + return error; + } + + /* Allocate iovec buffers */ + skb_queue_head_init(&card->iovpool.queue); + card->iovpool.count = 0; + for (j = 0; j < NUM_IOVB; j++) { + struct sk_buff *iovb; + iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); + if (iovb == NULL) { + printk + ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", + i, j, NUM_IOVB); + error = 16; + ns_init_card_error(card, error); + return error; + } + NS_PRV_BUFTYPE(iovb) = BUF_NONE; + skb_queue_tail(&card->iovpool.queue, iovb); + card->iovpool.count++; + } + + /* Configure NICStAR */ + if (card->rct_size == 4096) + ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; + else /* (card->rct_size == 16384) */ + ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; + + card->efbie = 1; + + card->intcnt = 0; + if (request_irq + (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { + printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); + error = 9; + ns_init_card_error(card, error); + return error; + } + + /* Register device */ + card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); + if (card->atmdev == NULL) { + printk("nicstar%d: can't register device.\n", i); + error = 17; + ns_init_card_error(card, error); + return error; + } + + if (ns_parse_mac(mac[i], card->atmdev->esi)) { + nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, + card->atmdev->esi, 6); + if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == + 0) { + nicstar_read_eprom(card->membase, + NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, + card->atmdev->esi, 6); + } + } + + printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); + + card->atmdev->dev_data = card; + card->atmdev->ci_range.vpi_bits = card->vpibits; + card->atmdev->ci_range.vci_bits = card->vcibits; + card->atmdev->link_rate = card->max_pcr; + card->atmdev->phy = NULL; #ifdef CONFIG_ATM_NICSTAR_USE_SUNI - if (card->max_pcr == ATM_OC3_PCR) - suni_init(card->atmdev); + if (card->max_pcr == ATM_OC3_PCR) + suni_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 - if (card->max_pcr == ATM_25_PCR) - idt77105_init(card->atmdev); + if (card->max_pcr == ATM_25_PCR) + idt77105_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ - if (card->atmdev->phy && card->atmdev->phy->start) - card->atmdev->phy->start(card->atmdev); - - writel(NS_CFG_RXPATH | - NS_CFG_SMBUFSIZE | - NS_CFG_LGBUFSIZE | - NS_CFG_EFBIE | - NS_CFG_RSQSIZE | - NS_CFG_VPIBITS | - ns_cfg_rctsize | - NS_CFG_RXINT_NODELAY | - NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ - NS_CFG_RSQAFIE | - NS_CFG_TXEN | - NS_CFG_TXIE | - NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ - NS_CFG_PHYIE, - card->membase + CFG); - - num_cards++; - - return error; -} + if (card->atmdev->phy && card->atmdev->phy->start) + card->atmdev->phy->start(card->atmdev); + writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ + NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ + NS_CFG_PHYIE, card->membase + CFG); + num_cards++; -static void __devinit ns_init_card_error(ns_dev *card, int error) -{ - if (error >= 17) - { - writel(0x00000000, card->membase + CFG); - } - if (error >= 16) - { - struct sk_buff *iovb; - while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) 
- dev_kfree_skb_any(iovb); - } - if (error >= 15) - { - struct sk_buff *sb; - while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) - dev_kfree_skb_any(sb); - free_scq(card->scq0, NULL); - } - if (error >= 14) - { - struct sk_buff *lb; - while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) - dev_kfree_skb_any(lb); - } - if (error >= 13) - { - struct sk_buff *hb; - while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) - dev_kfree_skb_any(hb); - } - if (error >= 12) - { - kfree(card->rsq.org); - } - if (error >= 11) - { - kfree(card->tsq.org); - } - if (error >= 10) - { - free_irq(card->pcidev->irq, card); - } - if (error >= 4) - { - iounmap(card->membase); - } - if (error >= 3) - { - pci_disable_device(card->pcidev); - kfree(card); - } + return error; } - - -static scq_info *get_scq(int size, u32 scd) +static void __devinit ns_init_card_error(ns_dev * card, int error) { - scq_info *scq; - int i; - - if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) - return NULL; - - scq = kmalloc(sizeof(scq_info), GFP_KERNEL); - if (scq == NULL) - return NULL; - scq->org = kmalloc(2 * size, GFP_KERNEL); - if (scq->org == NULL) - { - kfree(scq); - return NULL; - } - scq->skb = kmalloc(sizeof(struct sk_buff *) * - (size / NS_SCQE_SIZE), GFP_KERNEL); - if (scq->skb == NULL) - { - kfree(scq->org); - kfree(scq); - return NULL; - } - scq->num_entries = size / NS_SCQE_SIZE; - scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size); - scq->next = scq->base; - scq->last = scq->base + (scq->num_entries - 1); - scq->tail = scq->last; - scq->scd = scd; - scq->num_entries = size / NS_SCQE_SIZE; - scq->tbd_count = 0; - init_waitqueue_head(&scq->scqfull_waitq); - scq->full = 0; - spin_lock_init(&scq->lock); - - for (i = 0; i < scq->num_entries; i++) - scq->skb[i] = NULL; - - return scq; + if (error >= 17) { + writel(0x00000000, card->membase + CFG); + } + if (error >= 16) { + struct sk_buff *iovb; + while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) + dev_kfree_skb_any(iovb); + } + if (error >= 15) { + struct sk_buff *sb; + while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) + dev_kfree_skb_any(sb); + free_scq(card, card->scq0, NULL); + } + if (error >= 14) { + struct sk_buff *lb; + while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) + dev_kfree_skb_any(lb); + } + if (error >= 13) { + struct sk_buff *hb; + while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) + dev_kfree_skb_any(hb); + } + if (error >= 12) { + kfree(card->rsq.org); + } + if (error >= 11) { + kfree(card->tsq.org); + } + if (error >= 10) { + free_irq(card->pcidev->irq, card); + } + if (error >= 4) { + iounmap(card->membase); + } + if (error >= 3) { + pci_disable_device(card->pcidev); + kfree(card); + } } - +static scq_info *get_scq(ns_dev *card, int size, u32 scd) +{ + scq_info *scq; + int i; + + if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) + return NULL; + + scq = kmalloc(sizeof(scq_info), GFP_KERNEL); + if (!scq) + return NULL; + scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma); + if (!scq->org) { + kfree(scq); + return NULL; + } + scq->skb = kmalloc(sizeof(struct sk_buff *) * + (size / NS_SCQE_SIZE), GFP_KERNEL); + if (!scq->skb) { + kfree(scq->org); + kfree(scq); + return NULL; + } + scq->num_entries = size / NS_SCQE_SIZE; + scq->base = PTR_ALIGN(scq->org, size); + scq->next = scq->base; + scq->last = scq->base + (scq->num_entries - 1); + scq->tail = scq->last; + scq->scd = scd; + scq->num_entries = size / NS_SCQE_SIZE; + scq->tbd_count = 0; + init_waitqueue_head(&scq->scqfull_waitq); + scq->full = 0; + 
spin_lock_init(&scq->lock); + + for (i = 0; i < scq->num_entries; i++) + scq->skb[i] = NULL; + + return scq; +} /* For variable rate SCQ vcc must be NULL */ -static void free_scq(scq_info *scq, struct atm_vcc *vcc) +static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) { - int i; - - if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) - for (i = 0; i < scq->num_entries; i++) - { - if (scq->skb[i] != NULL) - { - vcc = ATM_SKB(scq->skb[i])->vcc; - if (vcc->pop != NULL) - vcc->pop(vcc, scq->skb[i]); - else - dev_kfree_skb_any(scq->skb[i]); - } - } - else /* vcc must be != NULL */ - { - if (vcc == NULL) - { - printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); - for (i = 0; i < scq->num_entries; i++) - dev_kfree_skb_any(scq->skb[i]); - } - else - for (i = 0; i < scq->num_entries; i++) - { - if (scq->skb[i] != NULL) - { - if (vcc->pop != NULL) - vcc->pop(vcc, scq->skb[i]); - else - dev_kfree_skb_any(scq->skb[i]); - } - } - } - kfree(scq->skb); - kfree(scq->org); - kfree(scq); + int i; + + if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) + for (i = 0; i < scq->num_entries; i++) { + if (scq->skb[i] != NULL) { + vcc = ATM_SKB(scq->skb[i])->vcc; + if (vcc->pop != NULL) + vcc->pop(vcc, scq->skb[i]); + else + dev_kfree_skb_any(scq->skb[i]); + } + } else { /* vcc must be != NULL */ + + if (vcc == NULL) { + printk + ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); + for (i = 0; i < scq->num_entries; i++) + dev_kfree_skb_any(scq->skb[i]); + } else + for (i = 0; i < scq->num_entries; i++) { + if (scq->skb[i] != NULL) { + if (vcc->pop != NULL) + vcc->pop(vcc, scq->skb[i]); + else + dev_kfree_skb_any(scq->skb[i]); + } + } + } + kfree(scq->skb); + pci_free_consistent(card->pcidev, + 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? + VBR_SCQSIZE : CBR_SCQSIZE), + scq->org, scq->dma); + kfree(scq); } - - /* The handles passed must be pointers to the sk_buff containing the small or large buffer(s) cast to u32. */ -static void push_rxbufs(ns_dev *card, struct sk_buff *skb) +static void push_rxbufs(ns_dev * card, struct sk_buff *skb) { - struct ns_skb_cb *cb = NS_SKB_CB(skb); - u32 handle1, addr1; - u32 handle2, addr2; - u32 stat; - unsigned long flags; - - /* *BARF* */ - handle2 = addr2 = 0; - handle1 = (u32)skb; - addr1 = (u32)virt_to_bus(skb->data); + struct sk_buff *handle1, *handle2; + u32 id1 = 0, id2 = 0; + u32 addr1, addr2; + u32 stat; + unsigned long flags; + int err; + + /* *BARF* */ + handle2 = NULL; + addr2 = 0; + handle1 = skb; + addr1 = pci_map_single(card->pcidev, + skb->data, + (NS_PRV_BUFTYPE(skb) == BUF_SM + ? 
NS_SMSKBSIZE : NS_LGSKBSIZE), + PCI_DMA_TODEVICE); + NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */ #ifdef GENERAL_DEBUG - if (!addr1) - printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); + if (!addr1) + printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", + card->index); #endif /* GENERAL_DEBUG */ - stat = readl(card->membase + STAT); - card->sbfqc = ns_stat_sfbqc_get(stat); - card->lbfqc = ns_stat_lfbqc_get(stat); - if (cb->buf_type == BUF_SM) - { - if (!addr2) - { - if (card->sm_addr) - { - addr2 = card->sm_addr; - handle2 = card->sm_handle; - card->sm_addr = 0x00000000; - card->sm_handle = 0x00000000; - } - else /* (!sm_addr) */ - { - card->sm_addr = addr1; - card->sm_handle = handle1; - } - } - } - else /* buf_type == BUF_LG */ - { - if (!addr2) - { - if (card->lg_addr) - { - addr2 = card->lg_addr; - handle2 = card->lg_handle; - card->lg_addr = 0x00000000; - card->lg_handle = 0x00000000; - } - else /* (!lg_addr) */ - { - card->lg_addr = addr1; - card->lg_handle = handle1; - } - } - } - - if (addr2) - { - if (cb->buf_type == BUF_SM) - { - if (card->sbfqc >= card->sbnr.max) - { - skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue); - dev_kfree_skb_any((struct sk_buff *) handle1); - skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue); - dev_kfree_skb_any((struct sk_buff *) handle2); - return; - } - else - card->sbfqc += 2; - } - else /* (buf_type == BUF_LG) */ - { - if (card->lbfqc >= card->lbnr.max) - { - skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue); - dev_kfree_skb_any((struct sk_buff *) handle1); - skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue); - dev_kfree_skb_any((struct sk_buff *) handle2); - return; - } - else - card->lbfqc += 2; - } - - spin_lock_irqsave(&card->res_lock, flags); - - while (CMD_BUSY(card)); - writel(addr2, card->membase + DR3); - writel(handle2, card->membase + DR2); - writel(addr1, card->membase + DR1); - writel(handle1, card->membase + DR0); - writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD); - - spin_unlock_irqrestore(&card->res_lock, flags); - - XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, - (cb->buf_type == BUF_SM ? 
"small" : "large"), addr1, addr2); - } - - if (!card->efbie && card->sbfqc >= card->sbnr.min && - card->lbfqc >= card->lbnr.min) - { - card->efbie = 1; - writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); - } - - return; + stat = readl(card->membase + STAT); + card->sbfqc = ns_stat_sfbqc_get(stat); + card->lbfqc = ns_stat_lfbqc_get(stat); + if (NS_PRV_BUFTYPE(skb) == BUF_SM) { + if (!addr2) { + if (card->sm_addr) { + addr2 = card->sm_addr; + handle2 = card->sm_handle; + card->sm_addr = 0x00000000; + card->sm_handle = 0x00000000; + } else { /* (!sm_addr) */ + + card->sm_addr = addr1; + card->sm_handle = handle1; + } + } + } else { /* buf_type == BUF_LG */ + + if (!addr2) { + if (card->lg_addr) { + addr2 = card->lg_addr; + handle2 = card->lg_handle; + card->lg_addr = 0x00000000; + card->lg_handle = 0x00000000; + } else { /* (!lg_addr) */ + + card->lg_addr = addr1; + card->lg_handle = handle1; + } + } + } + + if (addr2) { + if (NS_PRV_BUFTYPE(skb) == BUF_SM) { + if (card->sbfqc >= card->sbnr.max) { + skb_unlink(handle1, &card->sbpool.queue); + dev_kfree_skb_any(handle1); + skb_unlink(handle2, &card->sbpool.queue); + dev_kfree_skb_any(handle2); + return; + } else + card->sbfqc += 2; + } else { /* (buf_type == BUF_LG) */ + + if (card->lbfqc >= card->lbnr.max) { + skb_unlink(handle1, &card->lbpool.queue); + dev_kfree_skb_any(handle1); + skb_unlink(handle2, &card->lbpool.queue); + dev_kfree_skb_any(handle2); + return; + } else + card->lbfqc += 2; + } + + do { + if (!idr_pre_get(&card->idr, GFP_ATOMIC)) { + printk(KERN_ERR + "nicstar%d: no free memory for idr\n", + card->index); + goto out; + } + + if (!id1) + err = idr_get_new_above(&card->idr, handle1, 0, &id1); + + if (!id2 && err == 0) + err = idr_get_new_above(&card->idr, handle2, 0, &id2); + + } while (err == -EAGAIN); + + if (err) + goto out; + + spin_lock_irqsave(&card->res_lock, flags); + while (CMD_BUSY(card)) ; + writel(addr2, card->membase + DR3); + writel(id2, card->membase + DR2); + writel(addr1, card->membase + DR1); + writel(id1, card->membase + DR0); + writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb), + card->membase + CMD); + spin_unlock_irqrestore(&card->res_lock, flags); + + XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", + card->index, + (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"), + addr1, addr2); + } + + if (!card->efbie && card->sbfqc >= card->sbnr.min && + card->lbfqc >= card->lbnr.min) { + card->efbie = 1; + writel((readl(card->membase + CFG) | NS_CFG_EFBIE), + card->membase + CFG); + } + +out: + return; } - - static irqreturn_t ns_irq_handler(int irq, void *dev_id) { - u32 stat_r; - ns_dev *card; - struct atm_dev *dev; - unsigned long flags; - - card = (ns_dev *) dev_id; - dev = card->atmdev; - card->intcnt++; - - PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); - - spin_lock_irqsave(&card->int_lock, flags); - - stat_r = readl(card->membase + STAT); - - /* Transmit Status Indicator has been written to T. S. 
Queue */ - if (stat_r & NS_STAT_TSIF) - { - TXPRINTK("nicstar%d: TSI interrupt\n", card->index); - process_tsq(card); - writel(NS_STAT_TSIF, card->membase + STAT); - } - - /* Incomplete CS-PDU has been transmitted */ - if (stat_r & NS_STAT_TXICP) - { - writel(NS_STAT_TXICP, card->membase + STAT); - TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", - card->index); - } - - /* Transmit Status Queue 7/8 full */ - if (stat_r & NS_STAT_TSQF) - { - writel(NS_STAT_TSQF, card->membase + STAT); - PRINTK("nicstar%d: TSQ full.\n", card->index); - process_tsq(card); - } - - /* Timer overflow */ - if (stat_r & NS_STAT_TMROF) - { - writel(NS_STAT_TMROF, card->membase + STAT); - PRINTK("nicstar%d: Timer overflow.\n", card->index); - } - - /* PHY device interrupt signal active */ - if (stat_r & NS_STAT_PHYI) - { - writel(NS_STAT_PHYI, card->membase + STAT); - PRINTK("nicstar%d: PHY interrupt.\n", card->index); - if (dev->phy && dev->phy->interrupt) { - dev->phy->interrupt(dev); - } - } - - /* Small Buffer Queue is full */ - if (stat_r & NS_STAT_SFBQF) - { - writel(NS_STAT_SFBQF, card->membase + STAT); - printk("nicstar%d: Small free buffer queue is full.\n", card->index); - } - - /* Large Buffer Queue is full */ - if (stat_r & NS_STAT_LFBQF) - { - writel(NS_STAT_LFBQF, card->membase + STAT); - printk("nicstar%d: Large free buffer queue is full.\n", card->index); - } - - /* Receive Status Queue is full */ - if (stat_r & NS_STAT_RSQF) - { - writel(NS_STAT_RSQF, card->membase + STAT); - printk("nicstar%d: RSQ full.\n", card->index); - process_rsq(card); - } - - /* Complete CS-PDU received */ - if (stat_r & NS_STAT_EOPDU) - { - RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); - process_rsq(card); - writel(NS_STAT_EOPDU, card->membase + STAT); - } - - /* Raw cell received */ - if (stat_r & NS_STAT_RAWCF) - { - writel(NS_STAT_RAWCF, card->membase + STAT); + u32 stat_r; + ns_dev *card; + struct atm_dev *dev; + unsigned long flags; + + card = (ns_dev *) dev_id; + dev = card->atmdev; + card->intcnt++; + + PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); + + spin_lock_irqsave(&card->int_lock, flags); + + stat_r = readl(card->membase + STAT); + + /* Transmit Status Indicator has been written to T. S. 
Queue */ + if (stat_r & NS_STAT_TSIF) { + TXPRINTK("nicstar%d: TSI interrupt\n", card->index); + process_tsq(card); + writel(NS_STAT_TSIF, card->membase + STAT); + } + + /* Incomplete CS-PDU has been transmitted */ + if (stat_r & NS_STAT_TXICP) { + writel(NS_STAT_TXICP, card->membase + STAT); + TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", + card->index); + } + + /* Transmit Status Queue 7/8 full */ + if (stat_r & NS_STAT_TSQF) { + writel(NS_STAT_TSQF, card->membase + STAT); + PRINTK("nicstar%d: TSQ full.\n", card->index); + process_tsq(card); + } + + /* Timer overflow */ + if (stat_r & NS_STAT_TMROF) { + writel(NS_STAT_TMROF, card->membase + STAT); + PRINTK("nicstar%d: Timer overflow.\n", card->index); + } + + /* PHY device interrupt signal active */ + if (stat_r & NS_STAT_PHYI) { + writel(NS_STAT_PHYI, card->membase + STAT); + PRINTK("nicstar%d: PHY interrupt.\n", card->index); + if (dev->phy && dev->phy->interrupt) { + dev->phy->interrupt(dev); + } + } + + /* Small Buffer Queue is full */ + if (stat_r & NS_STAT_SFBQF) { + writel(NS_STAT_SFBQF, card->membase + STAT); + printk("nicstar%d: Small free buffer queue is full.\n", + card->index); + } + + /* Large Buffer Queue is full */ + if (stat_r & NS_STAT_LFBQF) { + writel(NS_STAT_LFBQF, card->membase + STAT); + printk("nicstar%d: Large free buffer queue is full.\n", + card->index); + } + + /* Receive Status Queue is full */ + if (stat_r & NS_STAT_RSQF) { + writel(NS_STAT_RSQF, card->membase + STAT); + printk("nicstar%d: RSQ full.\n", card->index); + process_rsq(card); + } + + /* Complete CS-PDU received */ + if (stat_r & NS_STAT_EOPDU) { + RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); + process_rsq(card); + writel(NS_STAT_EOPDU, card->membase + STAT); + } + + /* Raw cell received */ + if (stat_r & NS_STAT_RAWCF) { + writel(NS_STAT_RAWCF, card->membase + STAT); #ifndef RCQ_SUPPORT - printk("nicstar%d: Raw cell received and no support yet...\n", - card->index); + printk("nicstar%d: Raw cell received and no support yet...\n", + card->index); #endif /* RCQ_SUPPORT */ - /* NOTE: the following procedure may keep a raw cell pending until the - next interrupt. As this preliminary support is only meant to - avoid buffer leakage, this is not an issue. 
*/ - while (readl(card->membase + RAWCT) != card->rawch) - { - ns_rcqe *rawcell; - - rawcell = (ns_rcqe *) bus_to_virt(card->rawch); - if (ns_rcqe_islast(rawcell)) - { - struct sk_buff *oldbuf; - - oldbuf = card->rcbuf; - card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell); - card->rawch = (u32) virt_to_bus(card->rcbuf->data); - recycle_rx_buf(card, oldbuf); - } - else - card->rawch += NS_RCQE_SIZE; - } - } - - /* Small buffer queue is empty */ - if (stat_r & NS_STAT_SFBQE) - { - int i; - struct sk_buff *sb; - - writel(NS_STAT_SFBQE, card->membase + STAT); - printk("nicstar%d: Small free buffer queue empty.\n", - card->index); - for (i = 0; i < card->sbnr.min; i++) - { - sb = dev_alloc_skb(NS_SMSKBSIZE); - if (sb == NULL) - { - writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); - card->efbie = 0; - break; - } - NS_SKB_CB(sb)->buf_type = BUF_SM; - skb_queue_tail(&card->sbpool.queue, sb); - skb_reserve(sb, NS_AAL0_HEADER); - push_rxbufs(card, sb); - } - card->sbfqc = i; - process_rsq(card); - } - - /* Large buffer queue empty */ - if (stat_r & NS_STAT_LFBQE) - { - int i; - struct sk_buff *lb; - - writel(NS_STAT_LFBQE, card->membase + STAT); - printk("nicstar%d: Large free buffer queue empty.\n", - card->index); - for (i = 0; i < card->lbnr.min; i++) - { - lb = dev_alloc_skb(NS_LGSKBSIZE); - if (lb == NULL) - { - writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); - card->efbie = 0; - break; - } - NS_SKB_CB(lb)->buf_type = BUF_LG; - skb_queue_tail(&card->lbpool.queue, lb); - skb_reserve(lb, NS_SMBUFSIZE); - push_rxbufs(card, lb); - } - card->lbfqc = i; - process_rsq(card); - } - - /* Receive Status Queue is 7/8 full */ - if (stat_r & NS_STAT_RSQAF) - { - writel(NS_STAT_RSQAF, card->membase + STAT); - RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); - process_rsq(card); - } - - spin_unlock_irqrestore(&card->int_lock, flags); - PRINTK("nicstar%d: end of interrupt service\n", card->index); - return IRQ_HANDLED; + /* NOTE: the following procedure may keep a raw cell pending until the + next interrupt. As this preliminary support is only meant to + avoid buffer leakage, this is not an issue. 
*/ + while (readl(card->membase + RAWCT) != card->rawch) { + + if (ns_rcqe_islast(card->rawcell)) { + struct sk_buff *oldbuf; + + oldbuf = card->rcbuf; + card->rcbuf = idr_find(&card->idr, + ns_rcqe_nextbufhandle(card->rawcell)); + card->rawch = NS_PRV_DMA(card->rcbuf); + card->rawcell = (struct ns_rcqe *) + card->rcbuf->data; + recycle_rx_buf(card, oldbuf); + } else { + card->rawch += NS_RCQE_SIZE; + card->rawcell++; + } + } + } + + /* Small buffer queue is empty */ + if (stat_r & NS_STAT_SFBQE) { + int i; + struct sk_buff *sb; + + writel(NS_STAT_SFBQE, card->membase + STAT); + printk("nicstar%d: Small free buffer queue empty.\n", + card->index); + for (i = 0; i < card->sbnr.min; i++) { + sb = dev_alloc_skb(NS_SMSKBSIZE); + if (sb == NULL) { + writel(readl(card->membase + CFG) & + ~NS_CFG_EFBIE, card->membase + CFG); + card->efbie = 0; + break; + } + NS_PRV_BUFTYPE(sb) = BUF_SM; + skb_queue_tail(&card->sbpool.queue, sb); + skb_reserve(sb, NS_AAL0_HEADER); + push_rxbufs(card, sb); + } + card->sbfqc = i; + process_rsq(card); + } + + /* Large buffer queue empty */ + if (stat_r & NS_STAT_LFBQE) { + int i; + struct sk_buff *lb; + + writel(NS_STAT_LFBQE, card->membase + STAT); + printk("nicstar%d: Large free buffer queue empty.\n", + card->index); + for (i = 0; i < card->lbnr.min; i++) { + lb = dev_alloc_skb(NS_LGSKBSIZE); + if (lb == NULL) { + writel(readl(card->membase + CFG) & + ~NS_CFG_EFBIE, card->membase + CFG); + card->efbie = 0; + break; + } + NS_PRV_BUFTYPE(lb) = BUF_LG; + skb_queue_tail(&card->lbpool.queue, lb); + skb_reserve(lb, NS_SMBUFSIZE); + push_rxbufs(card, lb); + } + card->lbfqc = i; + process_rsq(card); + } + + /* Receive Status Queue is 7/8 full */ + if (stat_r & NS_STAT_RSQAF) { + writel(NS_STAT_RSQAF, card->membase + STAT); + RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); + process_rsq(card); + } + + spin_unlock_irqrestore(&card->int_lock, flags); + PRINTK("nicstar%d: end of interrupt service\n", card->index); + return IRQ_HANDLED; } - - static int ns_open(struct atm_vcc *vcc) { - ns_dev *card; - vc_map *vc; - unsigned long tmpl, modl; - int tcr, tcra; /* target cell rate, and absolute value */ - int n = 0; /* Number of entries in the TST. Initialized to remove - the compiler warning. */ - u32 u32d[4]; - int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler - warning. How I wish compilers were clever enough to - tell which variables can truly be used - uninitialized... */ - int inuse; /* tx or rx vc already in use by another vcc */ - short vpi = vcc->vpi; - int vci = vcc->vci; - - card = (ns_dev *) vcc->dev->dev_data; - PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci); - if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) - { - PRINTK("nicstar%d: unsupported AAL.\n", card->index); - return -EINVAL; - } - - vc = &(card->vcmap[vpi << card->vcibits | vci]); - vcc->dev_data = vc; - - inuse = 0; - if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) - inuse = 1; - if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) - inuse += 2; - if (inuse) - { - printk("nicstar%d: %s vci already in use.\n", card->index, - inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); - return -EINVAL; - } - - set_bit(ATM_VF_ADDR,&vcc->flags); - - /* NOTE: You are not allowed to modify an open connection's QOS. To change - that, remove the ATM_VF_PARTIAL flag checking. There may be other changes - needed to do that. 
*/ - if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) - { - scq_info *scq; - - set_bit(ATM_VF_PARTIAL,&vcc->flags); - if (vcc->qos.txtp.traffic_class == ATM_CBR) - { - /* Check requested cell rate and availability of SCD */ - if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 && - vcc->qos.txtp.min_pcr == 0) - { - PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", - card->index); - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -EINVAL; - } - - tcr = atm_pcr_goal(&(vcc->qos.txtp)); - tcra = tcr >= 0 ? tcr : -tcr; - - PRINTK("nicstar%d: target cell rate = %d.\n", card->index, - vcc->qos.txtp.max_pcr); - - tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES; - modl = tmpl % card->max_pcr; - - n = (int)(tmpl / card->max_pcr); - if (tcr > 0) - { - if (modl > 0) n++; - } - else if (tcr == 0) - { - if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0) - { - PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index); - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -EINVAL; - } - } - - if (n == 0) - { - printk("nicstar%d: selected bandwidth < granularity.\n", card->index); - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -EINVAL; - } - - if (n > (card->tst_free_entries - NS_TST_RESERVED)) - { - PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index); - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -EINVAL; - } - else - card->tst_free_entries -= n; - - XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n); - for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) - { - if (card->scd2vc[frscdi] == NULL) - { - card->scd2vc[frscdi] = vc; - break; - } - } - if (frscdi == NS_FRSCD_NUM) - { - PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index); - card->tst_free_entries += n; - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -EBUSY; - } - - vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; - - scq = get_scq(CBR_SCQSIZE, vc->cbr_scd); - if (scq == NULL) - { - PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index); - card->scd2vc[frscdi] = NULL; - card->tst_free_entries += n; - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -ENOMEM; - } - vc->scq = scq; - u32d[0] = (u32) virt_to_bus(scq->base); - u32d[1] = (u32) 0x00000000; - u32d[2] = (u32) 0xffffffff; - u32d[3] = (u32) 0x00000000; - ns_write_sram(card, vc->cbr_scd, u32d, 4); - - fill_tst(card, n, vc); - } - else if (vcc->qos.txtp.traffic_class == ATM_UBR) - { - vc->cbr_scd = 0x00000000; - vc->scq = card->scq0; - } - - if (vcc->qos.txtp.traffic_class != ATM_NONE) - { - vc->tx = 1; - vc->tx_vcc = vcc; - vc->tbd_count = 0; - } - if (vcc->qos.rxtp.traffic_class != ATM_NONE) - { - u32 status; - - vc->rx = 1; - vc->rx_vcc = vcc; - vc->rx_iov = NULL; - - /* Open the connection in hardware */ - if (vcc->qos.aal == ATM_AAL5) - status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; - else /* vcc->qos.aal == ATM_AAL0 */ - status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; + ns_dev *card; + vc_map *vc; + unsigned long tmpl, modl; + int tcr, tcra; /* target cell rate, and absolute value */ + int n = 0; /* Number of entries in the TST. Initialized to remove + the compiler warning. */ + u32 u32d[4]; + int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler + warning. How I wish compilers were clever enough to + tell which variables can truly be used + uninitialized... 
*/ + int inuse; /* tx or rx vc already in use by another vcc */ + short vpi = vcc->vpi; + int vci = vcc->vci; + + card = (ns_dev *) vcc->dev->dev_data; + PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, + vci); + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { + PRINTK("nicstar%d: unsupported AAL.\n", card->index); + return -EINVAL; + } + + vc = &(card->vcmap[vpi << card->vcibits | vci]); + vcc->dev_data = vc; + + inuse = 0; + if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) + inuse = 1; + if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) + inuse += 2; + if (inuse) { + printk("nicstar%d: %s vci already in use.\n", card->index, + inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); + return -EINVAL; + } + + set_bit(ATM_VF_ADDR, &vcc->flags); + + /* NOTE: You are not allowed to modify an open connection's QOS. To change + that, remove the ATM_VF_PARTIAL flag checking. There may be other changes + needed to do that. */ + if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) { + scq_info *scq; + + set_bit(ATM_VF_PARTIAL, &vcc->flags); + if (vcc->qos.txtp.traffic_class == ATM_CBR) { + /* Check requested cell rate and availability of SCD */ + if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 + && vcc->qos.txtp.min_pcr == 0) { + PRINTK + ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", + card->index); + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); + return -EINVAL; + } + + tcr = atm_pcr_goal(&(vcc->qos.txtp)); + tcra = tcr >= 0 ? tcr : -tcr; + + PRINTK("nicstar%d: target cell rate = %d.\n", + card->index, vcc->qos.txtp.max_pcr); + + tmpl = + (unsigned long)tcra *(unsigned long) + NS_TST_NUM_ENTRIES; + modl = tmpl % card->max_pcr; + + n = (int)(tmpl / card->max_pcr); + if (tcr > 0) { + if (modl > 0) + n++; + } else if (tcr == 0) { + if ((n = + (card->tst_free_entries - + NS_TST_RESERVED)) <= 0) { + PRINTK + ("nicstar%d: no CBR bandwidth free.\n", + card->index); + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); + return -EINVAL; + } + } + + if (n == 0) { + printk + ("nicstar%d: selected bandwidth < granularity.\n", + card->index); + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); + return -EINVAL; + } + + if (n > (card->tst_free_entries - NS_TST_RESERVED)) { + PRINTK + ("nicstar%d: not enough free CBR bandwidth.\n", + card->index); + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); + return -EINVAL; + } else + card->tst_free_entries -= n; + + XPRINTK("nicstar%d: writing %d tst entries.\n", + card->index, n); + for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) { + if (card->scd2vc[frscdi] == NULL) { + card->scd2vc[frscdi] = vc; + break; + } + } + if (frscdi == NS_FRSCD_NUM) { + PRINTK + ("nicstar%d: no SCD available for CBR channel.\n", + card->index); + card->tst_free_entries += n; + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); + return -EBUSY; + } + + vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; + + scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd); + if (scq == NULL) { + PRINTK("nicstar%d: can't get fixed rate SCQ.\n", + card->index); + card->scd2vc[frscdi] = NULL; + card->tst_free_entries += n; + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); + return -ENOMEM; + } + vc->scq = scq; + u32d[0] = scq_virt_to_bus(scq, scq->base); + u32d[1] = (u32) 0x00000000; + u32d[2] = (u32) 0xffffffff; + u32d[3] = (u32) 0x00000000; + ns_write_sram(card, vc->cbr_scd, u32d, 
4); + + fill_tst(card, n, vc); + } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { + vc->cbr_scd = 0x00000000; + vc->scq = card->scq0; + } + + if (vcc->qos.txtp.traffic_class != ATM_NONE) { + vc->tx = 1; + vc->tx_vcc = vcc; + vc->tbd_count = 0; + } + if (vcc->qos.rxtp.traffic_class != ATM_NONE) { + u32 status; + + vc->rx = 1; + vc->rx_vcc = vcc; + vc->rx_iov = NULL; + + /* Open the connection in hardware */ + if (vcc->qos.aal == ATM_AAL5) + status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; + else /* vcc->qos.aal == ATM_AAL0 */ + status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; #ifdef RCQ_SUPPORT - status |= NS_RCTE_RAWCELLINTEN; + status |= NS_RCTE_RAWCELLINTEN; #endif /* RCQ_SUPPORT */ - ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) * - NS_RCT_ENTRY_SIZE, &status, 1); - } - - } - - set_bit(ATM_VF_READY,&vcc->flags); - return 0; -} + ns_write_sram(card, + NS_RCT + + (vpi << card->vcibits | vci) * + NS_RCT_ENTRY_SIZE, &status, 1); + } + } + set_bit(ATM_VF_READY, &vcc->flags); + return 0; +} static void ns_close(struct atm_vcc *vcc) { - vc_map *vc; - ns_dev *card; - u32 data; - int i; - - vc = vcc->dev_data; - card = vcc->dev->dev_data; - PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, - (int) vcc->vpi, vcc->vci); - - clear_bit(ATM_VF_READY,&vcc->flags); - - if (vcc->qos.rxtp.traffic_class != ATM_NONE) - { - u32 addr; - unsigned long flags; - - addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; - spin_lock_irqsave(&card->res_lock, flags); - while(CMD_BUSY(card)); - writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD); - spin_unlock_irqrestore(&card->res_lock, flags); - - vc->rx = 0; - if (vc->rx_iov != NULL) - { - struct sk_buff *iovb; - u32 stat; - - stat = readl(card->membase + STAT); - card->sbfqc = ns_stat_sfbqc_get(stat); - card->lbfqc = ns_stat_lfbqc_get(stat); - - PRINTK("nicstar%d: closing a VC with pending rx buffers.\n", - card->index); - iovb = vc->rx_iov; - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, - NS_SKB(iovb)->iovcnt); - NS_SKB(iovb)->iovcnt = 0; - NS_SKB(iovb)->vcc = NULL; - spin_lock_irqsave(&card->int_lock, flags); - recycle_iov_buf(card, iovb); - spin_unlock_irqrestore(&card->int_lock, flags); - vc->rx_iov = NULL; - } - } - - if (vcc->qos.txtp.traffic_class != ATM_NONE) - { - vc->tx = 0; - } - - if (vcc->qos.txtp.traffic_class == ATM_CBR) - { - unsigned long flags; - ns_scqe *scqep; - scq_info *scq; - - scq = vc->scq; - - for (;;) - { - spin_lock_irqsave(&scq->lock, flags); - scqep = scq->next; - if (scqep == scq->base) - scqep = scq->last; - else - scqep--; - if (scqep == scq->tail) - { - spin_unlock_irqrestore(&scq->lock, flags); - break; - } - /* If the last entry is not a TSR, place one in the SCQ in order to - be able to completely drain it and then close. 
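In the CBR branch of ns_open() above, the number of TST slots reserved works out to roughly ceil(target_cell_rate * NS_TST_NUM_ENTRIES / max_pcr), rounded up whenever there is a remainder and then checked against the free entries. A small standalone sketch of that arithmetic (the table size and link rate below are example values, not the driver's actual constants):

  /* Illustrative only: how ns_open() sizes a CBR reservation.
   * n = ceil(tcra * tst_entries / max_pcr). */
  #include <stdio.h>

  int main(void)
  {
          unsigned long tst_entries = 4096;   /* assumed table size for the example */
          unsigned long max_pcr     = 353207; /* example link rate in cells/s */
          unsigned long tcra        = 10000;  /* requested cells/s */

          unsigned long tmpl = tcra * tst_entries;
          unsigned long n    = tmpl / max_pcr;

          if (tmpl % max_pcr)                 /* round up, as the driver does for tcr > 0 */
                  n++;

          printf("reserve %lu of %lu TST entries\n", n, tst_entries);
          return 0;
  }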
*/ - if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) - { - ns_scqe tsr; - u32 scdi, scqi; - u32 data; - int index; - - tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); - scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; - scqi = scq->next - scq->base; - tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); - tsr.word_3 = 0x00000000; - tsr.word_4 = 0x00000000; - *scq->next = tsr; - index = (int) scqi; - scq->skb[index] = NULL; - if (scq->next == scq->last) - scq->next = scq->base; - else - scq->next++; - data = (u32) virt_to_bus(scq->next); - ns_write_sram(card, scq->scd, &data, 1); - } - spin_unlock_irqrestore(&scq->lock, flags); - schedule(); - } - - /* Free all TST entries */ - data = NS_TST_OPCODE_VARIABLE; - for (i = 0; i < NS_TST_NUM_ENTRIES; i++) - { - if (card->tste2vc[i] == vc) - { - ns_write_sram(card, card->tst_addr + i, &data, 1); - card->tste2vc[i] = NULL; - card->tst_free_entries++; - } - } - - card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; - free_scq(vc->scq, vcc); - } - - /* remove all references to vcc before deleting it */ - if (vcc->qos.txtp.traffic_class != ATM_NONE) - { - unsigned long flags; - scq_info *scq = card->scq0; - - spin_lock_irqsave(&scq->lock, flags); - - for(i = 0; i < scq->num_entries; i++) { - if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { - ATM_SKB(scq->skb[i])->vcc = NULL; - atm_return(vcc, scq->skb[i]->truesize); - PRINTK("nicstar: deleted pending vcc mapping\n"); - } - } - - spin_unlock_irqrestore(&scq->lock, flags); - } - - vcc->dev_data = NULL; - clear_bit(ATM_VF_PARTIAL,&vcc->flags); - clear_bit(ATM_VF_ADDR,&vcc->flags); + vc_map *vc; + ns_dev *card; + u32 data; + int i; + + vc = vcc->dev_data; + card = vcc->dev->dev_data; + PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, + (int)vcc->vpi, vcc->vci); + + clear_bit(ATM_VF_READY, &vcc->flags); + + if (vcc->qos.rxtp.traffic_class != ATM_NONE) { + u32 addr; + unsigned long flags; + + addr = + NS_RCT + + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; + spin_lock_irqsave(&card->res_lock, flags); + while (CMD_BUSY(card)) ; + writel(NS_CMD_CLOSE_CONNECTION | addr << 2, + card->membase + CMD); + spin_unlock_irqrestore(&card->res_lock, flags); + + vc->rx = 0; + if (vc->rx_iov != NULL) { + struct sk_buff *iovb; + u32 stat; + + stat = readl(card->membase + STAT); + card->sbfqc = ns_stat_sfbqc_get(stat); + card->lbfqc = ns_stat_lfbqc_get(stat); + + PRINTK + ("nicstar%d: closing a VC with pending rx buffers.\n", + card->index); + iovb = vc->rx_iov; + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + NS_PRV_IOVCNT(iovb) = 0; + spin_lock_irqsave(&card->int_lock, flags); + recycle_iov_buf(card, iovb); + spin_unlock_irqrestore(&card->int_lock, flags); + vc->rx_iov = NULL; + } + } + + if (vcc->qos.txtp.traffic_class != ATM_NONE) { + vc->tx = 0; + } + + if (vcc->qos.txtp.traffic_class == ATM_CBR) { + unsigned long flags; + ns_scqe *scqep; + scq_info *scq; + + scq = vc->scq; + + for (;;) { + spin_lock_irqsave(&scq->lock, flags); + scqep = scq->next; + if (scqep == scq->base) + scqep = scq->last; + else + scqep--; + if (scqep == scq->tail) { + spin_unlock_irqrestore(&scq->lock, flags); + break; + } + /* If the last entry is not a TSR, place one in the SCQ in order to + be able to completely drain it and then close. 
*/ + if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { + ns_scqe tsr; + u32 scdi, scqi; + u32 data; + int index; + + tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); + scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; + scqi = scq->next - scq->base; + tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); + tsr.word_3 = 0x00000000; + tsr.word_4 = 0x00000000; + *scq->next = tsr; + index = (int)scqi; + scq->skb[index] = NULL; + if (scq->next == scq->last) + scq->next = scq->base; + else + scq->next++; + data = scq_virt_to_bus(scq, scq->next); + ns_write_sram(card, scq->scd, &data, 1); + } + spin_unlock_irqrestore(&scq->lock, flags); + schedule(); + } + + /* Free all TST entries */ + data = NS_TST_OPCODE_VARIABLE; + for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { + if (card->tste2vc[i] == vc) { + ns_write_sram(card, card->tst_addr + i, &data, + 1); + card->tste2vc[i] = NULL; + card->tst_free_entries++; + } + } + + card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; + free_scq(card, vc->scq, vcc); + } + + /* remove all references to vcc before deleting it */ + if (vcc->qos.txtp.traffic_class != ATM_NONE) { + unsigned long flags; + scq_info *scq = card->scq0; + + spin_lock_irqsave(&scq->lock, flags); + + for (i = 0; i < scq->num_entries; i++) { + if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { + ATM_SKB(scq->skb[i])->vcc = NULL; + atm_return(vcc, scq->skb[i]->truesize); + PRINTK + ("nicstar: deleted pending vcc mapping\n"); + } + } + + spin_unlock_irqrestore(&scq->lock, flags); + } + + vcc->dev_data = NULL; + clear_bit(ATM_VF_PARTIAL, &vcc->flags); + clear_bit(ATM_VF_ADDR, &vcc->flags); #ifdef RX_DEBUG - { - u32 stat, cfg; - stat = readl(card->membase + STAT); - cfg = readl(card->membase + CFG); - printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); - printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n", - (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last, - readl(card->membase + TSQT)); - printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n", - (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last, - readl(card->membase + RSQT)); - printk("Empty free buffer queue interrupt %s \n", - card->efbie ? "enabled" : "disabled"); - printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", - ns_stat_sfbqc_get(stat), card->sbpool.count, - ns_stat_lfbqc_get(stat), card->lbpool.count); - printk("hbpool.count = %d iovpool.count = %d \n", - card->hbpool.count, card->iovpool.count); - } + { + u32 stat, cfg; + stat = readl(card->membase + STAT); + cfg = readl(card->membase + CFG); + printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); + printk + ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n", + card->tsq.base, card->tsq.next, + card->tsq.last, readl(card->membase + TSQT)); + printk + ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n", + card->rsq.base, card->rsq.next, + card->rsq.last, readl(card->membase + RSQT)); + printk("Empty free buffer queue interrupt %s \n", + card->efbie ? 
"enabled" : "disabled"); + printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", + ns_stat_sfbqc_get(stat), card->sbpool.count, + ns_stat_lfbqc_get(stat), card->lbpool.count); + printk("hbpool.count = %d iovpool.count = %d \n", + card->hbpool.count, card->iovpool.count); + } #endif /* RX_DEBUG */ } - - -static void fill_tst(ns_dev *card, int n, vc_map *vc) +static void fill_tst(ns_dev * card, int n, vc_map * vc) { - u32 new_tst; - unsigned long cl; - int e, r; - u32 data; - - /* It would be very complicated to keep the two TSTs synchronized while - assuring that writes are only made to the inactive TST. So, for now I - will use only one TST. If problems occur, I will change this again */ - - new_tst = card->tst_addr; - - /* Fill procedure */ - - for (e = 0; e < NS_TST_NUM_ENTRIES; e++) - { - if (card->tste2vc[e] == NULL) - break; - } - if (e == NS_TST_NUM_ENTRIES) { - printk("nicstar%d: No free TST entries found. \n", card->index); - return; - } - - r = n; - cl = NS_TST_NUM_ENTRIES; - data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); - - while (r > 0) - { - if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) - { - card->tste2vc[e] = vc; - ns_write_sram(card, new_tst + e, &data, 1); - cl -= NS_TST_NUM_ENTRIES; - r--; - } - - if (++e == NS_TST_NUM_ENTRIES) { - e = 0; - } - cl += n; - } - - /* End of fill procedure */ - - data = ns_tste_make(NS_TST_OPCODE_END, new_tst); - ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); - ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); - card->tst_addr = new_tst; + u32 new_tst; + unsigned long cl; + int e, r; + u32 data; + + /* It would be very complicated to keep the two TSTs synchronized while + assuring that writes are only made to the inactive TST. So, for now I + will use only one TST. If problems occur, I will change this again */ + + new_tst = card->tst_addr; + + /* Fill procedure */ + + for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { + if (card->tste2vc[e] == NULL) + break; + } + if (e == NS_TST_NUM_ENTRIES) { + printk("nicstar%d: No free TST entries found. 
\n", card->index); + return; + } + + r = n; + cl = NS_TST_NUM_ENTRIES; + data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); + + while (r > 0) { + if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { + card->tste2vc[e] = vc; + ns_write_sram(card, new_tst + e, &data, 1); + cl -= NS_TST_NUM_ENTRIES; + r--; + } + + if (++e == NS_TST_NUM_ENTRIES) { + e = 0; + } + cl += n; + } + + /* End of fill procedure */ + + data = ns_tste_make(NS_TST_OPCODE_END, new_tst); + ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); + ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); + card->tst_addr = new_tst; } - - static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) { - ns_dev *card; - vc_map *vc; - scq_info *scq; - unsigned long buflen; - ns_scqe scqe; - u32 flags; /* TBD flags, not CPU flags */ - - card = vcc->dev->dev_data; - TXPRINTK("nicstar%d: ns_send() called.\n", card->index); - if ((vc = (vc_map *) vcc->dev_data) == NULL) - { - printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); - atomic_inc(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (!vc->tx) - { - printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); - atomic_inc(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) - { - printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); - atomic_inc(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (skb_shinfo(skb)->nr_frags != 0) - { - printk("nicstar%d: No scatter-gather yet.\n", card->index); - atomic_inc(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EINVAL; - } - - ATM_SKB(skb)->vcc = vcc; - - if (vcc->qos.aal == ATM_AAL5) - { - buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ - flags = NS_TBD_AAL5; - scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data)); - scqe.word_3 = cpu_to_le32((u32) skb->len); - scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, - ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 
1 : 0); - flags |= NS_TBD_EOPDU; - } - else /* (vcc->qos.aal == ATM_AAL0) */ - { - buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ - flags = NS_TBD_AAL0; - scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER); - scqe.word_3 = cpu_to_le32(0x00000000); - if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ - flags |= NS_TBD_EOPDU; - scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); - /* Force the VPI/VCI to be the same as in VCC struct */ - scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT | - ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & - NS_TBD_VC_MASK); - } - - if (vcc->qos.txtp.traffic_class == ATM_CBR) - { - scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); - scq = ((vc_map *) vcc->dev_data)->scq; - } - else - { - scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); - scq = card->scq0; - } - - if (push_scqe(card, vc, scq, &scqe, skb) != 0) - { - atomic_inc(&vcc->stats->tx_err); - dev_kfree_skb_any(skb); - return -EIO; - } - atomic_inc(&vcc->stats->tx); - - return 0; -} - + ns_dev *card; + vc_map *vc; + scq_info *scq; + unsigned long buflen; + ns_scqe scqe; + u32 flags; /* TBD flags, not CPU flags */ + + card = vcc->dev->dev_data; + TXPRINTK("nicstar%d: ns_send() called.\n", card->index); + if ((vc = (vc_map *) vcc->dev_data) == NULL) { + printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", + card->index); + atomic_inc(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } + if (!vc->tx) { + printk("nicstar%d: Trying to transmit on a non-tx VC.\n", + card->index); + atomic_inc(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } -static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, - struct sk_buff *skb) -{ - unsigned long flags; - ns_scqe tsr; - u32 scdi, scqi; - int scq_is_vbr; - u32 data; - int index; - - spin_lock_irqsave(&scq->lock, flags); - while (scq->tail == scq->next) - { - if (in_interrupt()) { - spin_unlock_irqrestore(&scq->lock, flags); - printk("nicstar%d: Error pushing TBD.\n", card->index); - return 1; - } - - scq->full = 1; - spin_unlock_irqrestore(&scq->lock, flags); - interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); - spin_lock_irqsave(&scq->lock, flags); - - if (scq->full) { - spin_unlock_irqrestore(&scq->lock, flags); - printk("nicstar%d: Timeout pushing TBD.\n", card->index); - return 1; - } - } - *scq->next = *tbd; - index = (int) (scq->next - scq->base); - scq->skb[index] = skb; - XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n", - card->index, (u32) skb, index); - XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", - card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), - le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), - (u32) scq->next); - if (scq->next == scq->last) - scq->next = scq->base; - else - scq->next++; - - vc->tbd_count++; - if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) - { - scq->tbd_count++; - scq_is_vbr = 1; - } - else - scq_is_vbr = 0; - - if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) - { - int has_run = 0; - - while (scq->tail == scq->next) - { - if (in_interrupt()) { - data = (u32) virt_to_bus(scq->next); - ns_write_sram(card, scq->scd, &data, 1); - spin_unlock_irqrestore(&scq->lock, flags); - printk("nicstar%d: Error pushing TSR.\n", card->index); - return 0; - } - - scq->full = 1; - if (has_run++) break; - spin_unlock_irqrestore(&scq->lock, flags); - interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); - 
spin_lock_irqsave(&scq->lock, flags); - } - - if (!scq->full) - { - tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); - if (scq_is_vbr) - scdi = NS_TSR_SCDISVBR; - else - scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; - scqi = scq->next - scq->base; - tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); - tsr.word_3 = 0x00000000; - tsr.word_4 = 0x00000000; - - *scq->next = tsr; - index = (int) scqi; - scq->skb[index] = NULL; - XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", - card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), - le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), - (u32) scq->next); - if (scq->next == scq->last) - scq->next = scq->base; - else - scq->next++; - vc->tbd_count = 0; - scq->tbd_count = 0; - } - else - PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); - } - data = (u32) virt_to_bus(scq->next); - ns_write_sram(card, scq->scd, &data, 1); - - spin_unlock_irqrestore(&scq->lock, flags); - - return 0; -} + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { + printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", + card->index); + atomic_inc(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } + if (skb_shinfo(skb)->nr_frags != 0) { + printk("nicstar%d: No scatter-gather yet.\n", card->index); + atomic_inc(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + ATM_SKB(skb)->vcc = vcc; + + NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data, + skb->len, PCI_DMA_TODEVICE); + + if (vcc->qos.aal == ATM_AAL5) { + buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ + flags = NS_TBD_AAL5; + scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb)); + scqe.word_3 = cpu_to_le32(skb->len); + scqe.word_4 = + ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, + ATM_SKB(skb)-> + atm_options & ATM_ATMOPT_CLP ? 
1 : 0); + flags |= NS_TBD_EOPDU; + } else { /* (vcc->qos.aal == ATM_AAL0) */ + + buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ + flags = NS_TBD_AAL0; + scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER); + scqe.word_3 = cpu_to_le32(0x00000000); + if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ + flags |= NS_TBD_EOPDU; + scqe.word_4 = + cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); + /* Force the VPI/VCI to be the same as in VCC struct */ + scqe.word_4 |= + cpu_to_le32((((u32) vcc-> + vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> + vci) << + NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); + } + + if (vcc->qos.txtp.traffic_class == ATM_CBR) { + scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); + scq = ((vc_map *) vcc->dev_data)->scq; + } else { + scqe.word_1 = + ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); + scq = card->scq0; + } + + if (push_scqe(card, vc, scq, &scqe, skb) != 0) { + atomic_inc(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EIO; + } + atomic_inc(&vcc->stats->tx); + return 0; +} -static void process_tsq(ns_dev *card) +static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, + struct sk_buff *skb) { - u32 scdi; - scq_info *scq; - ns_tsi *previous = NULL, *one_ahead, *two_ahead; - int serviced_entries; /* flag indicating at least on entry was serviced */ - - serviced_entries = 0; - - if (card->tsq.next == card->tsq.last) - one_ahead = card->tsq.base; - else - one_ahead = card->tsq.next + 1; - - if (one_ahead == card->tsq.last) - two_ahead = card->tsq.base; - else - two_ahead = one_ahead + 1; - - while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || - !ns_tsi_isempty(two_ahead)) - /* At most two empty, as stated in the 77201 errata */ - { - serviced_entries = 1; - - /* Skip the one or two possible empty entries */ - while (ns_tsi_isempty(card->tsq.next)) { - if (card->tsq.next == card->tsq.last) - card->tsq.next = card->tsq.base; - else - card->tsq.next++; - } - - if (!ns_tsi_tmrof(card->tsq.next)) - { - scdi = ns_tsi_getscdindex(card->tsq.next); - if (scdi == NS_TSI_SCDISVBR) - scq = card->scq0; - else - { - if (card->scd2vc[scdi] == NULL) - { - printk("nicstar%d: could not find VC from SCD index.\n", - card->index); - ns_tsi_init(card->tsq.next); - return; - } - scq = card->scd2vc[scdi]->scq; - } - drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); - scq->full = 0; - wake_up_interruptible(&(scq->scqfull_waitq)); - } - - ns_tsi_init(card->tsq.next); - previous = card->tsq.next; - if (card->tsq.next == card->tsq.last) - card->tsq.next = card->tsq.base; - else - card->tsq.next++; - - if (card->tsq.next == card->tsq.last) - one_ahead = card->tsq.base; - else - one_ahead = card->tsq.next + 1; - - if (one_ahead == card->tsq.last) - two_ahead = card->tsq.base; - else - two_ahead = one_ahead + 1; - } - - if (serviced_entries) { - writel((((u32) previous) - ((u32) card->tsq.base)), - card->membase + TSQH); - } + unsigned long flags; + ns_scqe tsr; + u32 scdi, scqi; + int scq_is_vbr; + u32 data; + int index; + + spin_lock_irqsave(&scq->lock, flags); + while (scq->tail == scq->next) { + if (in_interrupt()) { + spin_unlock_irqrestore(&scq->lock, flags); + printk("nicstar%d: Error pushing TBD.\n", card->index); + return 1; + } + + scq->full = 1; + spin_unlock_irqrestore(&scq->lock, flags); + interruptible_sleep_on_timeout(&scq->scqfull_waitq, + SCQFULL_TIMEOUT); + spin_lock_irqsave(&scq->lock, flags); + + if (scq->full) { + spin_unlock_irqrestore(&scq->lock, flags); + printk("nicstar%d: Timeout pushing 
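ns_send() above now maps skb->data with pci_map_single() and stores the returned bus address (via NS_PRV_DMA()), so the transmit descriptor carries a proper DMA address instead of a virt_to_bus() value; the matching pci_unmap_single() happens when the descriptor is reaped in drain_scq() further down. A minimal kernel-style sketch of that map / fill-descriptor / unmap pairing (my_desc, my_tx_map and my_tx_complete are hypothetical names, and error handling is omitted):

  /* Illustrative kernel-style sketch, not the driver code. */
  #include <linux/pci.h>
  #include <linux/skbuff.h>

  struct my_desc {
          __le32 buf;
          __le32 len;
  };

  static dma_addr_t my_tx_map(struct pci_dev *pdev, struct sk_buff *skb,
                              struct my_desc *d)
  {
          dma_addr_t dma = pci_map_single(pdev, skb->data, skb->len,
                                          PCI_DMA_TODEVICE);

          d->buf = cpu_to_le32((u32)dma); /* the NIC takes 32-bit bus addresses */
          d->len = cpu_to_le32(skb->len);
          return dma;                     /* keep it for unmap (the driver uses skb->cb) */
  }

  static void my_tx_complete(struct pci_dev *pdev, struct sk_buff *skb,
                             dma_addr_t dma)
  {
          pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
          dev_kfree_skb_any(skb);
  }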
TBD.\n", + card->index); + return 1; + } + } + *scq->next = *tbd; + index = (int)(scq->next - scq->base); + scq->skb[index] = skb; + XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n", + card->index, skb, index); + XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", + card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), + le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), + scq->next); + if (scq->next == scq->last) + scq->next = scq->base; + else + scq->next++; + + vc->tbd_count++; + if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { + scq->tbd_count++; + scq_is_vbr = 1; + } else + scq_is_vbr = 0; + + if (vc->tbd_count >= MAX_TBD_PER_VC + || scq->tbd_count >= MAX_TBD_PER_SCQ) { + int has_run = 0; + + while (scq->tail == scq->next) { + if (in_interrupt()) { + data = scq_virt_to_bus(scq, scq->next); + ns_write_sram(card, scq->scd, &data, 1); + spin_unlock_irqrestore(&scq->lock, flags); + printk("nicstar%d: Error pushing TSR.\n", + card->index); + return 0; + } + + scq->full = 1; + if (has_run++) + break; + spin_unlock_irqrestore(&scq->lock, flags); + interruptible_sleep_on_timeout(&scq->scqfull_waitq, + SCQFULL_TIMEOUT); + spin_lock_irqsave(&scq->lock, flags); + } + + if (!scq->full) { + tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); + if (scq_is_vbr) + scdi = NS_TSR_SCDISVBR; + else + scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; + scqi = scq->next - scq->base; + tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); + tsr.word_3 = 0x00000000; + tsr.word_4 = 0x00000000; + + *scq->next = tsr; + index = (int)scqi; + scq->skb[index] = NULL; + XPRINTK + ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", + card->index, le32_to_cpu(tsr.word_1), + le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), + le32_to_cpu(tsr.word_4), scq->next); + if (scq->next == scq->last) + scq->next = scq->base; + else + scq->next++; + vc->tbd_count = 0; + scq->tbd_count = 0; + } else + PRINTK("nicstar%d: Timeout pushing TSR.\n", + card->index); + } + data = scq_virt_to_bus(scq, scq->next); + ns_write_sram(card, scq->scd, &data, 1); + + spin_unlock_irqrestore(&scq->lock, flags); + + return 0; } - - -static void drain_scq(ns_dev *card, scq_info *scq, int pos) +static void process_tsq(ns_dev * card) { - struct atm_vcc *vcc; - struct sk_buff *skb; - int i; - unsigned long flags; - - XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n", - card->index, (u32) scq, pos); - if (pos >= scq->num_entries) - { - printk("nicstar%d: Bad index on drain_scq().\n", card->index); - return; - } - - spin_lock_irqsave(&scq->lock, flags); - i = (int) (scq->tail - scq->base); - if (++i == scq->num_entries) - i = 0; - while (i != pos) - { - skb = scq->skb[i]; - XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n", - card->index, (u32) skb, i); - if (skb != NULL) - { - vcc = ATM_SKB(skb)->vcc; - if (vcc && vcc->pop != NULL) { - vcc->pop(vcc, skb); - } else { - dev_kfree_skb_irq(skb); - } - scq->skb[i] = NULL; - } - if (++i == scq->num_entries) - i = 0; - } - scq->tail = scq->base + pos; - spin_unlock_irqrestore(&scq->lock, flags); + u32 scdi; + scq_info *scq; + ns_tsi *previous = NULL, *one_ahead, *two_ahead; + int serviced_entries; /* flag indicating at least on entry was serviced */ + + serviced_entries = 0; + + if (card->tsq.next == card->tsq.last) + one_ahead = card->tsq.base; + else + one_ahead = card->tsq.next + 1; + + if (one_ahead == card->tsq.last) + two_ahead = card->tsq.base; + else + two_ahead = one_ahead + 1; + + while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || 
+ !ns_tsi_isempty(two_ahead)) + /* At most two empty, as stated in the 77201 errata */ + { + serviced_entries = 1; + + /* Skip the one or two possible empty entries */ + while (ns_tsi_isempty(card->tsq.next)) { + if (card->tsq.next == card->tsq.last) + card->tsq.next = card->tsq.base; + else + card->tsq.next++; + } + + if (!ns_tsi_tmrof(card->tsq.next)) { + scdi = ns_tsi_getscdindex(card->tsq.next); + if (scdi == NS_TSI_SCDISVBR) + scq = card->scq0; + else { + if (card->scd2vc[scdi] == NULL) { + printk + ("nicstar%d: could not find VC from SCD index.\n", + card->index); + ns_tsi_init(card->tsq.next); + return; + } + scq = card->scd2vc[scdi]->scq; + } + drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); + scq->full = 0; + wake_up_interruptible(&(scq->scqfull_waitq)); + } + + ns_tsi_init(card->tsq.next); + previous = card->tsq.next; + if (card->tsq.next == card->tsq.last) + card->tsq.next = card->tsq.base; + else + card->tsq.next++; + + if (card->tsq.next == card->tsq.last) + one_ahead = card->tsq.base; + else + one_ahead = card->tsq.next + 1; + + if (one_ahead == card->tsq.last) + two_ahead = card->tsq.base; + else + two_ahead = one_ahead + 1; + } + + if (serviced_entries) + writel(PTR_DIFF(previous, card->tsq.base), + card->membase + TSQH); } +static void drain_scq(ns_dev * card, scq_info * scq, int pos) +{ + struct atm_vcc *vcc; + struct sk_buff *skb; + int i; + unsigned long flags; + + XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n", + card->index, scq, pos); + if (pos >= scq->num_entries) { + printk("nicstar%d: Bad index on drain_scq().\n", card->index); + return; + } + + spin_lock_irqsave(&scq->lock, flags); + i = (int)(scq->tail - scq->base); + if (++i == scq->num_entries) + i = 0; + while (i != pos) { + skb = scq->skb[i]; + XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n", + card->index, skb, i); + if (skb != NULL) { + pci_unmap_single(card->pcidev, + NS_PRV_DMA(skb), + skb->len, + PCI_DMA_TODEVICE); + vcc = ATM_SKB(skb)->vcc; + if (vcc && vcc->pop != NULL) { + vcc->pop(vcc, skb); + } else { + dev_kfree_skb_irq(skb); + } + scq->skb[i] = NULL; + } + if (++i == scq->num_entries) + i = 0; + } + scq->tail = scq->base + pos; + spin_unlock_irqrestore(&scq->lock, flags); +} - -static void process_rsq(ns_dev *card) +static void process_rsq(ns_dev * card) { - ns_rsqe *previous; - - if (!ns_rsqe_valid(card->rsq.next)) - return; - do { - dequeue_rx(card, card->rsq.next); - ns_rsqe_init(card->rsq.next); - previous = card->rsq.next; - if (card->rsq.next == card->rsq.last) - card->rsq.next = card->rsq.base; - else - card->rsq.next++; - } while (ns_rsqe_valid(card->rsq.next)); - writel((((u32) previous) - ((u32) card->rsq.base)), - card->membase + RSQH); + ns_rsqe *previous; + + if (!ns_rsqe_valid(card->rsq.next)) + return; + do { + dequeue_rx(card, card->rsq.next); + ns_rsqe_init(card->rsq.next); + previous = card->rsq.next; + if (card->rsq.next == card->rsq.last) + card->rsq.next = card->rsq.base; + else + card->rsq.next++; + } while (ns_rsqe_valid(card->rsq.next)); + writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); } +static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) +{ + u32 vpi, vci; + vc_map *vc; + struct sk_buff *iovb; + struct iovec *iov; + struct atm_vcc *vcc; + struct sk_buff *skb; + unsigned short aal5_len; + int len; + u32 stat; + u32 id; + + stat = readl(card->membase + STAT); + card->sbfqc = ns_stat_sfbqc_get(stat); + card->lbfqc = ns_stat_lfbqc_get(stat); + + id = le32_to_cpu(rsqe->buffer_handle); + skb = idr_find(&card->idr, id); + if 
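process_tsq() and process_rsq() above now program TSQH/RSQH with PTR_DIFF(previous, base), i.e. the byte offset of the last serviced entry from the queue base, rather than a pointer cast to u32 (which would truncate on 64-bit hosts). PTR_DIFF is presumably a driver-local helper for that pointer difference; a small standalone sketch of the idea:

  /* Illustrative only: the head registers take an offset from the queue
   * base, not a CPU pointer. */
  #include <stdio.h>
  #include <stdint.h>

  struct entry { uint32_t w[4]; };

  static uint32_t queue_head_offset(const struct entry *base,
                                    const struct entry *last_serviced)
  {
          return (uint32_t)((const char *)last_serviced - (const char *)base);
  }

  int main(void)
  {
          struct entry tsq[128];

          /* Entry 5 was the last one serviced: offset = 5 * sizeof(entry). */
          printf("TSQH <- %u\n", queue_head_offset(tsq, &tsq[5]));
          return 0;
  }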
(!skb) { + RXPRINTK(KERN_ERR + "nicstar%d: idr_find() failed!\n", card->index); + return; + } + idr_remove(&card->idr, id); + pci_dma_sync_single_for_cpu(card->pcidev, + NS_PRV_DMA(skb), + (NS_PRV_BUFTYPE(skb) == BUF_SM + ? NS_SMSKBSIZE : NS_LGSKBSIZE), + PCI_DMA_FROMDEVICE); + pci_unmap_single(card->pcidev, + NS_PRV_DMA(skb), + (NS_PRV_BUFTYPE(skb) == BUF_SM + ? NS_SMSKBSIZE : NS_LGSKBSIZE), + PCI_DMA_FROMDEVICE); + vpi = ns_rsqe_vpi(rsqe); + vci = ns_rsqe_vci(rsqe); + if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { + printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", + card->index, vpi, vci); + recycle_rx_buf(card, skb); + return; + } + + vc = &(card->vcmap[vpi << card->vcibits | vci]); + if (!vc->rx) { + RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", + card->index, vpi, vci); + recycle_rx_buf(card, skb); + return; + } + + vcc = vc->rx_vcc; + + if (vcc->qos.aal == ATM_AAL0) { + struct sk_buff *sb; + unsigned char *cell; + int i; + + cell = skb->data; + for (i = ns_rsqe_cellcount(rsqe); i; i--) { + if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) { + printk + ("nicstar%d: Can't allocate buffers for aal0.\n", + card->index); + atomic_add(i, &vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) { + RXPRINTK + ("nicstar%d: atm_charge() dropped aal0 packets.\n", + card->index); + atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ + dev_kfree_skb_any(sb); + break; + } + /* Rebuild the header */ + *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | + (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); + if (i == 1 && ns_rsqe_eopdu(rsqe)) + *((u32 *) sb->data) |= 0x00000002; + skb_put(sb, NS_AAL0_HEADER); + memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); + skb_put(sb, ATM_CELL_PAYLOAD); + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); + atomic_inc(&vcc->stats->rx); + cell += ATM_CELL_PAYLOAD; + } + + recycle_rx_buf(card, skb); + return; + } + + /* To reach this point, the AAL layer can only be AAL5 */ + + if ((iovb = vc->rx_iov) == NULL) { + iovb = skb_dequeue(&(card->iovpool.queue)); + if (iovb == NULL) { /* No buffers in the queue */ + iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); + if (iovb == NULL) { + printk("nicstar%d: Out of iovec buffers.\n", + card->index); + atomic_inc(&vcc->stats->rx_drop); + recycle_rx_buf(card, skb); + return; + } + NS_PRV_BUFTYPE(iovb) = BUF_NONE; + } else if (--card->iovpool.count < card->iovnr.min) { + struct sk_buff *new_iovb; + if ((new_iovb = + alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { + NS_PRV_BUFTYPE(iovb) = BUF_NONE; + skb_queue_tail(&card->iovpool.queue, new_iovb); + card->iovpool.count++; + } + } + vc->rx_iov = iovb; + NS_PRV_IOVCNT(iovb) = 0; + iovb->len = 0; + iovb->data = iovb->head; + skb_reset_tail_pointer(iovb); + /* IMPORTANT: a pointer to the sk_buff containing the small or large + buffer is stored as iovec base, NOT a pointer to the + small or large buffer itself. 
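dequeue_rx() above no longer reinterprets the hardware's 32-bit buffer_handle as a kernel pointer; receive skbs are registered in card->idr and the handle the NIC reports back is simply the idr id, which stays 32-bit clean on 64-bit machines. A minimal sketch of that register / look-up / remove pattern, written against the newer idr_alloc() interface (the patch itself predates it and uses the older allocation calls), with illustrative names:

  /* Illustrative sketch, not the driver code: hand the hardware a small
   * integer id instead of a kernel pointer. */
  #include <linux/idr.h>
  #include <linux/skbuff.h>

  static DEFINE_IDR(rx_idr);

  /* Register a receive skb and return the 32-bit handle given to the NIC. */
  static int rx_register(struct sk_buff *skb, gfp_t gfp)
  {
          return idr_alloc(&rx_idr, skb, 0, 0, gfp);  /* < 0 on error */
  }

  /* On a receive completion, turn the reported handle back into the skb. */
  static struct sk_buff *rx_lookup(u32 handle)
  {
          struct sk_buff *skb = idr_find(&rx_idr, handle);

          if (skb)
                  idr_remove(&rx_idr, handle);
          return skb;
  }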
*/ + } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { + printk("nicstar%d: received too big AAL5 SDU.\n", card->index); + atomic_inc(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_MAX_IOVECS); + NS_PRV_IOVCNT(iovb) = 0; + iovb->len = 0; + iovb->data = iovb->head; + skb_reset_tail_pointer(iovb); + } + iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++]; + iov->iov_base = (void *)skb; + iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; + iovb->len += iov->iov_len; +#ifdef EXTRA_DEBUG + if (NS_PRV_IOVCNT(iovb) == 1) { + if (NS_PRV_BUFTYPE(skb) != BUF_SM) { + printk + ("nicstar%d: Expected a small buffer, and this is not one.\n", + card->index); + which_list(card, skb); + atomic_inc(&vcc->stats->rx_err); + recycle_rx_buf(card, skb); + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); + return; + } + } else { /* NS_PRV_IOVCNT(iovb) >= 2 */ + + if (NS_PRV_BUFTYPE(skb) != BUF_LG) { + printk + ("nicstar%d: Expected a large buffer, and this is not one.\n", + card->index); + which_list(card, skb); + atomic_inc(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); + return; + } + } +#endif /* EXTRA_DEBUG */ -static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) -{ - u32 vpi, vci; - vc_map *vc; - struct sk_buff *iovb; - struct iovec *iov; - struct atm_vcc *vcc; - struct sk_buff *skb; - unsigned short aal5_len; - int len; - u32 stat; - - stat = readl(card->membase + STAT); - card->sbfqc = ns_stat_sfbqc_get(stat); - card->lbfqc = ns_stat_lfbqc_get(stat); - - skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle); - vpi = ns_rsqe_vpi(rsqe); - vci = ns_rsqe_vci(rsqe); - if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) - { - printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", - card->index, vpi, vci); - recycle_rx_buf(card, skb); - return; - } - - vc = &(card->vcmap[vpi << card->vcibits | vci]); - if (!vc->rx) - { - RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", - card->index, vpi, vci); - recycle_rx_buf(card, skb); - return; - } - - vcc = vc->rx_vcc; - - if (vcc->qos.aal == ATM_AAL0) - { - struct sk_buff *sb; - unsigned char *cell; - int i; - - cell = skb->data; - for (i = ns_rsqe_cellcount(rsqe); i; i--) - { - if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) - { - printk("nicstar%d: Can't allocate buffers for aal0.\n", - card->index); - atomic_add(i,&vcc->stats->rx_drop); - break; - } - if (!atm_charge(vcc, sb->truesize)) - { - RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", - card->index); - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ - dev_kfree_skb_any(sb); - break; - } - /* Rebuild the header */ - *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | - (ns_rsqe_clp(rsqe) ? 
0x00000001 : 0x00000000); - if (i == 1 && ns_rsqe_eopdu(rsqe)) - *((u32 *) sb->data) |= 0x00000002; - skb_put(sb, NS_AAL0_HEADER); - memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); - skb_put(sb, ATM_CELL_PAYLOAD); - ATM_SKB(sb)->vcc = vcc; - __net_timestamp(sb); - vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); - cell += ATM_CELL_PAYLOAD; - } - - recycle_rx_buf(card, skb); - return; - } - - /* To reach this point, the AAL layer can only be AAL5 */ - - if ((iovb = vc->rx_iov) == NULL) - { - iovb = skb_dequeue(&(card->iovpool.queue)); - if (iovb == NULL) /* No buffers in the queue */ - { - iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); - if (iovb == NULL) - { - printk("nicstar%d: Out of iovec buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); - recycle_rx_buf(card, skb); - return; - } - NS_SKB_CB(iovb)->buf_type = BUF_NONE; - } - else - if (--card->iovpool.count < card->iovnr.min) - { - struct sk_buff *new_iovb; - if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) - { - NS_SKB_CB(iovb)->buf_type = BUF_NONE; - skb_queue_tail(&card->iovpool.queue, new_iovb); - card->iovpool.count++; - } - } - vc->rx_iov = iovb; - NS_SKB(iovb)->iovcnt = 0; - iovb->len = 0; - iovb->data = iovb->head; - skb_reset_tail_pointer(iovb); - NS_SKB(iovb)->vcc = vcc; - /* IMPORTANT: a pointer to the sk_buff containing the small or large - buffer is stored as iovec base, NOT a pointer to the - small or large buffer itself. */ - } - else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) - { - printk("nicstar%d: received too big AAL5 SDU.\n", card->index); - atomic_inc(&vcc->stats->rx_err); - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); - NS_SKB(iovb)->iovcnt = 0; - iovb->len = 0; - iovb->data = iovb->head; - skb_reset_tail_pointer(iovb); - NS_SKB(iovb)->vcc = vcc; - } - iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; - iov->iov_base = (void *) skb; - iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; - iovb->len += iov->iov_len; - - if (NS_SKB(iovb)->iovcnt == 1) - { - if (NS_SKB_CB(skb)->buf_type != BUF_SM) - { - printk("nicstar%d: Expected a small buffer, and this is not one.\n", - card->index); - which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); - recycle_rx_buf(card, skb); - vc->rx_iov = NULL; - recycle_iov_buf(card, iovb); - return; - } - } - else /* NS_SKB(iovb)->iovcnt >= 2 */ - { - if (NS_SKB_CB(skb)->buf_type != BUF_LG) - { - printk("nicstar%d: Expected a large buffer, and this is not one.\n", - card->index); - which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, - NS_SKB(iovb)->iovcnt); - vc->rx_iov = NULL; - recycle_iov_buf(card, iovb); - return; - } - } - - if (ns_rsqe_eopdu(rsqe)) - { - /* This works correctly regardless of the endianness of the host */ - unsigned char *L1L2 = (unsigned char *)((u32)skb->data + - iov->iov_len - 6); - aal5_len = L1L2[0] << 8 | L1L2[1]; - len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; - if (ns_rsqe_crcerr(rsqe) || - len + 8 > iovb->len || len + (47 + 8) < iovb->len) - { - printk("nicstar%d: AAL5 CRC error", card->index); - if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) - printk(" - PDU size mismatch.\n"); - else - printk(".\n"); - atomic_inc(&vcc->stats->rx_err); - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, - NS_SKB(iovb)->iovcnt); - vc->rx_iov = NULL; - recycle_iov_buf(card, iovb); - return; - } - - /* By this point we (hopefully) have a complete SDU without errors. 
*/ - - if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */ - { - /* skb points to a small buffer */ - if (!atm_charge(vcc, skb->truesize)) - { - push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); - } - else - { - skb_put(skb, len); - dequeue_sm_buf(card, skb); + if (ns_rsqe_eopdu(rsqe)) { + /* This works correctly regardless of the endianness of the host */ + unsigned char *L1L2 = (unsigned char *) + (skb->data + iov->iov_len - 6); + aal5_len = L1L2[0] << 8 | L1L2[1]; + len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; + if (ns_rsqe_crcerr(rsqe) || + len + 8 > iovb->len || len + (47 + 8) < iovb->len) { + printk("nicstar%d: AAL5 CRC error", card->index); + if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) + printk(" - PDU size mismatch.\n"); + else + printk(".\n"); + atomic_inc(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); + return; + } + + /* By this point we (hopefully) have a complete SDU without errors. */ + + if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */ + /* skb points to a small buffer */ + if (!atm_charge(vcc, skb->truesize)) { + push_rxbufs(card, skb); + atomic_inc(&vcc->stats->rx_drop); + } else { + skb_put(skb, len); + dequeue_sm_buf(card, skb); #ifdef NS_USE_DESTRUCTORS - skb->destructor = ns_sb_destructor; + skb->destructor = ns_sb_destructor; #endif /* NS_USE_DESTRUCTORS */ - ATM_SKB(skb)->vcc = vcc; - __net_timestamp(skb); - vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); - } - } - else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ - { - struct sk_buff *sb; - - sb = (struct sk_buff *) (iov - 1)->iov_base; - /* skb points to a large buffer */ - - if (len <= NS_SMBUFSIZE) - { - if (!atm_charge(vcc, sb->truesize)) - { - push_rxbufs(card, sb); - atomic_inc(&vcc->stats->rx_drop); - } - else - { - skb_put(sb, len); - dequeue_sm_buf(card, sb); + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); + atomic_inc(&vcc->stats->rx); + } + } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ + struct sk_buff *sb; + + sb = (struct sk_buff *)(iov - 1)->iov_base; + /* skb points to a large buffer */ + + if (len <= NS_SMBUFSIZE) { + if (!atm_charge(vcc, sb->truesize)) { + push_rxbufs(card, sb); + atomic_inc(&vcc->stats->rx_drop); + } else { + skb_put(sb, len); + dequeue_sm_buf(card, sb); #ifdef NS_USE_DESTRUCTORS - sb->destructor = ns_sb_destructor; + sb->destructor = ns_sb_destructor; #endif /* NS_USE_DESTRUCTORS */ - ATM_SKB(sb)->vcc = vcc; - __net_timestamp(sb); - vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); - } - - push_rxbufs(card, skb); - - } - else /* len > NS_SMBUFSIZE, the usual case */ - { - if (!atm_charge(vcc, skb->truesize)) - { - push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); - } - else - { - dequeue_lg_buf(card, skb); + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); + atomic_inc(&vcc->stats->rx); + } + + push_rxbufs(card, skb); + + } else { /* len > NS_SMBUFSIZE, the usual case */ + + if (!atm_charge(vcc, skb->truesize)) { + push_rxbufs(card, skb); + atomic_inc(&vcc->stats->rx_drop); + } else { + dequeue_lg_buf(card, skb); #ifdef NS_USE_DESTRUCTORS - skb->destructor = ns_lb_destructor; + skb->destructor = ns_lb_destructor; #endif /* NS_USE_DESTRUCTORS */ - skb_push(skb, NS_SMBUFSIZE); - skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); - skb_put(skb, len - NS_SMBUFSIZE); - ATM_SKB(skb)->vcc = vcc; - __net_timestamp(skb); - 
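The end-of-PDU branch above pulls the CPCS length out of the AAL5 trailer, which occupies the last 8 bytes of the padded PDU (CPCS-UU, CPI, 2-byte big-endian length, CRC-32); reading the two length bytes 6 bytes from the end therefore works on any host endianness, and a value of zero encodes the maximum 65536-byte SDU. A tiny standalone sketch of that extraction with made-up numbers:

  /* Illustrative only: pulling the CPCS-PDU length out of an AAL5 trailer
   * the same way dequeue_rx() does. */
  #include <stdio.h>

  int main(void)
  {
          /* A reassembled PDU of 3 cells = 144 bytes; only the tail matters. */
          unsigned char pdu[144] = { 0 };
          unsigned total = sizeof(pdu);

          pdu[total - 6] = 0x00;          /* length, high byte */
          pdu[total - 5] = 0x64;          /* length, low byte: 100 payload bytes */

          unsigned char *l = pdu + total - 6;
          unsigned len = l[0] << 8 | l[1];

          if (len == 0)
                  len = 0x10000;          /* 0 encodes a 65536-byte SDU */

          printf("payload length %u, padding+trailer %u\n", len, total - len);
          return 0;
  }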
vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); - } - - push_rxbufs(card, sb); - - } - - } - else /* Must push a huge buffer */ - { - struct sk_buff *hb, *sb, *lb; - int remaining, tocopy; - int j; - - hb = skb_dequeue(&(card->hbpool.queue)); - if (hb == NULL) /* No buffers in the queue */ - { - - hb = dev_alloc_skb(NS_HBUFSIZE); - if (hb == NULL) - { - printk("nicstar%d: Out of huge buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); - recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, - NS_SKB(iovb)->iovcnt); - vc->rx_iov = NULL; - recycle_iov_buf(card, iovb); - return; - } - else if (card->hbpool.count < card->hbnr.min) - { - struct sk_buff *new_hb; - if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) - { - skb_queue_tail(&card->hbpool.queue, new_hb); - card->hbpool.count++; - } - } - NS_SKB_CB(hb)->buf_type = BUF_NONE; - } - else - if (--card->hbpool.count < card->hbnr.min) - { - struct sk_buff *new_hb; - if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) - { - NS_SKB_CB(new_hb)->buf_type = BUF_NONE; - skb_queue_tail(&card->hbpool.queue, new_hb); - card->hbpool.count++; - } - if (card->hbpool.count < card->hbnr.min) - { - if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) - { - NS_SKB_CB(new_hb)->buf_type = BUF_NONE; - skb_queue_tail(&card->hbpool.queue, new_hb); - card->hbpool.count++; - } - } - } - - iov = (struct iovec *) iovb->data; - - if (!atm_charge(vcc, hb->truesize)) - { - recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt); - if (card->hbpool.count < card->hbnr.max) - { - skb_queue_tail(&card->hbpool.queue, hb); - card->hbpool.count++; - } - else - dev_kfree_skb_any(hb); - atomic_inc(&vcc->stats->rx_drop); - } - else - { - /* Copy the small buffer to the huge buffer */ - sb = (struct sk_buff *) iov->iov_base; - skb_copy_from_linear_data(sb, hb->data, iov->iov_len); - skb_put(hb, iov->iov_len); - remaining = len - iov->iov_len; - iov++; - /* Free the small buffer */ - push_rxbufs(card, sb); - - /* Copy all large buffers to the huge buffer and free them */ - for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) - { - lb = (struct sk_buff *) iov->iov_base; - tocopy = min_t(int, remaining, iov->iov_len); - skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy); - skb_put(hb, tocopy); - iov++; - remaining -= tocopy; - push_rxbufs(card, lb); - } + skb_push(skb, NS_SMBUFSIZE); + skb_copy_from_linear_data(sb, skb->data, + NS_SMBUFSIZE); + skb_put(skb, len - NS_SMBUFSIZE); + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); + atomic_inc(&vcc->stats->rx); + } + + push_rxbufs(card, sb); + + } + + } else { /* Must push a huge buffer */ + + struct sk_buff *hb, *sb, *lb; + int remaining, tocopy; + int j; + + hb = skb_dequeue(&(card->hbpool.queue)); + if (hb == NULL) { /* No buffers in the queue */ + + hb = dev_alloc_skb(NS_HBUFSIZE); + if (hb == NULL) { + printk + ("nicstar%d: Out of huge buffers.\n", + card->index); + atomic_inc(&vcc->stats->rx_drop); + recycle_iovec_rx_bufs(card, + (struct iovec *) + iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); + return; + } else if (card->hbpool.count < card->hbnr.min) { + struct sk_buff *new_hb; + if ((new_hb = + dev_alloc_skb(NS_HBUFSIZE)) != + NULL) { + skb_queue_tail(&card->hbpool. 
+ queue, new_hb); + card->hbpool.count++; + } + } + NS_PRV_BUFTYPE(hb) = BUF_NONE; + } else if (--card->hbpool.count < card->hbnr.min) { + struct sk_buff *new_hb; + if ((new_hb = + dev_alloc_skb(NS_HBUFSIZE)) != NULL) { + NS_PRV_BUFTYPE(new_hb) = BUF_NONE; + skb_queue_tail(&card->hbpool.queue, + new_hb); + card->hbpool.count++; + } + if (card->hbpool.count < card->hbnr.min) { + if ((new_hb = + dev_alloc_skb(NS_HBUFSIZE)) != + NULL) { + NS_PRV_BUFTYPE(new_hb) = + BUF_NONE; + skb_queue_tail(&card->hbpool. + queue, new_hb); + card->hbpool.count++; + } + } + } + + iov = (struct iovec *)iovb->data; + + if (!atm_charge(vcc, hb->truesize)) { + recycle_iovec_rx_bufs(card, iov, + NS_PRV_IOVCNT(iovb)); + if (card->hbpool.count < card->hbnr.max) { + skb_queue_tail(&card->hbpool.queue, hb); + card->hbpool.count++; + } else + dev_kfree_skb_any(hb); + atomic_inc(&vcc->stats->rx_drop); + } else { + /* Copy the small buffer to the huge buffer */ + sb = (struct sk_buff *)iov->iov_base; + skb_copy_from_linear_data(sb, hb->data, + iov->iov_len); + skb_put(hb, iov->iov_len); + remaining = len - iov->iov_len; + iov++; + /* Free the small buffer */ + push_rxbufs(card, sb); + + /* Copy all large buffers to the huge buffer and free them */ + for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) { + lb = (struct sk_buff *)iov->iov_base; + tocopy = + min_t(int, remaining, iov->iov_len); + skb_copy_from_linear_data(lb, + skb_tail_pointer + (hb), tocopy); + skb_put(hb, tocopy); + iov++; + remaining -= tocopy; + push_rxbufs(card, lb); + } #ifdef EXTRA_DEBUG - if (remaining != 0 || hb->len != len) - printk("nicstar%d: Huge buffer len mismatch.\n", card->index); + if (remaining != 0 || hb->len != len) + printk + ("nicstar%d: Huge buffer len mismatch.\n", + card->index); #endif /* EXTRA_DEBUG */ - ATM_SKB(hb)->vcc = vcc; + ATM_SKB(hb)->vcc = vcc; #ifdef NS_USE_DESTRUCTORS - hb->destructor = ns_hb_destructor; + hb->destructor = ns_hb_destructor; #endif /* NS_USE_DESTRUCTORS */ - __net_timestamp(hb); - vcc->push(vcc, hb); - atomic_inc(&vcc->stats->rx); - } - } + __net_timestamp(hb); + vcc->push(vcc, hb); + atomic_inc(&vcc->stats->rx); + } + } - vc->rx_iov = NULL; - recycle_iov_buf(card, iovb); - } + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); + } } - - #ifdef NS_USE_DESTRUCTORS static void ns_sb_destructor(struct sk_buff *sb) { - ns_dev *card; - u32 stat; - - card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; - stat = readl(card->membase + STAT); - card->sbfqc = ns_stat_sfbqc_get(stat); - card->lbfqc = ns_stat_lfbqc_get(stat); - - do - { - sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); - if (sb == NULL) - break; - NS_SKB_CB(sb)->buf_type = BUF_SM; - skb_queue_tail(&card->sbpool.queue, sb); - skb_reserve(sb, NS_AAL0_HEADER); - push_rxbufs(card, sb); - } while (card->sbfqc < card->sbnr.min); + ns_dev *card; + u32 stat; + + card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; + stat = readl(card->membase + STAT); + card->sbfqc = ns_stat_sfbqc_get(stat); + card->lbfqc = ns_stat_lfbqc_get(stat); + + do { + sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); + if (sb == NULL) + break; + NS_PRV_BUFTYPE(sb) = BUF_SM; + skb_queue_tail(&card->sbpool.queue, sb); + skb_reserve(sb, NS_AAL0_HEADER); + push_rxbufs(card, sb); + } while (card->sbfqc < card->sbnr.min); } - - static void ns_lb_destructor(struct sk_buff *lb) { - ns_dev *card; - u32 stat; - - card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; - stat = readl(card->membase + STAT); - card->sbfqc = ns_stat_sfbqc_get(stat); - card->lbfqc = ns_stat_lfbqc_get(stat); - - do - { - lb = 
__dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); - if (lb == NULL) - break; - NS_SKB_CB(lb)->buf_type = BUF_LG; - skb_queue_tail(&card->lbpool.queue, lb); - skb_reserve(lb, NS_SMBUFSIZE); - push_rxbufs(card, lb); - } while (card->lbfqc < card->lbnr.min); + ns_dev *card; + u32 stat; + + card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; + stat = readl(card->membase + STAT); + card->sbfqc = ns_stat_sfbqc_get(stat); + card->lbfqc = ns_stat_lfbqc_get(stat); + + do { + lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); + if (lb == NULL) + break; + NS_PRV_BUFTYPE(lb) = BUF_LG; + skb_queue_tail(&card->lbpool.queue, lb); + skb_reserve(lb, NS_SMBUFSIZE); + push_rxbufs(card, lb); + } while (card->lbfqc < card->lbnr.min); } - - static void ns_hb_destructor(struct sk_buff *hb) { - ns_dev *card; - - card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; - - while (card->hbpool.count < card->hbnr.init) - { - hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); - if (hb == NULL) - break; - NS_SKB_CB(hb)->buf_type = BUF_NONE; - skb_queue_tail(&card->hbpool.queue, hb); - card->hbpool.count++; - } + ns_dev *card; + + card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; + + while (card->hbpool.count < card->hbnr.init) { + hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); + if (hb == NULL) + break; + NS_PRV_BUFTYPE(hb) = BUF_NONE; + skb_queue_tail(&card->hbpool.queue, hb); + card->hbpool.count++; + } } #endif /* NS_USE_DESTRUCTORS */ - -static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) +static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) { - struct ns_skb_cb *cb = NS_SKB_CB(skb); - - if (unlikely(cb->buf_type == BUF_NONE)) { - printk("nicstar%d: What kind of rx buffer is this?\n", card->index); + if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) { + printk("nicstar%d: What kind of rx buffer is this?\n", + card->index); dev_kfree_skb_any(skb); } else push_rxbufs(card, skb); } - -static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) +static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) { while (count-- > 0) - recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base); + recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); } - -static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) +static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) { - if (card->iovpool.count < card->iovnr.max) - { - skb_queue_tail(&card->iovpool.queue, iovb); - card->iovpool.count++; - } - else - dev_kfree_skb_any(iovb); + if (card->iovpool.count < card->iovnr.max) { + skb_queue_tail(&card->iovpool.queue, iovb); + card->iovpool.count++; + } else + dev_kfree_skb_any(iovb); } - - -static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) +static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) { - skb_unlink(sb, &card->sbpool.queue); + skb_unlink(sb, &card->sbpool.queue); #ifdef NS_USE_DESTRUCTORS - if (card->sbfqc < card->sbnr.min) + if (card->sbfqc < card->sbnr.min) #else - if (card->sbfqc < card->sbnr.init) - { - struct sk_buff *new_sb; - if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) - { - NS_SKB_CB(new_sb)->buf_type = BUF_SM; - skb_queue_tail(&card->sbpool.queue, new_sb); - skb_reserve(new_sb, NS_AAL0_HEADER); - push_rxbufs(card, new_sb); - } - } - if (card->sbfqc < card->sbnr.init) + if (card->sbfqc < card->sbnr.init) { + struct sk_buff *new_sb; + if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { + NS_PRV_BUFTYPE(new_sb) = BUF_SM; + skb_queue_tail(&card->sbpool.queue, new_sb); + skb_reserve(new_sb, NS_AAL0_HEADER); + push_rxbufs(card, new_sb); + } 
+ } + if (card->sbfqc < card->sbnr.init) #endif /* NS_USE_DESTRUCTORS */ - { - struct sk_buff *new_sb; - if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) - { - NS_SKB_CB(new_sb)->buf_type = BUF_SM; - skb_queue_tail(&card->sbpool.queue, new_sb); - skb_reserve(new_sb, NS_AAL0_HEADER); - push_rxbufs(card, new_sb); - } - } + { + struct sk_buff *new_sb; + if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { + NS_PRV_BUFTYPE(new_sb) = BUF_SM; + skb_queue_tail(&card->sbpool.queue, new_sb); + skb_reserve(new_sb, NS_AAL0_HEADER); + push_rxbufs(card, new_sb); + } + } } - - -static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) +static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) { - skb_unlink(lb, &card->lbpool.queue); + skb_unlink(lb, &card->lbpool.queue); #ifdef NS_USE_DESTRUCTORS - if (card->lbfqc < card->lbnr.min) + if (card->lbfqc < card->lbnr.min) #else - if (card->lbfqc < card->lbnr.init) - { - struct sk_buff *new_lb; - if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) - { - NS_SKB_CB(new_lb)->buf_type = BUF_LG; - skb_queue_tail(&card->lbpool.queue, new_lb); - skb_reserve(new_lb, NS_SMBUFSIZE); - push_rxbufs(card, new_lb); - } - } - if (card->lbfqc < card->lbnr.init) + if (card->lbfqc < card->lbnr.init) { + struct sk_buff *new_lb; + if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { + NS_PRV_BUFTYPE(new_lb) = BUF_LG; + skb_queue_tail(&card->lbpool.queue, new_lb); + skb_reserve(new_lb, NS_SMBUFSIZE); + push_rxbufs(card, new_lb); + } + } + if (card->lbfqc < card->lbnr.init) #endif /* NS_USE_DESTRUCTORS */ - { - struct sk_buff *new_lb; - if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) - { - NS_SKB_CB(new_lb)->buf_type = BUF_LG; - skb_queue_tail(&card->lbpool.queue, new_lb); - skb_reserve(new_lb, NS_SMBUFSIZE); - push_rxbufs(card, new_lb); - } - } + { + struct sk_buff *new_lb; + if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { + NS_PRV_BUFTYPE(new_lb) = BUF_LG; + skb_queue_tail(&card->lbpool.queue, new_lb); + skb_reserve(new_lb, NS_SMBUFSIZE); + push_rxbufs(card, new_lb); + } + } } - - -static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page) +static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) { - u32 stat; - ns_dev *card; - int left; - - left = (int) *pos; - card = (ns_dev *) dev->dev_data; - stat = readl(card->membase + STAT); - if (!left--) - return sprintf(page, "Pool count min init max \n"); - if (!left--) - return sprintf(page, "Small %5d %5d %5d %5d \n", - ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, - card->sbnr.max); - if (!left--) - return sprintf(page, "Large %5d %5d %5d %5d \n", - ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, - card->lbnr.max); - if (!left--) - return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, - card->hbnr.min, card->hbnr.init, card->hbnr.max); - if (!left--) - return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, - card->iovnr.min, card->iovnr.init, card->iovnr.max); - if (!left--) - { - int retval; - retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); - card->intcnt = 0; - return retval; - } + u32 stat; + ns_dev *card; + int left; + + left = (int)*pos; + card = (ns_dev *) dev->dev_data; + stat = readl(card->membase + STAT); + if (!left--) + return sprintf(page, "Pool count min init max \n"); + if (!left--) + return sprintf(page, "Small %5d %5d %5d %5d \n", + ns_stat_sfbqc_get(stat), card->sbnr.min, + card->sbnr.init, card->sbnr.max); + if (!left--) + return sprintf(page, "Large %5d %5d %5d %5d \n", + ns_stat_lfbqc_get(stat), 
card->lbnr.min, + card->lbnr.init, card->lbnr.max); + if (!left--) + return sprintf(page, "Huge %5d %5d %5d %5d \n", + card->hbpool.count, card->hbnr.min, + card->hbnr.init, card->hbnr.max); + if (!left--) + return sprintf(page, "Iovec %5d %5d %5d %5d \n", + card->iovpool.count, card->iovnr.min, + card->iovnr.init, card->iovnr.max); + if (!left--) { + int retval; + retval = + sprintf(page, "Interrupt counter: %u \n", card->intcnt); + card->intcnt = 0; + return retval; + } #if 0 - /* Dump 25.6 Mbps PHY registers */ - /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it - here just in case it's needed for debugging. */ - if (card->max_pcr == ATM_25_PCR && !left--) - { - u32 phy_regs[4]; - u32 i; - - for (i = 0; i < 4; i++) - { - while (CMD_BUSY(card)); - writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); - while (CMD_BUSY(card)); - phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; - } - - return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", - phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); - } + /* Dump 25.6 Mbps PHY registers */ + /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it + here just in case it's needed for debugging. */ + if (card->max_pcr == ATM_25_PCR && !left--) { + u32 phy_regs[4]; + u32 i; + + for (i = 0; i < 4; i++) { + while (CMD_BUSY(card)) ; + writel(NS_CMD_READ_UTILITY | 0x00000200 | i, + card->membase + CMD); + while (CMD_BUSY(card)) ; + phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; + } + + return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", + phy_regs[0], phy_regs[1], phy_regs[2], + phy_regs[3]); + } #endif /* 0 - Dump 25.6 Mbps PHY registers */ #if 0 - /* Dump TST */ - if (left-- < NS_TST_NUM_ENTRIES) - { - if (card->tste2vc[left + 1] == NULL) - return sprintf(page, "%5d - VBR/UBR \n", left + 1); - else - return sprintf(page, "%5d - %d %d \n", left + 1, - card->tste2vc[left + 1]->tx_vcc->vpi, - card->tste2vc[left + 1]->tx_vcc->vci); - } + /* Dump TST */ + if (left-- < NS_TST_NUM_ENTRIES) { + if (card->tste2vc[left + 1] == NULL) + return sprintf(page, "%5d - VBR/UBR \n", left + 1); + else + return sprintf(page, "%5d - %d %d \n", left + 1, + card->tste2vc[left + 1]->tx_vcc->vpi, + card->tste2vc[left + 1]->tx_vcc->vci); + } #endif /* 0 */ - return 0; + return 0; } - - -static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) +static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg) { - ns_dev *card; - pool_levels pl; - long btype; - unsigned long flags; - - card = dev->dev_data; - switch (cmd) - { - case NS_GETPSTAT: - if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype)) - return -EFAULT; - switch (pl.buftype) - { - case NS_BUFTYPE_SMALL: - pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); - pl.level.min = card->sbnr.min; - pl.level.init = card->sbnr.init; - pl.level.max = card->sbnr.max; - break; - - case NS_BUFTYPE_LARGE: - pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); - pl.level.min = card->lbnr.min; - pl.level.init = card->lbnr.init; - pl.level.max = card->lbnr.max; - break; - - case NS_BUFTYPE_HUGE: - pl.count = card->hbpool.count; - pl.level.min = card->hbnr.min; - pl.level.init = card->hbnr.init; - pl.level.max = card->hbnr.max; - break; - - case NS_BUFTYPE_IOVEC: - pl.count = card->iovpool.count; - pl.level.min = card->iovnr.min; - pl.level.init = card->iovnr.init; - pl.level.max = card->iovnr.max; - break; - - default: - return -ENOIOCTLCMD; - - } - if (!copy_to_user((pool_levels __user *) 
arg, &pl, sizeof(pl))) - return (sizeof(pl)); - else - return -EFAULT; - - case NS_SETBUFLEV: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) - return -EFAULT; - if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) - return -EINVAL; - if (pl.level.min == 0) - return -EINVAL; - switch (pl.buftype) - { - case NS_BUFTYPE_SMALL: - if (pl.level.max > TOP_SB) - return -EINVAL; - card->sbnr.min = pl.level.min; - card->sbnr.init = pl.level.init; - card->sbnr.max = pl.level.max; - break; - - case NS_BUFTYPE_LARGE: - if (pl.level.max > TOP_LB) - return -EINVAL; - card->lbnr.min = pl.level.min; - card->lbnr.init = pl.level.init; - card->lbnr.max = pl.level.max; - break; - - case NS_BUFTYPE_HUGE: - if (pl.level.max > TOP_HB) - return -EINVAL; - card->hbnr.min = pl.level.min; - card->hbnr.init = pl.level.init; - card->hbnr.max = pl.level.max; - break; - - case NS_BUFTYPE_IOVEC: - if (pl.level.max > TOP_IOVB) - return -EINVAL; - card->iovnr.min = pl.level.min; - card->iovnr.init = pl.level.init; - card->iovnr.max = pl.level.max; - break; - - default: - return -EINVAL; - - } - return 0; - - case NS_ADJBUFLEV: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - btype = (long) arg; /* a long is the same size as a pointer or bigger */ - switch (btype) - { - case NS_BUFTYPE_SMALL: - while (card->sbfqc < card->sbnr.init) - { - struct sk_buff *sb; - - sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); - if (sb == NULL) - return -ENOMEM; - NS_SKB_CB(sb)->buf_type = BUF_SM; - skb_queue_tail(&card->sbpool.queue, sb); - skb_reserve(sb, NS_AAL0_HEADER); - push_rxbufs(card, sb); - } - break; - - case NS_BUFTYPE_LARGE: - while (card->lbfqc < card->lbnr.init) - { - struct sk_buff *lb; - - lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); - if (lb == NULL) - return -ENOMEM; - NS_SKB_CB(lb)->buf_type = BUF_LG; - skb_queue_tail(&card->lbpool.queue, lb); - skb_reserve(lb, NS_SMBUFSIZE); - push_rxbufs(card, lb); - } - break; - - case NS_BUFTYPE_HUGE: - while (card->hbpool.count > card->hbnr.init) - { - struct sk_buff *hb; - - spin_lock_irqsave(&card->int_lock, flags); - hb = skb_dequeue(&card->hbpool.queue); - card->hbpool.count--; - spin_unlock_irqrestore(&card->int_lock, flags); - if (hb == NULL) - printk("nicstar%d: huge buffer count inconsistent.\n", - card->index); - else - dev_kfree_skb_any(hb); - - } - while (card->hbpool.count < card->hbnr.init) - { - struct sk_buff *hb; - - hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); - if (hb == NULL) - return -ENOMEM; - NS_SKB_CB(hb)->buf_type = BUF_NONE; - spin_lock_irqsave(&card->int_lock, flags); - skb_queue_tail(&card->hbpool.queue, hb); - card->hbpool.count++; - spin_unlock_irqrestore(&card->int_lock, flags); - } - break; - - case NS_BUFTYPE_IOVEC: - while (card->iovpool.count > card->iovnr.init) - { - struct sk_buff *iovb; - - spin_lock_irqsave(&card->int_lock, flags); - iovb = skb_dequeue(&card->iovpool.queue); - card->iovpool.count--; - spin_unlock_irqrestore(&card->int_lock, flags); - if (iovb == NULL) - printk("nicstar%d: iovec buffer count inconsistent.\n", - card->index); - else - dev_kfree_skb_any(iovb); - - } - while (card->iovpool.count < card->iovnr.init) - { - struct sk_buff *iovb; - - iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); - if (iovb == NULL) - return -ENOMEM; - NS_SKB_CB(iovb)->buf_type = BUF_NONE; - spin_lock_irqsave(&card->int_lock, flags); - skb_queue_tail(&card->iovpool.queue, iovb); - card->iovpool.count++; - spin_unlock_irqrestore(&card->int_lock, flags); - } - break; - - 
default: - return -EINVAL; - - } - return 0; - - default: - if (dev->phy && dev->phy->ioctl) { - return dev->phy->ioctl(dev, cmd, arg); - } - else { - printk("nicstar%d: %s == NULL \n", card->index, - dev->phy ? "dev->phy->ioctl" : "dev->phy"); - return -ENOIOCTLCMD; - } - } + ns_dev *card; + pool_levels pl; + long btype; + unsigned long flags; + + card = dev->dev_data; + switch (cmd) { + case NS_GETPSTAT: + if (get_user + (pl.buftype, &((pool_levels __user *) arg)->buftype)) + return -EFAULT; + switch (pl.buftype) { + case NS_BUFTYPE_SMALL: + pl.count = + ns_stat_sfbqc_get(readl(card->membase + STAT)); + pl.level.min = card->sbnr.min; + pl.level.init = card->sbnr.init; + pl.level.max = card->sbnr.max; + break; + + case NS_BUFTYPE_LARGE: + pl.count = + ns_stat_lfbqc_get(readl(card->membase + STAT)); + pl.level.min = card->lbnr.min; + pl.level.init = card->lbnr.init; + pl.level.max = card->lbnr.max; + break; + + case NS_BUFTYPE_HUGE: + pl.count = card->hbpool.count; + pl.level.min = card->hbnr.min; + pl.level.init = card->hbnr.init; + pl.level.max = card->hbnr.max; + break; + + case NS_BUFTYPE_IOVEC: + pl.count = card->iovpool.count; + pl.level.min = card->iovnr.min; + pl.level.init = card->iovnr.init; + pl.level.max = card->iovnr.max; + break; + + default: + return -ENOIOCTLCMD; + + } + if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) + return (sizeof(pl)); + else + return -EFAULT; + + case NS_SETBUFLEV: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) + return -EFAULT; + if (pl.level.min >= pl.level.init + || pl.level.init >= pl.level.max) + return -EINVAL; + if (pl.level.min == 0) + return -EINVAL; + switch (pl.buftype) { + case NS_BUFTYPE_SMALL: + if (pl.level.max > TOP_SB) + return -EINVAL; + card->sbnr.min = pl.level.min; + card->sbnr.init = pl.level.init; + card->sbnr.max = pl.level.max; + break; + + case NS_BUFTYPE_LARGE: + if (pl.level.max > TOP_LB) + return -EINVAL; + card->lbnr.min = pl.level.min; + card->lbnr.init = pl.level.init; + card->lbnr.max = pl.level.max; + break; + + case NS_BUFTYPE_HUGE: + if (pl.level.max > TOP_HB) + return -EINVAL; + card->hbnr.min = pl.level.min; + card->hbnr.init = pl.level.init; + card->hbnr.max = pl.level.max; + break; + + case NS_BUFTYPE_IOVEC: + if (pl.level.max > TOP_IOVB) + return -EINVAL; + card->iovnr.min = pl.level.min; + card->iovnr.init = pl.level.init; + card->iovnr.max = pl.level.max; + break; + + default: + return -EINVAL; + + } + return 0; + + case NS_ADJBUFLEV: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + btype = (long)arg; /* a long is the same size as a pointer or bigger */ + switch (btype) { + case NS_BUFTYPE_SMALL: + while (card->sbfqc < card->sbnr.init) { + struct sk_buff *sb; + + sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); + if (sb == NULL) + return -ENOMEM; + NS_PRV_BUFTYPE(sb) = BUF_SM; + skb_queue_tail(&card->sbpool.queue, sb); + skb_reserve(sb, NS_AAL0_HEADER); + push_rxbufs(card, sb); + } + break; + + case NS_BUFTYPE_LARGE: + while (card->lbfqc < card->lbnr.init) { + struct sk_buff *lb; + + lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); + if (lb == NULL) + return -ENOMEM; + NS_PRV_BUFTYPE(lb) = BUF_LG; + skb_queue_tail(&card->lbpool.queue, lb); + skb_reserve(lb, NS_SMBUFSIZE); + push_rxbufs(card, lb); + } + break; + + case NS_BUFTYPE_HUGE: + while (card->hbpool.count > card->hbnr.init) { + struct sk_buff *hb; + + spin_lock_irqsave(&card->int_lock, flags); + hb = skb_dequeue(&card->hbpool.queue); + card->hbpool.count--; + 
spin_unlock_irqrestore(&card->int_lock, flags); + if (hb == NULL) + printk + ("nicstar%d: huge buffer count inconsistent.\n", + card->index); + else + dev_kfree_skb_any(hb); + + } + while (card->hbpool.count < card->hbnr.init) { + struct sk_buff *hb; + + hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); + if (hb == NULL) + return -ENOMEM; + NS_PRV_BUFTYPE(hb) = BUF_NONE; + spin_lock_irqsave(&card->int_lock, flags); + skb_queue_tail(&card->hbpool.queue, hb); + card->hbpool.count++; + spin_unlock_irqrestore(&card->int_lock, flags); + } + break; + + case NS_BUFTYPE_IOVEC: + while (card->iovpool.count > card->iovnr.init) { + struct sk_buff *iovb; + + spin_lock_irqsave(&card->int_lock, flags); + iovb = skb_dequeue(&card->iovpool.queue); + card->iovpool.count--; + spin_unlock_irqrestore(&card->int_lock, flags); + if (iovb == NULL) + printk + ("nicstar%d: iovec buffer count inconsistent.\n", + card->index); + else + dev_kfree_skb_any(iovb); + + } + while (card->iovpool.count < card->iovnr.init) { + struct sk_buff *iovb; + + iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); + if (iovb == NULL) + return -ENOMEM; + NS_PRV_BUFTYPE(iovb) = BUF_NONE; + spin_lock_irqsave(&card->int_lock, flags); + skb_queue_tail(&card->iovpool.queue, iovb); + card->iovpool.count++; + spin_unlock_irqrestore(&card->int_lock, flags); + } + break; + + default: + return -EINVAL; + + } + return 0; + + default: + if (dev->phy && dev->phy->ioctl) { + return dev->phy->ioctl(dev, cmd, arg); + } else { + printk("nicstar%d: %s == NULL \n", card->index, + dev->phy ? "dev->phy->ioctl" : "dev->phy"); + return -ENOIOCTLCMD; + } + } } - -static void which_list(ns_dev *card, struct sk_buff *skb) +#ifdef EXTRA_DEBUG +static void which_list(ns_dev * card, struct sk_buff *skb) { - printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type); + printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb)); } - +#endif /* EXTRA_DEBUG */ static void ns_poll(unsigned long arg) { - int i; - ns_dev *card; - unsigned long flags; - u32 stat_r, stat_w; - - PRINTK("nicstar: Entering ns_poll().\n"); - for (i = 0; i < num_cards; i++) - { - card = cards[i]; - if (spin_is_locked(&card->int_lock)) { - /* Probably it isn't worth spinning */ - continue; - } - spin_lock_irqsave(&card->int_lock, flags); - - stat_w = 0; - stat_r = readl(card->membase + STAT); - if (stat_r & NS_STAT_TSIF) - stat_w |= NS_STAT_TSIF; - if (stat_r & NS_STAT_EOPDU) - stat_w |= NS_STAT_EOPDU; - - process_tsq(card); - process_rsq(card); - - writel(stat_w, card->membase + STAT); - spin_unlock_irqrestore(&card->int_lock, flags); - } - mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); - PRINTK("nicstar: Leaving ns_poll().\n"); + int i; + ns_dev *card; + unsigned long flags; + u32 stat_r, stat_w; + + PRINTK("nicstar: Entering ns_poll().\n"); + for (i = 0; i < num_cards; i++) { + card = cards[i]; + if (spin_is_locked(&card->int_lock)) { + /* Probably it isn't worth spinning */ + continue; + } + spin_lock_irqsave(&card->int_lock, flags); + + stat_w = 0; + stat_r = readl(card->membase + STAT); + if (stat_r & NS_STAT_TSIF) + stat_w |= NS_STAT_TSIF; + if (stat_r & NS_STAT_EOPDU) + stat_w |= NS_STAT_EOPDU; + + process_tsq(card); + process_rsq(card); + + writel(stat_w, card->membase + STAT); + spin_unlock_irqrestore(&card->int_lock, flags); + } + mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); + PRINTK("nicstar: Leaving ns_poll().\n"); } - - static int ns_parse_mac(char *mac, unsigned char *esi) { - int i, j; - short byte1, byte0; - - if (mac == NULL || esi == NULL) - return -1; - j = 0; - for (i = 0; i < 6; i++) - 
{ - if ((byte1 = ns_h2i(mac[j++])) < 0) - return -1; - if ((byte0 = ns_h2i(mac[j++])) < 0) - return -1; - esi[i] = (unsigned char) (byte1 * 16 + byte0); - if (i < 5) - { - if (mac[j++] != ':') - return -1; - } - } - return 0; -} - - - -static short ns_h2i(char c) -{ - if (c >= '0' && c <= '9') - return (short) (c - '0'); - if (c >= 'A' && c <= 'F') - return (short) (c - 'A' + 10); - if (c >= 'a' && c <= 'f') - return (short) (c - 'a' + 10); - return -1; + int i, j; + short byte1, byte0; + + if (mac == NULL || esi == NULL) + return -1; + j = 0; + for (i = 0; i < 6; i++) { + if ((byte1 = hex_to_bin(mac[j++])) < 0) + return -1; + if ((byte0 = hex_to_bin(mac[j++])) < 0) + return -1; + esi[i] = (unsigned char)(byte1 * 16 + byte0); + if (i < 5) { + if (mac[j++] != ':') + return -1; + } + } + return 0; } - static void ns_phy_put(struct atm_dev *dev, unsigned char value, - unsigned long addr) + unsigned long addr) { - ns_dev *card; - unsigned long flags; - - card = dev->dev_data; - spin_lock_irqsave(&card->res_lock, flags); - while(CMD_BUSY(card)); - writel((unsigned long) value, card->membase + DR0); - writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), - card->membase + CMD); - spin_unlock_irqrestore(&card->res_lock, flags); + ns_dev *card; + unsigned long flags; + + card = dev->dev_data; + spin_lock_irqsave(&card->res_lock, flags); + while (CMD_BUSY(card)) ; + writel((u32) value, card->membase + DR0); + writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), + card->membase + CMD); + spin_unlock_irqrestore(&card->res_lock, flags); } - - static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) { - ns_dev *card; - unsigned long flags; - unsigned long data; - - card = dev->dev_data; - spin_lock_irqsave(&card->res_lock, flags); - while(CMD_BUSY(card)); - writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), - card->membase + CMD); - while(CMD_BUSY(card)); - data = readl(card->membase + DR0) & 0x000000FF; - spin_unlock_irqrestore(&card->res_lock, flags); - return (unsigned char) data; + ns_dev *card; + unsigned long flags; + u32 data; + + card = dev->dev_data; + spin_lock_irqsave(&card->res_lock, flags); + while (CMD_BUSY(card)) ; + writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), + card->membase + CMD); + while (CMD_BUSY(card)) ; + data = readl(card->membase + DR0) & 0x000000FF; + spin_unlock_irqrestore(&card->res_lock, flags); + return (unsigned char)data; } - - module_init(nicstar_init); module_exit(nicstar_cleanup); diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h index 6010e3daa6a21fd6e6c5fd23dd32cf9634497593..9bc27ea5088e5776b0a23ce52bbe5c87216151a9 100644 --- a/drivers/atm/nicstar.h +++ b/drivers/atm/nicstar.h @@ -1,5 +1,4 @@ -/****************************************************************************** - * +/* * nicstar.h * * Header file for the nicstar device driver. @@ -8,29 +7,26 @@ * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 * * (C) INESC 1998 - * - ******************************************************************************/ - + */ #ifndef _LINUX_NICSTAR_H_ #define _LINUX_NICSTAR_H_ - -/* Includes *******************************************************************/ +/* Includes */ #include #include +#include #include #include #include #include - -/* Options ********************************************************************/ +/* Options */ #define NS_MAX_CARDS 4 /* Maximum number of NICStAR based cards controlled by the device driver. 
Must - be <= 5 */ + be <= 5 */ #undef RCQ_SUPPORT /* Do not define this for now */ @@ -43,7 +39,7 @@ #define NS_VPIBITS 2 /* 0, 1, 2, or 8 */ #define NS_MAX_RCTSIZE 4096 /* Number of entries. 4096 or 16384. - Define 4096 only if (all) your card(s) + Define 4096 only if (all) your card(s) have 32K x 32bit SRAM, in which case setting this to 16384 will just waste a lot of memory. @@ -51,33 +47,32 @@ 128K x 32bit SRAM will limit the maximum VCI. */ -/*#define NS_PCI_LATENCY 64*/ /* Must be a multiple of 32 */ + /*#define NS_PCI_LATENCY 64*//* Must be a multiple of 32 */ /* Number of buffers initially allocated */ -#define NUM_SB 32 /* Must be even */ -#define NUM_LB 24 /* Must be even */ -#define NUM_HB 8 /* Pre-allocated huge buffers */ -#define NUM_IOVB 48 /* Iovec buffers */ +#define NUM_SB 32 /* Must be even */ +#define NUM_LB 24 /* Must be even */ +#define NUM_HB 8 /* Pre-allocated huge buffers */ +#define NUM_IOVB 48 /* Iovec buffers */ /* Lower level for count of buffers */ -#define MIN_SB 8 /* Must be even */ -#define MIN_LB 8 /* Must be even */ +#define MIN_SB 8 /* Must be even */ +#define MIN_LB 8 /* Must be even */ #define MIN_HB 6 #define MIN_IOVB 8 /* Upper level for count of buffers */ -#define MAX_SB 64 /* Must be even, <= 508 */ -#define MAX_LB 48 /* Must be even, <= 508 */ +#define MAX_SB 64 /* Must be even, <= 508 */ +#define MAX_LB 48 /* Must be even, <= 508 */ #define MAX_HB 10 #define MAX_IOVB 80 /* These are the absolute maximum allowed for the ioctl() */ -#define TOP_SB 256 /* Must be even, <= 508 */ -#define TOP_LB 128 /* Must be even, <= 508 */ +#define TOP_SB 256 /* Must be even, <= 508 */ +#define TOP_LB 128 /* Must be even, <= 508 */ #define TOP_HB 64 #define TOP_IOVB 256 - #define MAX_TBD_PER_VC 1 /* Number of TBDs before a TSR */ #define MAX_TBD_PER_SCQ 10 /* Only meaningful for variable rate SCQs */ @@ -89,15 +84,12 @@ #define PCR_TOLERANCE (1.0001) - - -/* ESI stuff ******************************************************************/ +/* ESI stuff */ #define NICSTAR_EPROM_MAC_ADDR_OFFSET 0x6C #define NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT 0xF6 - -/* #defines *******************************************************************/ +/* #defines */ #define NS_IOREMAP_SIZE 4096 @@ -123,22 +115,19 @@ #define NS_SMSKBSIZE (NS_SMBUFSIZE + NS_AAL0_HEADER) #define NS_LGSKBSIZE (NS_SMBUFSIZE + NS_LGBUFSIZE) +/* NICStAR structures located in host memory */ -/* NICStAR structures located in host memory **********************************/ - - - -/* RSQ - Receive Status Queue +/* + * RSQ - Receive Status Queue * * Written by the NICStAR, read by the device driver. */ -typedef struct ns_rsqe -{ - u32 word_1; - u32 buffer_handle; - u32 final_aal5_crc32; - u32 word_4; +typedef struct ns_rsqe { + u32 word_1; + u32 buffer_handle; + u32 final_aal5_crc32; + u32 word_4; } ns_rsqe; #define ns_rsqe_vpi(ns_rsqep) \ @@ -175,30 +164,27 @@ typedef struct ns_rsqe #define ns_rsqe_cellcount(ns_rsqep) \ (le32_to_cpu((ns_rsqep)->word_4) & 0x000001FF) #define ns_rsqe_init(ns_rsqep) \ - ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) + ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000)) #define NS_RSQ_NUM_ENTRIES (NS_RSQSIZE / 16) #define NS_RSQ_ALIGNMENT NS_RSQSIZE - - -/* RCQ - Raw Cell Queue +/* + * RCQ - Raw Cell Queue * * Written by the NICStAR, read by the device driver. 
*/ -typedef struct cell_payload -{ - u32 word[12]; +typedef struct cell_payload { + u32 word[12]; } cell_payload; -typedef struct ns_rcqe -{ - u32 word_1; - u32 word_2; - u32 word_3; - u32 word_4; - cell_payload payload; +typedef struct ns_rcqe { + u32 word_1; + u32 word_2; + u32 word_3; + u32 word_4; + cell_payload payload; } ns_rcqe; #define NS_RCQE_SIZE 64 /* bytes */ @@ -210,28 +196,25 @@ typedef struct ns_rcqe #define ns_rcqe_nextbufhandle(ns_rcqep) \ (le32_to_cpu((ns_rcqep)->word_2)) - - -/* SCQ - Segmentation Channel Queue +/* + * SCQ - Segmentation Channel Queue * * Written by the device driver, read by the NICStAR. */ -typedef struct ns_scqe -{ - u32 word_1; - u32 word_2; - u32 word_3; - u32 word_4; +typedef struct ns_scqe { + u32 word_1; + u32 word_2; + u32 word_3; + u32 word_4; } ns_scqe; /* NOTE: SCQ entries can be either a TBD (Transmit Buffer Descriptors) - or TSR (Transmit Status Requests) */ + or TSR (Transmit Status Requests) */ #define NS_SCQE_TYPE_TBD 0x00000000 #define NS_SCQE_TYPE_TSR 0x80000000 - #define NS_TBD_EOPDU 0x40000000 #define NS_TBD_AAL0 0x00000000 #define NS_TBD_AAL34 0x04000000 @@ -253,10 +236,9 @@ typedef struct ns_scqe #define ns_tbd_mkword_4(gfc, vpi, vci, pt, clp) \ (cpu_to_le32((gfc) << 28 | (vpi) << 20 | (vci) << 4 | (pt) << 1 | (clp))) - #define NS_TSR_INTENABLE 0x20000000 -#define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */ +#define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */ #define ns_tsr_mkword_1(flags) \ (cpu_to_le32(NS_SCQE_TYPE_TSR | (flags))) @@ -273,22 +255,20 @@ typedef struct ns_scqe #define NS_SCQE_SIZE 16 - - -/* TSQ - Transmit Status Queue +/* + * TSQ - Transmit Status Queue * * Written by the NICStAR, read by the device driver. */ -typedef struct ns_tsi -{ - u32 word_1; - u32 word_2; +typedef struct ns_tsi { + u32 word_1; + u32 word_2; } ns_tsi; /* NOTE: The first word can be a status word copied from the TSR which - originated the TSI, or a timer overflow indicator. In this last - case, the value of the first word is all zeroes. */ + originated the TSI, or a timer overflow indicator. In this last + case, the value of the first word is all zeroes. */ #define NS_TSI_EMPTY 0x80000000 #define NS_TSI_TIMESTAMP_MASK 0x00FFFFFF @@ -301,12 +281,10 @@ typedef struct ns_tsi #define ns_tsi_init(ns_tsip) \ ((ns_tsip)->word_2 = cpu_to_le32(NS_TSI_EMPTY)) - #define NS_TSQSIZE 8192 #define NS_TSQ_NUM_ENTRIES 1024 #define NS_TSQ_ALIGNMENT 8192 - #define NS_TSI_SCDISVBR NS_TSR_SCDISVBR #define ns_tsi_tmrof(ns_tsip) \ @@ -316,26 +294,22 @@ typedef struct ns_tsi #define ns_tsi_getscqpos(ns_tsip) \ (le32_to_cpu((ns_tsip)->word_1) & 0x00007FFF) +/* NICStAR structures located in local SRAM */ - -/* NICStAR structures located in local SRAM ***********************************/ - - - -/* RCT - Receive Connection Table +/* + * RCT - Receive Connection Table * * Written by both the NICStAR and the device driver. */ -typedef struct ns_rcte -{ - u32 word_1; - u32 buffer_handle; - u32 dma_address; - u32 aal5_crc32; +typedef struct ns_rcte { + u32 word_1; + u32 buffer_handle; + u32 dma_address; + u32 aal5_crc32; } ns_rcte; -#define NS_RCTE_BSFB 0x00200000 /* Rev. D only */ +#define NS_RCTE_BSFB 0x00200000 /* Rev. D only */ #define NS_RCTE_NZGFC 0x00100000 #define NS_RCTE_CONNECTOPEN 0x00080000 #define NS_RCTE_AALMASK 0x00070000 @@ -358,25 +332,21 @@ typedef struct ns_rcte #define NS_RCT_ENTRY_SIZE 4 /* Number of dwords */ /* NOTE: We could make macros to contruct the first word of the RCTE, - but that doesn't seem to make much sense... 
*/ + but that doesn't seem to make much sense... */ - - -/* FBD - Free Buffer Descriptor +/* + * FBD - Free Buffer Descriptor * * Written by the device driver using via the command register. */ -typedef struct ns_fbd -{ - u32 buffer_handle; - u32 dma_address; +typedef struct ns_fbd { + u32 buffer_handle; + u32 dma_address; } ns_fbd; - - - -/* TST - Transmit Schedule Table +/* + * TST - Transmit Schedule Table * * Written by the device driver. */ @@ -385,40 +355,38 @@ typedef u32 ns_tste; #define NS_TST_OPCODE_MASK 0x60000000 -#define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */ -#define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */ +#define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */ +#define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */ #define NS_TST_OPCODE_VARIABLE 0x40000000 -#define NS_TST_OPCODE_END 0x60000000 /* Jump */ +#define NS_TST_OPCODE_END 0x60000000 /* Jump */ #define ns_tste_make(opcode, sramad) (opcode | sramad) /* NOTE: - When the opcode is FIXED, sramad specifies the SRAM address of the - SCD for that fixed rate channel. + SCD for that fixed rate channel. - When the opcode is END, sramad specifies the SRAM address of the - location of the next TST entry to read. + location of the next TST entry to read. */ - - -/* SCD - Segmentation Channel Descriptor +/* + * SCD - Segmentation Channel Descriptor * * Written by both the device driver and the NICStAR */ -typedef struct ns_scd -{ - u32 word_1; - u32 word_2; - u32 partial_aal5_crc; - u32 reserved; - ns_scqe cache_a; - ns_scqe cache_b; +typedef struct ns_scd { + u32 word_1; + u32 word_2; + u32 partial_aal5_crc; + u32 reserved; + ns_scqe cache_a; + ns_scqe cache_b; } ns_scd; -#define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */ -#define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */ +#define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */ +#define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */ #define NS_SCD_TAIL_MASK_VAR 0x00001FF0 #define NS_SCD_TAIL_MASK_FIX 0x000003F0 #define NS_SCD_HEAD_MASK_VAR 0x00001FF0 @@ -426,13 +394,9 @@ typedef struct ns_scd #define NS_SCD_XMITFOREVER 0x02000000 /* NOTE: There are other fields in word 2 of the SCD, but as they should - not be needed in the device driver they are not defined here. */ - - - - -/* NICStAR local SRAM memory map **********************************************/ + not be needed in the device driver they are not defined here. 
*/ +/* NICStAR local SRAM memory map */ #define NS_RCT 0x00000 #define NS_RCT_32_END 0x03FFF @@ -455,100 +419,93 @@ typedef struct ns_scd #define NS_LGFBQ 0x1FC00 #define NS_LGFBQ_END 0x1FFFF - - -/* NISCtAR operation registers ************************************************/ - +/* NISCtAR operation registers */ /* See Section 3.4 of `IDT77211 NICStAR User Manual' from www.idt.com */ -enum ns_regs -{ - DR0 = 0x00, /* Data Register 0 R/W*/ - DR1 = 0x04, /* Data Register 1 W */ - DR2 = 0x08, /* Data Register 2 W */ - DR3 = 0x0C, /* Data Register 3 W */ - CMD = 0x10, /* Command W */ - CFG = 0x14, /* Configuration R/W */ - STAT = 0x18, /* Status R/W */ - RSQB = 0x1C, /* Receive Status Queue Base W */ - RSQT = 0x20, /* Receive Status Queue Tail R */ - RSQH = 0x24, /* Receive Status Queue Head W */ - CDC = 0x28, /* Cell Drop Counter R/clear */ - VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */ - ICC = 0x30, /* Invalid Cell Count R/clear */ - RAWCT = 0x34, /* Raw Cell Tail R */ - TMR = 0x38, /* Timer R */ - TSTB = 0x3C, /* Transmit Schedule Table Base R/W */ - TSQB = 0x40, /* Transmit Status Queue Base W */ - TSQT = 0x44, /* Transmit Status Queue Tail R */ - TSQH = 0x48, /* Transmit Status Queue Head W */ - GP = 0x4C, /* General Purpose R/W */ - VPM = 0x50 /* VPI/VCI Mask W */ +enum ns_regs { + DR0 = 0x00, /* Data Register 0 R/W */ + DR1 = 0x04, /* Data Register 1 W */ + DR2 = 0x08, /* Data Register 2 W */ + DR3 = 0x0C, /* Data Register 3 W */ + CMD = 0x10, /* Command W */ + CFG = 0x14, /* Configuration R/W */ + STAT = 0x18, /* Status R/W */ + RSQB = 0x1C, /* Receive Status Queue Base W */ + RSQT = 0x20, /* Receive Status Queue Tail R */ + RSQH = 0x24, /* Receive Status Queue Head W */ + CDC = 0x28, /* Cell Drop Counter R/clear */ + VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */ + ICC = 0x30, /* Invalid Cell Count R/clear */ + RAWCT = 0x34, /* Raw Cell Tail R */ + TMR = 0x38, /* Timer R */ + TSTB = 0x3C, /* Transmit Schedule Table Base R/W */ + TSQB = 0x40, /* Transmit Status Queue Base W */ + TSQT = 0x44, /* Transmit Status Queue Tail R */ + TSQH = 0x48, /* Transmit Status Queue Head W */ + GP = 0x4C, /* General Purpose R/W */ + VPM = 0x50 /* VPI/VCI Mask W */ }; - -/* NICStAR commands issued to the CMD register ********************************/ - +/* NICStAR commands issued to the CMD register */ /* Top 4 bits are command opcode, lower 28 are parameters. 
*/ #define NS_CMD_NO_OPERATION 0x00000000 - /* params always 0 */ + /* params always 0 */ #define NS_CMD_OPENCLOSE_CONNECTION 0x20000000 - /* b19{1=open,0=close} b18-2{SRAM addr} */ + /* b19{1=open,0=close} b18-2{SRAM addr} */ #define NS_CMD_WRITE_SRAM 0x40000000 - /* b18-2{SRAM addr} b1-0{burst size} */ + /* b18-2{SRAM addr} b1-0{burst size} */ #define NS_CMD_READ_SRAM 0x50000000 - /* b18-2{SRAM addr} */ + /* b18-2{SRAM addr} */ #define NS_CMD_WRITE_FREEBUFQ 0x60000000 - /* b0{large buf indicator} */ + /* b0{large buf indicator} */ #define NS_CMD_READ_UTILITY 0x80000000 - /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ + /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ #define NS_CMD_WRITE_UTILITY 0x90000000 - /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ + /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */ #define NS_CMD_OPEN_CONNECTION (NS_CMD_OPENCLOSE_CONNECTION | 0x00080000) #define NS_CMD_CLOSE_CONNECTION NS_CMD_OPENCLOSE_CONNECTION - -/* NICStAR configuration bits *************************************************/ - -#define NS_CFG_SWRST 0x80000000 /* Software Reset */ -#define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */ -#define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */ -#define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */ -#define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue - Interrupt Enable */ -#define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */ -#define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */ -#define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */ -#define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */ -#define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */ -#define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */ -#define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt - Handling */ -#define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */ -#define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full - Interrupt Enable */ -#define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */ -#define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt - Enable */ -#define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */ -#define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt - Enable */ -#define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt - Enable */ -#define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */ -#define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full - Interrupt Enable */ -#define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */ +/* NICStAR configuration bits */ + +#define NS_CFG_SWRST 0x80000000 /* Software Reset */ +#define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */ +#define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */ +#define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */ +#define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue + Interrupt Enable */ +#define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */ +#define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */ +#define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */ +#define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */ +#define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */ +#define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */ +#define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt + Handling */ 
+#define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */ +#define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full + Interrupt Enable */ +#define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */ +#define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt + Enable */ +#define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */ +#define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt + Enable */ +#define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt + Enable */ +#define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */ +#define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full + Interrupt Enable */ +#define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */ #define NS_CFG_SMBUFSIZE_48 0x00000000 #define NS_CFG_SMBUFSIZE_96 0x08000000 @@ -579,33 +536,29 @@ enum ns_regs #define NS_CFG_RXINT_624US 0x00003000 #define NS_CFG_RXINT_899US 0x00004000 - -/* NICStAR STATus bits ********************************************************/ - -#define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */ -#define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */ -#define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */ -#define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */ -#define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */ -#define NS_STAT_TMROF 0x00000800 /* Timer Overflow */ -#define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */ -#define NS_STAT_CMDBZ 0x00000200 /* Command Busy */ -#define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */ -#define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */ -#define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */ -#define NS_STAT_EOPDU 0x00000020 /* End of PDU */ -#define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */ -#define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */ -#define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */ -#define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */ +/* NICStAR STATus bits */ + +#define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */ +#define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */ +#define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */ +#define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */ +#define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */ +#define NS_STAT_TMROF 0x00000800 /* Timer Overflow */ +#define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */ +#define NS_STAT_CMDBZ 0x00000200 /* Command Busy */ +#define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */ +#define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */ +#define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */ +#define NS_STAT_EOPDU 0x00000020 /* End of PDU */ +#define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */ +#define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */ +#define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */ +#define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */ #define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23) #define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15) - - -/* #defines which depend on other #defines ************************************/ - +/* #defines which depend on other #defines */ #define NS_TST0 NS_TST_FRSCD #define NS_TST1 (NS_TST_FRSCD + NS_TST_NUM_ENTRIES + 1) @@ -672,8 +625,7 @@ enum ns_regs #define NS_CFG_TSQFIE_OPT 0x00000000 #endif /* ENABLE_TSQFIE */ - -/* PCI stuff 
******************************************************************/ +/* PCI stuff */ #ifndef PCI_VENDOR_ID_IDT #define PCI_VENDOR_ID_IDT 0x111D @@ -683,138 +635,124 @@ enum ns_regs #define PCI_DEVICE_ID_IDT_IDT77201 0x0001 #endif /* PCI_DEVICE_ID_IDT_IDT77201 */ +/* Device driver structures */ - -/* Device driver structures ***************************************************/ - - -struct ns_skb_cb { - u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ +struct ns_skb_prv { + u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ + u32 dma; + int iovcnt; }; -#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb)) - -typedef struct tsq_info -{ - void *org; - ns_tsi *base; - ns_tsi *next; - ns_tsi *last; +#define NS_PRV_BUFTYPE(skb) \ + (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->buf_type) +#define NS_PRV_DMA(skb) \ + (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->dma) +#define NS_PRV_IOVCNT(skb) \ + (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->iovcnt) + +typedef struct tsq_info { + void *org; + dma_addr_t dma; + ns_tsi *base; + ns_tsi *next; + ns_tsi *last; } tsq_info; - -typedef struct scq_info -{ - void *org; - ns_scqe *base; - ns_scqe *last; - ns_scqe *next; - volatile ns_scqe *tail; /* Not related to the nicstar register */ - unsigned num_entries; - struct sk_buff **skb; /* Pointer to an array of pointers - to the sk_buffs used for tx */ - u32 scd; /* SRAM address of the corresponding - SCD */ - int tbd_count; /* Only meaningful on variable rate */ - wait_queue_head_t scqfull_waitq; - volatile char full; /* SCQ full indicator */ - spinlock_t lock; /* SCQ spinlock */ +typedef struct scq_info { + void *org; + dma_addr_t dma; + ns_scqe *base; + ns_scqe *last; + ns_scqe *next; + volatile ns_scqe *tail; /* Not related to the nicstar register */ + unsigned num_entries; + struct sk_buff **skb; /* Pointer to an array of pointers + to the sk_buffs used for tx */ + u32 scd; /* SRAM address of the corresponding + SCD */ + int tbd_count; /* Only meaningful on variable rate */ + wait_queue_head_t scqfull_waitq; + volatile char full; /* SCQ full indicator */ + spinlock_t lock; /* SCQ spinlock */ } scq_info; - - -typedef struct rsq_info -{ - void *org; - ns_rsqe *base; - ns_rsqe *next; - ns_rsqe *last; +typedef struct rsq_info { + void *org; + dma_addr_t dma; + ns_rsqe *base; + ns_rsqe *next; + ns_rsqe *last; } rsq_info; - -typedef struct skb_pool -{ - volatile int count; /* number of buffers in the queue */ - struct sk_buff_head queue; +typedef struct skb_pool { + volatile int count; /* number of buffers in the queue */ + struct sk_buff_head queue; } skb_pool; /* NOTE: for small and large buffer pools, the count is not used, as the actual value used for buffer management is the one read from the card. */ - -typedef struct vc_map -{ - volatile unsigned int tx:1; /* TX vc? */ - volatile unsigned int rx:1; /* RX vc? */ - struct atm_vcc *tx_vcc, *rx_vcc; - struct sk_buff *rx_iov; /* RX iovector skb */ - scq_info *scq; /* To keep track of the SCQ */ - u32 cbr_scd; /* SRAM address of the corresponding - SCD. 0x00000000 for UBR/VBR/ABR */ - int tbd_count; +typedef struct vc_map { + volatile unsigned int tx:1; /* TX vc? */ + volatile unsigned int rx:1; /* RX vc? */ + struct atm_vcc *tx_vcc, *rx_vcc; + struct sk_buff *rx_iov; /* RX iovector skb */ + scq_info *scq; /* To keep track of the SCQ */ + u32 cbr_scd; /* SRAM address of the corresponding + SCD. 
0x00000000 for UBR/VBR/ABR */ + int tbd_count; } vc_map; - -struct ns_skb_data -{ - struct atm_vcc *vcc; - int iovcnt; -}; - -#define NS_SKB(skb) (((struct ns_skb_data *) (skb)->cb)) - - -typedef struct ns_dev -{ - int index; /* Card ID to the device driver */ - int sram_size; /* In k x 32bit words. 32 or 128 */ - void __iomem *membase; /* Card's memory base address */ - unsigned long max_pcr; - int rct_size; /* Number of entries */ - int vpibits; - int vcibits; - struct pci_dev *pcidev; - struct atm_dev *atmdev; - tsq_info tsq; - rsq_info rsq; - scq_info *scq0, *scq1, *scq2; /* VBR SCQs */ - skb_pool sbpool; /* Small buffers */ - skb_pool lbpool; /* Large buffers */ - skb_pool hbpool; /* Pre-allocated huge buffers */ - skb_pool iovpool; /* iovector buffers */ - volatile int efbie; /* Empty free buf. queue int. enabled */ - volatile u32 tst_addr; /* SRAM address of the TST in use */ - volatile int tst_free_entries; - vc_map vcmap[NS_MAX_RCTSIZE]; - vc_map *tste2vc[NS_TST_NUM_ENTRIES]; - vc_map *scd2vc[NS_FRSCD_NUM]; - buf_nr sbnr; - buf_nr lbnr; - buf_nr hbnr; - buf_nr iovnr; - int sbfqc; - int lbfqc; - u32 sm_handle; - u32 sm_addr; - u32 lg_handle; - u32 lg_addr; - struct sk_buff *rcbuf; /* Current raw cell buffer */ - u32 rawch; /* Raw cell queue head */ - unsigned intcnt; /* Interrupt counter */ - spinlock_t int_lock; /* Interrupt lock */ - spinlock_t res_lock; /* Card resource lock */ +typedef struct ns_dev { + int index; /* Card ID to the device driver */ + int sram_size; /* In k x 32bit words. 32 or 128 */ + void __iomem *membase; /* Card's memory base address */ + unsigned long max_pcr; + int rct_size; /* Number of entries */ + int vpibits; + int vcibits; + struct pci_dev *pcidev; + struct idr idr; + struct atm_dev *atmdev; + tsq_info tsq; + rsq_info rsq; + scq_info *scq0, *scq1, *scq2; /* VBR SCQs */ + skb_pool sbpool; /* Small buffers */ + skb_pool lbpool; /* Large buffers */ + skb_pool hbpool; /* Pre-allocated huge buffers */ + skb_pool iovpool; /* iovector buffers */ + volatile int efbie; /* Empty free buf. queue int. enabled */ + volatile u32 tst_addr; /* SRAM address of the TST in use */ + volatile int tst_free_entries; + vc_map vcmap[NS_MAX_RCTSIZE]; + vc_map *tste2vc[NS_TST_NUM_ENTRIES]; + vc_map *scd2vc[NS_FRSCD_NUM]; + buf_nr sbnr; + buf_nr lbnr; + buf_nr hbnr; + buf_nr iovnr; + int sbfqc; + int lbfqc; + struct sk_buff *sm_handle; + u32 sm_addr; + struct sk_buff *lg_handle; + u32 lg_addr; + struct sk_buff *rcbuf; /* Current raw cell buffer */ + struct ns_rcqe *rawcell; + u32 rawch; /* Raw cell queue head */ + unsigned intcnt; /* Interrupt counter */ + spinlock_t int_lock; /* Interrupt lock */ + spinlock_t res_lock; /* Card resource lock */ } ns_dev; - /* NOTE: Each tste2vc entry relates a given TST entry to the corresponding - CBR vc. If the entry is not allocated, it must be NULL. - - There are two TSTs so the driver can modify them on the fly - without stopping the transmission. - - scd2vc allows us to find out unused fixed rate SCDs, because - they must have a NULL pointer here. */ + CBR vc. If the entry is not allocated, it must be NULL. + + There are two TSTs so the driver can modify them on the fly + without stopping the transmission. + scd2vc allows us to find out unused fixed rate SCDs, because + they must have a NULL pointer here. 
*/ #endif /* _LINUX_NICSTAR_H_ */ diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c index 842e26c45557f850d4f3d0280f353eae4adb71e6..f594526f8c6dc8f632c67af54c8dbe0488e2261b 100644 --- a/drivers/atm/nicstarmac.c +++ b/drivers/atm/nicstarmac.c @@ -13,15 +13,15 @@ typedef void __iomem *virt_addr_t; #define CYCLE_DELAY 5 -/* This was the original definition +/* + This was the original definition #define osp_MicroDelay(microsec) \ do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) */ #define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \ udelay((useconds));} - - -/* The following tables represent the timing diagrams found in +/* + * The following tables represent the timing diagrams found in * the Data Sheet for the Xicor X25020 EEProm. The #defines below * represent the bits in the NICStAR's General Purpose register * that must be toggled for the corresponding actions on the EEProm @@ -31,86 +31,80 @@ typedef void __iomem *virt_addr_t; /* Write Data To EEProm from SI line on rising edge of CLK */ /* Read Data From EEProm on falling edge of CLK */ -#define CS_HIGH 0x0002 /* Chip select high */ -#define CS_LOW 0x0000 /* Chip select low (active low)*/ -#define CLK_HIGH 0x0004 /* Clock high */ -#define CLK_LOW 0x0000 /* Clock low */ -#define SI_HIGH 0x0001 /* Serial input data high */ -#define SI_LOW 0x0000 /* Serial input data low */ +#define CS_HIGH 0x0002 /* Chip select high */ +#define CS_LOW 0x0000 /* Chip select low (active low) */ +#define CLK_HIGH 0x0004 /* Clock high */ +#define CLK_LOW 0x0000 /* Clock low */ +#define SI_HIGH 0x0001 /* Serial input data high */ +#define SI_LOW 0x0000 /* Serial input data low */ /* Read Status Register = 0000 0101b */ #if 0 -static u_int32_t rdsrtab[] = -{ - CS_HIGH | CLK_HIGH, - CS_LOW | CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW | SI_HIGH, - CLK_HIGH | SI_HIGH, /* 1 */ - CLK_LOW | SI_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW | SI_HIGH, - CLK_HIGH | SI_HIGH /* 1 */ +static u_int32_t rdsrtab[] = { + CS_HIGH | CLK_HIGH, + CS_LOW | CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW | SI_HIGH, + CLK_HIGH | SI_HIGH, /* 1 */ + CLK_LOW | SI_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW | SI_HIGH, + CLK_HIGH | SI_HIGH /* 1 */ }; -#endif /* 0 */ - +#endif /* 0 */ /* Read from EEPROM = 0000 0011b */ -static u_int32_t readtab[] = -{ - /* - CS_HIGH | CLK_HIGH, - */ - CS_LOW | CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW, - CLK_HIGH, /* 0 */ - CLK_LOW | SI_HIGH, - CLK_HIGH | SI_HIGH, /* 1 */ - CLK_LOW | SI_HIGH, - CLK_HIGH | SI_HIGH /* 1 */ +static u_int32_t readtab[] = { + /* + CS_HIGH | CLK_HIGH, + */ + CS_LOW | CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW, + CLK_HIGH, /* 0 */ + CLK_LOW | SI_HIGH, + CLK_HIGH | SI_HIGH, /* 1 */ + CLK_LOW | SI_HIGH, + CLK_HIGH | SI_HIGH /* 1 */ }; - /* Clock to read from/write to the eeprom */ -static u_int32_t clocktab[] = -{ - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW, - CLK_HIGH, - CLK_LOW +static u_int32_t 
clocktab[] = { + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW, + CLK_HIGH, + CLK_LOW }; - #define NICSTAR_REG_WRITE(bs, reg, val) \ while ( readl(bs + STAT) & 0x0200 ) ; \ writel((val),(base)+(reg)) @@ -124,153 +118,131 @@ static u_int32_t clocktab[] = * register. */ #if 0 -u_int32_t -nicstar_read_eprom_status( virt_addr_t base ) +u_int32_t nicstar_read_eprom_status(virt_addr_t base) { - u_int32_t val; - u_int32_t rbyte; - int32_t i, j; - - /* Send read instruction */ - val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0; - - for (i=0; i=0; i--) - { - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, - (val | clocktab[j++]) ); - rbyte |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE) - & 0x00010000) >> 16) << i); - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, - (val | clocktab[j++]) ); - osp_MicroDelay( CYCLE_DELAY ); - } - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 ); - osp_MicroDelay( CYCLE_DELAY ); - return rbyte; + u_int32_t val; + u_int32_t rbyte; + int32_t i, j; + + /* Send read instruction */ + val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; + + for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) { + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | rdsrtab[i])); + osp_MicroDelay(CYCLE_DELAY); + } + + /* Done sending instruction - now pull data off of bit 16, MSB first */ + /* Data clocked out of eeprom on falling edge of clock */ + + rbyte = 0; + for (i = 7, j = 0; i >= 0; i--) { + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | clocktab[j++])); + rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) + & 0x00010000) >> 16) << i); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | clocktab[j++])); + osp_MicroDelay(CYCLE_DELAY); + } + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2); + osp_MicroDelay(CYCLE_DELAY); + return rbyte; } -#endif /* 0 */ - +#endif /* 0 */ /* * This routine will clock the Read_data function into the X2520 * eeprom, followed by the address to read from, through the NicSTaR's General * Purpose register. 
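   As a sketch only (hypothetical helper, not in the driver): the X25020
   latches SI on the rising edge of CLK, so clocking out a single command,
   address or data bit amounts to presenting it with CLK low and then
   raising CLK:

       static void clock_out_bit(virt_addr_t base, u_int32_t val, int bit)
       {
               u_int32_t si = bit ? SI_HIGH : SI_LOW;

               // present the bit while CLK is low ...
               NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
                                 (val | CLK_LOW | si));
               osp_MicroDelay(CYCLE_DELAY);
               // ... and latch it on the rising edge
               NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
                                 (val | CLK_HIGH | si));
               osp_MicroDelay(CYCLE_DELAY);
       }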
*/ - -static u_int8_t -read_eprom_byte(virt_addr_t base, u_int8_t offset) + +static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset) { - u_int32_t val = 0; - int i,j=0; - u_int8_t tempread = 0; - - val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0; - - /* Send READ instruction */ - for (i=0; i=0; i--) - { - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, - (val | clocktab[j++] | ((offset >> i) & 1) ) ); - osp_MicroDelay(CYCLE_DELAY); - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, - (val | clocktab[j++] | ((offset >> i) & 1) ) ); - osp_MicroDelay( CYCLE_DELAY ); - } - - j = 0; - - /* Now, we can read data from the eeprom by clocking it in */ - for (i=7; i>=0; i--) - { - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, - (val | clocktab[j++]) ); - osp_MicroDelay( CYCLE_DELAY ); - tempread |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) - & 0x00010000) >> 16) << i); - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, - (val | clocktab[j++]) ); - osp_MicroDelay( CYCLE_DELAY ); - } - - NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 ); - osp_MicroDelay( CYCLE_DELAY ); - return tempread; + u_int32_t val = 0; + int i, j = 0; + u_int8_t tempread = 0; + + val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; + + /* Send READ instruction */ + for (i = 0; i < ARRAY_SIZE(readtab); i++) { + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | readtab[i])); + osp_MicroDelay(CYCLE_DELAY); + } + + /* Next, we need to send the byte address to read from */ + for (i = 7; i >= 0; i--) { + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | clocktab[j++] | ((offset >> i) & 1))); + osp_MicroDelay(CYCLE_DELAY); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | clocktab[j++] | ((offset >> i) & 1))); + osp_MicroDelay(CYCLE_DELAY); + } + + j = 0; + + /* Now, we can read data from the eeprom by clocking it in */ + for (i = 7; i >= 0; i--) { + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | clocktab[j++])); + osp_MicroDelay(CYCLE_DELAY); + tempread |= + (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) + & 0x00010000) >> 16) << i); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | clocktab[j++])); + osp_MicroDelay(CYCLE_DELAY); + } + + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2); + osp_MicroDelay(CYCLE_DELAY); + return tempread; } - -static void -nicstar_init_eprom( virt_addr_t base ) +static void nicstar_init_eprom(virt_addr_t base) { - u_int32_t val; + u_int32_t val; - /* - * turn chip select off - */ - val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; + /* + * turn chip select off + */ + val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0; - NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, - (val | CS_HIGH | CLK_HIGH)); - osp_MicroDelay( CYCLE_DELAY ); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | CS_HIGH | CLK_HIGH)); + osp_MicroDelay(CYCLE_DELAY); - NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, - (val | CS_HIGH | CLK_LOW)); - osp_MicroDelay( CYCLE_DELAY ); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | CS_HIGH | CLK_LOW)); + osp_MicroDelay(CYCLE_DELAY); - NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, - (val | CS_HIGH | CLK_HIGH)); - osp_MicroDelay( CYCLE_DELAY ); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | CS_HIGH | CLK_HIGH)); + osp_MicroDelay(CYCLE_DELAY); - NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, - (val | CS_HIGH 
| CLK_LOW)); - osp_MicroDelay( CYCLE_DELAY ); + NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, + (val | CS_HIGH | CLK_LOW)); + osp_MicroDelay(CYCLE_DELAY); } - /* * This routine will be the interface to the ReadPromByte function * above. - */ + */ static void -nicstar_read_eprom( - virt_addr_t base, - u_int8_t prom_offset, - u_int8_t *buffer, - u_int32_t nbytes ) +nicstar_read_eprom(virt_addr_t base, + u_int8_t prom_offset, u_int8_t * buffer, u_int32_t nbytes) { - u_int i; - - for (i=0; iatmdev[port]->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST); release_vccs(card->atmdev[port]); dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str); return 0; @@ -401,7 +401,7 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb snr[0]?", SNR ":"", snr, attn[0]?", Attn ":"", attn); card->atmdev[port]->link_rate = rate_down / 424; - card->atmdev[port]->signal = ATM_PHY_SIG_FOUND; + atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_FOUND); return 0; } @@ -1246,7 +1246,7 @@ static int atm_init(struct solos_card *card) card->atmdev[i]->ci_range.vci_bits = 16; card->atmdev[i]->dev_data = card; card->atmdev[i]->phy_data = (void *)(unsigned long)i; - card->atmdev[i]->signal = ATM_PHY_SIG_UNKNOWN; + atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_UNKNOWN); skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c index da4b91ffa53e7aade73d36ed12819853ae2ffff6..41c56eae4c81622e3e22eae7b31a25d3d56dbe21 100644 --- a/drivers/atm/suni.c +++ b/drivers/atm/suni.c @@ -291,8 +291,9 @@ static int suni_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) static void poll_los(struct atm_dev *dev) { - dev->signal = GET(RSOP_SIS) & SUNI_RSOP_SIS_LOSV ? ATM_PHY_SIG_LOST : - ATM_PHY_SIG_FOUND; + atm_dev_signal_change(dev, + GET(RSOP_SIS) & SUNI_RSOP_SIS_LOSV ? + ATM_PHY_SIG_LOST : ATM_PHY_SIG_FOUND); } diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 702accec89e9d3f42aec6ef905d278be4dd65650..4e885d2da49c6f0b5899b9eb3d8d1a874e700ca3 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1637,10 +1637,8 @@ static int __devinit zatm_init_one(struct pci_dev *pci_dev, MODULE_LICENSE("GPL"); static struct pci_device_id zatm_pci_tbl[] __devinitdata = { - { PCI_VENDOR_ID_ZEITNET, PCI_DEVICE_ID_ZEITNET_1221, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, ZATM_COPPER }, - { PCI_VENDOR_ID_ZEITNET, PCI_DEVICE_ID_ZEITNET_1225, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, + { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, zatm_pci_tbl); diff --git a/drivers/base/core.c b/drivers/base/core.c index 9b9d3bd54e3a5359f8f0aa3e833ab5380ea11a47..f8e72724dd4b78c0d7048f552a5be29bb189956b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1819,3 +1819,67 @@ void device_shutdown(void) spin_unlock(&devices_kset->list_lock); async_synchronize_full(); } + +/* + * Device logging functions + */ + +#ifdef CONFIG_PRINTK + +static int __dev_printk(const char *level, const struct device *dev, + struct va_format *vaf) +{ + if (!dev) + return printk("%s(NULL device *): %pV", level, vaf); + + return printk("%s%s %s: %pV", + level, dev_driver_string(dev), dev_name(dev), vaf); +} + +int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + int r; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + r = __dev_printk(level, dev, &vaf); + va_end(args); + + return r; +} +EXPORT_SYMBOL(dev_printk); + +#define define_dev_printk_level(func, kern_level) \ +int func(const struct device *dev, const char *fmt, ...) \ +{ \ + struct va_format vaf; \ + va_list args; \ + int r; \ + \ + va_start(args, fmt); \ + \ + vaf.fmt = fmt; \ + vaf.va = &args; \ + \ + r = __dev_printk(kern_level, dev, &vaf); \ + va_end(args); \ + \ + return r; \ +} \ +EXPORT_SYMBOL(func); + +define_dev_printk_level(dev_emerg, KERN_EMERG); +define_dev_printk_level(dev_alert, KERN_ALERT); +define_dev_printk_level(dev_crit, KERN_CRIT); +define_dev_printk_level(dev_err, KERN_ERR); +define_dev_printk_level(dev_warn, KERN_WARNING); +define_dev_printk_level(dev_notice, KERN_NOTICE); +define_dev_printk_level(_dev_info, KERN_INFO); + +#endif diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 058fbccf2f52bea5264b5a585994b0b7eb777937..02deef424926a793e6bcc76594ae80e1b1a0d095 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -58,6 +58,18 @@ config BT_HCIUART_BCSP Say Y here to compile support for HCI BCSP protocol. +config BT_HCIUART_ATH3K + bool "Atheros AR300x serial support" + depends on BT_HCIUART + help + HCIATH3K (HCI Atheros AR300x) is a serial protocol for + communication between host and Atheros AR300x Bluetooth devices. + This protocol enables AR300x chips to be enabled with + power management support. + Enable this if you have Atheros AR300x serial Bluetooth device. + + Say Y here to compile support for HCI UART ATH3K protocol. + config BT_HCIUART_LL bool "HCILL protocol support" depends on BT_HCIUART diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 7e5aed59812109d70de6cf495f43fb7382e09884..71bdf13287c471d78678d0d02a8d385ac2e63772 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -26,4 +26,5 @@ hci_uart-y := hci_ldisc.o hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o +hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o hci_uart-objs := $(hci_uart-y) diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c index b0c84c19f442e3ad41fc7340b50e33366c75adcd..8b1b643a519b330a8119ad5bde789ec105edfdfc 100644 --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c @@ -224,7 +224,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); - data->fw_data = kmalloc(firmware->size, GFP_KERNEL); + data->fw_data = kmemdup(firmware->data, firmware->size, GFP_KERNEL); if (!data->fw_data) { BT_ERR("Can't allocate memory for firmware image"); release_firmware(firmware); @@ -234,7 +234,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id return -ENOMEM; } - memcpy(data->fw_data, firmware->data, firmware->size); data->fw_size = firmware->size; data->fw_sent = 0; diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index d945cd12433a709c1c002533e418a2771988005d..751b338d904a7530dba3ba901cf81c75e0a9d198 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -62,7 +62,7 @@ struct hci_vendor_hdr { __u8 type; __le16 snum; __le16 dlen; -} __attribute__ ((packed)); +} __packed; static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count) { diff --git 
a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c index b50b41d97a7f2d781ebf5e0c58a79e1298171f1b..54739b08c3083d413c70981fc0f0ca095cec0716 100644 --- a/drivers/bluetooth/btmrvl_debugfs.c +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -216,7 +216,7 @@ static const struct file_operations btmrvl_gpiogap_fops = { static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - struct btmrvl_private *priv = (struct btmrvl_private *) file->private_data; + struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index bed0ba630235b46fb796b20116f4ea11d414675b..90bda50dc4465757d2f8431f7bdc64295ba8bf88 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -76,6 +76,7 @@ struct btmrvl_private { int (*hw_host_to_card) (struct btmrvl_private *priv, u8 *payload, u16 nb); int (*hw_wakeup_firmware) (struct btmrvl_private *priv); + int (*hw_process_int_status) (struct btmrvl_private *priv); spinlock_t driver_lock; /* spinlock used by driver */ #ifdef CONFIG_DEBUG_FS void *debugfs_data; @@ -118,13 +119,13 @@ struct btmrvl_cmd { __le16 ocf_ogf; u8 length; u8 data[4]; -} __attribute__ ((packed)); +} __packed; struct btmrvl_event { u8 ec; /* event counter */ u8 length; u8 data[4]; -} __attribute__ ((packed)); +} __packed; /* Prototype of global function */ diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index ee37ef0caee269c0383fd9336ea9cf0d665ddba0..0d32ec82e9bfa5a72e847e43202830f91b2c26d0 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -502,14 +502,17 @@ static int btmrvl_service_main_thread(void *data) spin_lock_irqsave(&priv->driver_lock, flags); if (adapter->int_count) { adapter->int_count = 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); + priv->hw_process_int_status(priv); } else if (adapter->ps_state == PS_SLEEP && !skb_queue_empty(&adapter->tx_queue)) { spin_unlock_irqrestore(&priv->driver_lock, flags); adapter->wakeup_tries++; priv->hw_wakeup_firmware(priv); continue; + } else { + spin_unlock_irqrestore(&priv->driver_lock, flags); } - spin_unlock_irqrestore(&priv->driver_lock, flags); if (adapter->ps_state == PS_SLEEP) continue; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index df0773ebd9e4fc4db3ef8d9d897adfd497d2eb31..dcc2a6ec23f00eb191bda5047387de18eb71d923 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -47,6 +47,7 @@ * module_exit function is called. */ static u8 user_rmmod; +static u8 sdio_ireg; static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = { .helper = "sd8688_helper.bin", @@ -83,10 +84,10 @@ static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat) *dat = 0; fws0 = sdio_readb(card->func, CARD_FW_STATUS0_REG, &ret); + if (ret) + return -EIO; - if (!ret) - fws1 = sdio_readb(card->func, CARD_FW_STATUS1_REG, &ret); - + fws1 = sdio_readb(card->func, CARD_FW_STATUS1_REG, &ret); if (ret) return -EIO; @@ -216,7 +217,7 @@ static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) tmphlprbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); - tmphlprbuf = kmalloc(tmphlprbufsz, GFP_KERNEL); + tmphlprbuf = kzalloc(tmphlprbufsz, GFP_KERNEL); if (!tmphlprbuf) { BT_ERR("Unable to allocate buffer for helper." 
" Terminating download"); @@ -224,8 +225,6 @@ static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) goto done; } - memset(tmphlprbuf, 0, tmphlprbufsz); - helperbuf = (u8 *) ALIGN_ADDR(tmphlprbuf, BTSDIO_DMA_ALIGN); /* Perform helper data transfer */ @@ -318,7 +317,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) BT_DBG("Downloading FW image (%d bytes)", firmwarelen); tmpfwbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); - tmpfwbuf = kmalloc(tmpfwbufsz, GFP_KERNEL); + tmpfwbuf = kzalloc(tmpfwbufsz, GFP_KERNEL); if (!tmpfwbuf) { BT_ERR("Unable to allocate buffer for firmware." " Terminating download"); @@ -326,8 +325,6 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) goto done; } - memset(tmpfwbuf, 0, tmpfwbufsz); - /* Ensure aligned firmware buffer */ fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, BTSDIO_DMA_ALIGN); @@ -555,78 +552,79 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) return ret; } -static int btmrvl_sdio_get_int_status(struct btmrvl_private *priv, u8 * ireg) +static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv) { - int ret; - u8 sdio_ireg = 0; + ulong flags; + u8 ireg; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; - *ireg = 0; - - sdio_ireg = sdio_readb(card->func, HOST_INTSTATUS_REG, &ret); - if (ret) { - BT_ERR("sdio_readb: read int status register failed"); - ret = -EIO; - goto done; - } - - if (sdio_ireg != 0) { - /* - * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS - * Clear the interrupt status register and re-enable the - * interrupt. - */ - BT_DBG("sdio_ireg = 0x%x", sdio_ireg); - - sdio_writeb(card->func, ~(sdio_ireg) & (DN_LD_HOST_INT_STATUS | - UP_LD_HOST_INT_STATUS), - HOST_INTSTATUS_REG, &ret); - if (ret) { - BT_ERR("sdio_writeb: clear int status register " - "failed"); - ret = -EIO; - goto done; - } - } + spin_lock_irqsave(&priv->driver_lock, flags); + ireg = sdio_ireg; + sdio_ireg = 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); - if (sdio_ireg & DN_LD_HOST_INT_STATUS) { + sdio_claim_host(card->func); + if (ireg & DN_LD_HOST_INT_STATUS) { if (priv->btmrvl_dev.tx_dnld_rdy) BT_DBG("tx_done already received: " - " int_status=0x%x", sdio_ireg); + " int_status=0x%x", ireg); else priv->btmrvl_dev.tx_dnld_rdy = true; } - if (sdio_ireg & UP_LD_HOST_INT_STATUS) + if (ireg & UP_LD_HOST_INT_STATUS) btmrvl_sdio_card_to_host(priv); - *ireg = sdio_ireg; - - ret = 0; + sdio_release_host(card->func); -done: - return ret; + return 0; } static void btmrvl_sdio_interrupt(struct sdio_func *func) { struct btmrvl_private *priv; - struct hci_dev *hcidev; struct btmrvl_sdio_card *card; + ulong flags; u8 ireg = 0; + int ret; card = sdio_get_drvdata(func); - if (card && card->priv) { - priv = card->priv; - hcidev = priv->btmrvl_dev.hcidev; + if (!card || !card->priv) { + BT_ERR("sbi_interrupt(%p) card or priv is " + "NULL, card=%p\n", func, card); + return; + } - if (btmrvl_sdio_get_int_status(priv, &ireg)) - BT_ERR("reading HOST_INT_STATUS_REG failed"); - else - BT_DBG("HOST_INT_STATUS_REG %#x", ireg); + priv = card->priv; + + ireg = sdio_readb(card->func, HOST_INTSTATUS_REG, &ret); + if (ret) { + BT_ERR("sdio_readb: read int status register failed"); + return; + } + + if (ireg != 0) { + /* + * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS + * Clear the interrupt status register and re-enable the + * interrupt. 
+ */ + BT_DBG("ireg = 0x%x", ireg); - btmrvl_interrupt(priv); + sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS | + UP_LD_HOST_INT_STATUS), + HOST_INTSTATUS_REG, &ret); + if (ret) { + BT_ERR("sdio_writeb: clear int status register failed"); + return; + } } + + spin_lock_irqsave(&priv->driver_lock, flags); + sdio_ireg |= ireg; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + btmrvl_interrupt(priv); } static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card) @@ -930,6 +928,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func, /* Initialize the interface specific function pointers */ priv->hw_host_to_card = btmrvl_sdio_host_to_card; priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; + priv->hw_process_int_status = btmrvl_sdio_process_int_status; if (btmrvl_register_hdev(priv)) { BT_ERR("Register hdev failed!"); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5d9cc53bd64315cdc1995af32f4895eecdf20fd6..d22ce3cc611e7bb60f0377ff344fa41fb7bde2db 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -59,6 +59,9 @@ static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + /* Apple iMac11,1 */ + { USB_DEVICE(0x05ac, 0x8215) }, + /* AVM BlueFRITZ! USB v2.0 */ { USB_DEVICE(0x057c, 0x3800) }, @@ -146,6 +149,7 @@ static struct usb_device_id blacklist_table[] = { #define BTUSB_BULK_RUNNING 1 #define BTUSB_ISOC_RUNNING 2 #define BTUSB_SUSPENDING 3 +#define BTUSB_DID_ISO_RESUME 4 struct btusb_data { struct hci_dev *hdev; @@ -179,7 +183,6 @@ struct btusb_data { unsigned int sco_num; int isoc_altsetting; int suspend_count; - int did_iso_resume:1; }; static int inc_tx(struct btusb_data *data) @@ -807,7 +810,7 @@ static void btusb_work(struct work_struct *work) int err; if (hdev->conn_hash.sco_num > 0) { - if (!data->did_iso_resume) { + if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { err = usb_autopm_get_interface(data->isoc); if (err < 0) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); @@ -815,7 +818,7 @@ static void btusb_work(struct work_struct *work) return; } - data->did_iso_resume = 1; + set_bit(BTUSB_DID_ISO_RESUME, &data->flags); } if (data->isoc_altsetting != 2) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); @@ -836,10 +839,8 @@ static void btusb_work(struct work_struct *work) usb_kill_anchored_urbs(&data->isoc_anchor); __set_isoc_interface(hdev, 0); - if (data->did_iso_resume) { - data->did_iso_resume = 0; + if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) usb_autopm_put_interface(data->isoc); - } } } diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index ef044d55cb257d59bedf61575058213675dc3895..cbe9e44a42e96d44bfb4224d284838f90b919060 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -104,7 +104,7 @@ typedef struct { u8 type; u8 zero; u16 len; -} __attribute__ ((packed)) nsh_t; /* Nokia Specific Header */ +} __packed nsh_t; /* Nokia Specific Header */ #define NSHL 4 /* Nokia Specific Header Length */ diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c new file mode 100644 index 0000000000000000000000000000000000000000..6a160c17ea943426b459e5a9ec8ad9000b5b5b8e --- /dev/null +++ b/drivers/bluetooth/hci_ath.c @@ -0,0 +1,235 @@ +/* + * Atheros Communication Bluetooth HCIATH3K UART protocol + * + * HCIATH3K (HCI Atheros AR300x Protocol) is a Atheros Communication's + * power management protocol extension to H4 to support AR300x Bluetooth Chip. 
+ * + * Copyright (c) 2009-2010 Atheros Communications Inc. + * + * Acknowledgements: + * This file is based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +struct ath_struct { + struct hci_uart *hu; + unsigned int cur_sleep; + + struct sk_buff_head txq; + struct work_struct ctxtsw; +}; + +static int ath_wakeup_ar3k(struct tty_struct *tty) +{ + struct termios settings; + int status = tty->driver->ops->tiocmget(tty, NULL); + + if (status & TIOCM_CTS) + return status; + + /* Disable Automatic RTSCTS */ + n_tty_ioctl_helper(tty, NULL, TCGETS, (unsigned long)&settings); + settings.c_cflag &= ~CRTSCTS; + n_tty_ioctl_helper(tty, NULL, TCSETS, (unsigned long)&settings); + + /* Clear RTS first */ + status = tty->driver->ops->tiocmget(tty, NULL); + tty->driver->ops->tiocmset(tty, NULL, 0x00, TIOCM_RTS); + mdelay(20); + + /* Set RTS, wake up board */ + status = tty->driver->ops->tiocmget(tty, NULL); + tty->driver->ops->tiocmset(tty, NULL, TIOCM_RTS, 0x00); + mdelay(20); + + status = tty->driver->ops->tiocmget(tty, NULL); + + n_tty_ioctl_helper(tty, NULL, TCGETS, (unsigned long)&settings); + settings.c_cflag |= CRTSCTS; + n_tty_ioctl_helper(tty, NULL, TCSETS, (unsigned long)&settings); + + return status; +} + +static void ath_hci_uart_work(struct work_struct *work) +{ + int status; + struct ath_struct *ath; + struct hci_uart *hu; + struct tty_struct *tty; + + ath = container_of(work, struct ath_struct, ctxtsw); + + hu = ath->hu; + tty = hu->tty; + + /* verify and wake up controller */ + if (ath->cur_sleep) { + status = ath_wakeup_ar3k(tty); + if (!(status & TIOCM_CTS)) + return; + } + + /* Ready to send Data */ + clear_bit(HCI_UART_SENDING, &hu->tx_state); + hci_uart_tx_wakeup(hu); +} + +/* Initialize protocol */ +static int ath_open(struct hci_uart *hu) +{ + struct ath_struct *ath; + + BT_DBG("hu %p", hu); + + ath = kzalloc(sizeof(*ath), GFP_ATOMIC); + if (!ath) + return -ENOMEM; + + skb_queue_head_init(&ath->txq); + + hu->priv = ath; + ath->hu = hu; + + INIT_WORK(&ath->ctxtsw, ath_hci_uart_work); + + return 0; +} + +/* Flush protocol data */ +static int ath_flush(struct hci_uart *hu) +{ + struct ath_struct *ath = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ath->txq); + + return 0; +} + +/* Close protocol */ +static int ath_close(struct hci_uart *hu) +{ + struct ath_struct *ath = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ath->txq); + + cancel_work_sync(&ath->ctxtsw); + + hu->priv = NULL; + kfree(ath); + + return 0; +} + +#define HCI_OP_ATH_SLEEP 0xFC04 + +/* Enqueue frame for transmittion */ +static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct ath_struct *ath = 
hu->priv; + + if (bt_cb(skb)->pkt_type == HCI_SCODATA_PKT) { + kfree_skb(skb); + return 0; + } + + /* + * Update power management enable flag with parameters of + * HCI sleep enable vendor specific HCI command. + */ + if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { + struct hci_command_hdr *hdr = (void *)skb->data; + + if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP) + ath->cur_sleep = skb->data[HCI_COMMAND_HDR_SIZE]; + } + + BT_DBG("hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + skb_queue_tail(&ath->txq, skb); + set_bit(HCI_UART_SENDING, &hu->tx_state); + + schedule_work(&ath->ctxtsw); + + return 0; +} + +static struct sk_buff *ath_dequeue(struct hci_uart *hu) +{ + struct ath_struct *ath = hu->priv; + + return skb_dequeue(&ath->txq); +} + +/* Recv data */ +static int ath_recv(struct hci_uart *hu, void *data, int count) +{ + if (hci_recv_stream_fragment(hu->hdev, data, count) < 0) + BT_ERR("Frame Reassembly Failed"); + + return count; +} + +static struct hci_uart_proto athp = { + .id = HCI_UART_ATH3K, + .open = ath_open, + .close = ath_close, + .recv = ath_recv, + .enqueue = ath_enqueue, + .dequeue = ath_dequeue, + .flush = ath_flush, +}; + +int __init ath_init(void) +{ + int err = hci_uart_register_proto(&athp); + + if (!err) + BT_INFO("HCIATH3K protocol initialized"); + else + BT_ERR("HCIATH3K protocol registration failed"); + + return err; +} + +int __exit ath_deinit(void) +{ + return hci_uart_unregister_proto(&athp); +} diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index 42d69d4de05c9859bd01af180bc2937a1443d4fa..9c5b2dc38e29455884618b4dfb5c8256d5fa5496 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -739,7 +739,7 @@ static struct hci_uart_proto bcsp = { .flush = bcsp_flush }; -int bcsp_init(void) +int __init bcsp_init(void) { int err = hci_uart_register_proto(&bcsp); @@ -751,7 +751,7 @@ int bcsp_init(void) return err; } -int bcsp_deinit(void) +int __exit bcsp_deinit(void) { return hci_uart_unregister_proto(&bcsp); } diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 3f038f5308a46064217b971f1f5cc6818d625553..7b8ad93e2c367401e98eef5a38b2f51a3096ddac 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -151,107 +151,8 @@ static inline int h4_check_data_len(struct h4_struct *h4, int len) /* Recv data */ static int h4_recv(struct hci_uart *hu, void *data, int count) { - struct h4_struct *h4 = hu->priv; - register char *ptr; - struct hci_event_hdr *eh; - struct hci_acl_hdr *ah; - struct hci_sco_hdr *sh; - register int len, type, dlen; - - BT_DBG("hu %p count %d rx_state %ld rx_count %ld", - hu, count, h4->rx_state, h4->rx_count); - - ptr = data; - while (count) { - if (h4->rx_count) { - len = min_t(unsigned int, h4->rx_count, count); - memcpy(skb_put(h4->rx_skb, len), ptr, len); - h4->rx_count -= len; count -= len; ptr += len; - - if (h4->rx_count) - continue; - - switch (h4->rx_state) { - case H4_W4_DATA: - BT_DBG("Complete data"); - - hci_recv_frame(h4->rx_skb); - - h4->rx_state = H4_W4_PACKET_TYPE; - h4->rx_skb = NULL; - continue; - - case H4_W4_EVENT_HDR: - eh = hci_event_hdr(h4->rx_skb); - - BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen); - - h4_check_data_len(h4, eh->plen); - continue; - - case H4_W4_ACL_HDR: - ah = hci_acl_hdr(h4->rx_skb); - dlen = __le16_to_cpu(ah->dlen); - - BT_DBG("ACL header: dlen %d", dlen); - - h4_check_data_len(h4, dlen); - continue; - - case H4_W4_SCO_HDR: - sh = 
hci_sco_hdr(h4->rx_skb); - - BT_DBG("SCO header: dlen %d", sh->dlen); - - h4_check_data_len(h4, sh->dlen); - continue; - } - } - - /* H4_W4_PACKET_TYPE */ - switch (*ptr) { - case HCI_EVENT_PKT: - BT_DBG("Event packet"); - h4->rx_state = H4_W4_EVENT_HDR; - h4->rx_count = HCI_EVENT_HDR_SIZE; - type = HCI_EVENT_PKT; - break; - - case HCI_ACLDATA_PKT: - BT_DBG("ACL packet"); - h4->rx_state = H4_W4_ACL_HDR; - h4->rx_count = HCI_ACL_HDR_SIZE; - type = HCI_ACLDATA_PKT; - break; - - case HCI_SCODATA_PKT: - BT_DBG("SCO packet"); - h4->rx_state = H4_W4_SCO_HDR; - h4->rx_count = HCI_SCO_HDR_SIZE; - type = HCI_SCODATA_PKT; - break; - - default: - BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr); - hu->hdev->stat.err_rx++; - ptr++; count--; - continue; - }; - - ptr++; count--; - - /* Allocate packet */ - h4->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); - if (!h4->rx_skb) { - BT_ERR("Can't allocate mem for new packet"); - h4->rx_state = H4_W4_PACKET_TYPE; - h4->rx_count = 0; - return -ENOMEM; - } - - h4->rx_skb->dev = (void *) hu->hdev; - bt_cb(h4->rx_skb)->pkt_type = type; - } + if (hci_recv_stream_fragment(hu->hdev, data, count) < 0) + BT_ERR("Frame Reassembly Failed"); return count; } @@ -272,7 +173,7 @@ static struct hci_uart_proto h4p = { .flush = h4_flush, }; -int h4_init(void) +int __init h4_init(void) { int err = hci_uart_register_proto(&h4p); @@ -284,7 +185,7 @@ int h4_init(void) return err; } -int h4_deinit(void) +int __exit h4_deinit(void) { return hci_uart_unregister_proto(&h4p); } diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 76a1abb8f2142772bd1993bf99d5771dcd1d980a..998833d93c13c724ffbc828ab0f62ad07e1c438c 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -210,7 +210,6 @@ static int hci_uart_close(struct hci_dev *hdev) static int hci_uart_send_frame(struct sk_buff *skb) { struct hci_dev* hdev = (struct hci_dev *) skb->dev; - struct tty_struct *tty; struct hci_uart *hu; if (!hdev) { @@ -222,7 +221,6 @@ static int hci_uart_send_frame(struct sk_buff *skb) return -EBUSY; hu = (struct hci_uart *) hdev->driver_data; - tty = hu->tty; BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); @@ -397,6 +395,9 @@ static int hci_uart_register_dev(struct hci_uart *hu) if (!reset) set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags)) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); @@ -477,6 +478,15 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file, return hu->hdev->id; return -EUNATCH; + case HCIUARTSETFLAGS: + if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + return -EBUSY; + hu->hdev_flags = arg; + break; + + case HCIUARTGETFLAGS: + return hu->hdev_flags; + default: err = n_tty_ioctl_helper(tty, file, cmd, arg); break; @@ -542,6 +552,9 @@ static int __init hci_uart_init(void) #ifdef CONFIG_BT_HCIUART_LL ll_init(); #endif +#ifdef CONFIG_BT_HCIUART_ATH3K + ath_init(); +#endif return 0; } @@ -559,6 +572,9 @@ static void __exit hci_uart_exit(void) #ifdef CONFIG_BT_HCIUART_LL ll_deinit(); #endif +#ifdef CONFIG_BT_HCIUART_ATH3K + ath_deinit(); +#endif /* Release tty registration of line discipline */ if ((err = tty_unregister_ldisc(N_HCI))) diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index fb8445c7365ee0052fbd10284d0a895c12259fdf..38595e782d02e1c39dcbd82cc0fa0c94b0babfd2 100644 --- a/drivers/bluetooth/hci_ll.c +++ 
b/drivers/bluetooth/hci_ll.c @@ -74,7 +74,7 @@ enum hcill_states_e { struct hcill_cmd { u8 cmd; -} __attribute__((packed)); +} __packed; struct ll_struct { unsigned long rx_state; @@ -517,7 +517,7 @@ static struct hci_uart_proto llp = { .flush = ll_flush, }; -int ll_init(void) +int __init ll_init(void) { int err = hci_uart_register_proto(&llp); @@ -529,7 +529,7 @@ int ll_init(void) return err; } -int ll_deinit(void) +int __exit ll_deinit(void) { return hci_uart_unregister_proto(&llp); } diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 50113db06b9f54144e455193279deec4900028cc..99fb35239d1fe35c6ec7979601da81ab771f7965 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -31,15 +31,20 @@ #define HCIUARTSETPROTO _IOW('U', 200, int) #define HCIUARTGETPROTO _IOR('U', 201, int) #define HCIUARTGETDEVICE _IOR('U', 202, int) +#define HCIUARTSETFLAGS _IOW('U', 203, int) +#define HCIUARTGETFLAGS _IOR('U', 204, int) /* UART protocols */ -#define HCI_UART_MAX_PROTO 5 +#define HCI_UART_MAX_PROTO 6 #define HCI_UART_H4 0 #define HCI_UART_BCSP 1 #define HCI_UART_3WIRE 2 #define HCI_UART_H4DS 3 #define HCI_UART_LL 4 +#define HCI_UART_ATH3K 5 + +#define HCI_UART_RAW_DEVICE 0 struct hci_uart; @@ -57,6 +62,7 @@ struct hci_uart { struct tty_struct *tty; struct hci_dev *hdev; unsigned long flags; + unsigned long hdev_flags; struct hci_uart_proto *proto; void *priv; @@ -66,7 +72,7 @@ struct hci_uart { spinlock_t rx_lock; }; -/* HCI_UART flag bits */ +/* HCI_UART proto flag bits */ #define HCI_UART_PROTO_SET 0 /* TX states */ @@ -91,3 +97,8 @@ int bcsp_deinit(void); int ll_init(void); int ll_deinit(void); #endif + +#ifdef CONFIG_BT_HCIUART_ATH3K +int ath_init(void); +int ath_deinit(void); +#endif diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 0b926e45afe2f0b64824d36b6acfbb50ed0d3afa..a5ea1bce9689601edb299eab667ea0be35290cd2 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -215,7 +215,7 @@ static int addr4_resolve(struct sockaddr_in *src_in, neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev); if (!neigh || !(neigh->nud_state & NUD_VALID)) { - neigh_event_send(rt->u.dst.neighbour, NULL); + neigh_event_send(rt->dst.neighbour, NULL); ret = -ENODATA; if (neigh) goto release; diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index ebfb117ba68b48da7f41b99a6bda9e84d028a1be..abd683ea326d4746622057708050f38cedca9b3d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1364,7 +1364,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) __func__); goto reject; } - dst = &rt->u.dst; + dst = &rt->dst; l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", @@ -1932,7 +1932,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) err = -EHOSTUNREACH; goto fail3; } - ep->dst = &rt->u.dst; + ep->dst = &rt->dst; /* get a l2t entry */ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour, diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 855ee44fdb52de5a23b847a313616b63120a8b9d..8c9b483a0d93064dc7ae7dc55ea511fcd98c1532 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1365,7 +1365,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) __func__); goto reject; } - dst = &rt->u.dst; + dst 
= &rt->dst; if (dst->neighbour->dev->flags & IFF_LOOPBACK) { pdev = ip_dev_find(&init_net, peer_ip); BUG_ON(!pdev); @@ -1939,7 +1939,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) err = -EHOSTUNREACH; goto fail3; } - ep->dst = &rt->u.dst; + ep->dst = &rt->dst; /* get a l2t entry */ if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 986d6f32ddedf8ac0d225aca4a61321469f1d3a1..d876d0435cd4b30316d7e6cdd3976bf2a6db52a0 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1146,7 +1146,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi } if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) - neigh_event_send(rt->u.dst.neighbour, NULL); + neigh_event_send(rt->dst.neighbour, NULL); ip_rt_put(rt); return rc; diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 5cc0a9ae5bb147c91e4f72297085f083f597abaa..42e7aad1ec234301fc85ec4b6188d5107298c899 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1567,6 +1567,12 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd } +static int nes_netdev_set_flags(struct net_device *netdev, u32 flags) +{ + return ethtool_op_set_flags(netdev, flags, ETH_FLAG_LRO); +} + + static const struct ethtool_ops nes_ethtool_ops = { .get_link = ethtool_op_get_link, .get_settings = nes_netdev_get_settings, @@ -1588,7 +1594,7 @@ static const struct ethtool_ops nes_ethtool_ops = { .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, + .set_flags = nes_netdev_set_flags, }; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 40e858492f905055276c3346f305fc6ac1353775..1a1657c82edd61eaf3877963024516d705622e61 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -147,6 +147,11 @@ static void ipoib_get_ethtool_stats(struct net_device *dev, data[index++] = priv->lro.lro_mgr.stats.no_desc; } +static int ipoib_set_flags(struct net_device *dev, u32 flags) +{ + return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO); +} + static const struct ethtool_ops ipoib_ethtool_ops = { .get_drvinfo = ipoib_get_drvinfo, .get_rx_csum = ipoib_get_rx_csum, @@ -154,7 +159,7 @@ static const struct ethtool_ops ipoib_ethtool_ops = { .get_coalesce = ipoib_get_coalesce, .set_coalesce = ipoib_set_coalesce, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, + .set_flags = ipoib_set_flags, .get_strings = ipoib_get_strings, .get_sset_count = ipoib_get_sset_count, .get_ethtool_stats = ipoib_get_ethtool_stats, diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 0cabe31f26df3f5929842f28b63b0784e5c8c05c..f80a7c48a35f852f4d5912f42a6af846769c9fa4 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -50,6 +49,7 @@ MODULE_LICENSE("GPL"); /* -------- driver information -------------------------------------- */ +static DEFINE_MUTEX(capi_mutex); static struct class *capi_class; static int capi_major = 68; /* allocated */ @@ -691,7 +691,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) static ssize_t capi_read(struct file *file, char __user *buf, size_t 
count, loff_t *ppos) { - struct capidev *cdev = (struct capidev *)file->private_data; + struct capidev *cdev = file->private_data; struct sk_buff *skb; size_t copied; int err; @@ -726,7 +726,7 @@ capi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) static ssize_t capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct capidev *cdev = (struct capidev *)file->private_data; + struct capidev *cdev = file->private_data; struct sk_buff *skb; u16 mlen; @@ -773,7 +773,7 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos static unsigned int capi_poll(struct file *file, poll_table * wait) { - struct capidev *cdev = (struct capidev *)file->private_data; + struct capidev *cdev = file->private_data; unsigned int mask = 0; if (!cdev->ap.applid) @@ -985,9 +985,9 @@ capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&capi_mutex); ret = capi_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&capi_mutex); return ret; } diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index bf55ed5f38e37ff74a35c051356f470330476ac7..2978bdaa6b8823473a029bce120237b94e133c2f 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -1450,12 +1450,9 @@ static void handle_dtrace_data(capidrv_contr *card, } for (p = data, end = data+len; p < end; p++) { - u8 w; PUTBYTE_TO_STATUS(card, ' '); - w = (*p >> 4) & 0xf; - PUTBYTE_TO_STATUS(card, (w < 10) ? '0'+w : 'A'-10+w); - w = *p & 0xf; - PUTBYTE_TO_STATUS(card, (w < 10) ? '0'+w : 'A'-10+w); + PUTBYTE_TO_STATUS(card, hex_asc_hi(*p)); + PUTBYTE_TO_STATUS(card, hex_asc_lo(*p)); } PUTBYTE_TO_STATUS(card, '\n'); diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index c53e2417e7d4aca7130160d27dbf4b32815c1e1d..33ec9e4677727800d5439e16300be820c92090b8 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include "isdn_divert.h" @@ -28,6 +28,7 @@ /* Variables for interface queue */ /*********************************/ ulong if_used = 0; /* number of interface users */ +static DEFINE_MUTEX(isdn_divert_mutex); static struct divert_info *divert_info_head = NULL; /* head of queue */ static struct divert_info *divert_info_tail = NULL; /* pointer to last entry */ static DEFINE_SPINLOCK(divert_info_lock);/* lock for queue */ @@ -261,9 +262,9 @@ static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg) { long ret; - lock_kernel(); + mutex_lock(&isdn_divert_mutex); ret = isdn_divert_ioctl_unlocked(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&isdn_divert_mutex); return ret; } diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig index dcefedc7044ad5b1e6c6bf7eb40275b40f9c5bf4..b18a92c32184721f6dfb658d30ed017207a76aab 100644 --- a/drivers/isdn/gigaset/Kconfig +++ b/drivers/isdn/gigaset/Kconfig @@ -17,8 +17,7 @@ menuconfig ISDN_DRV_GIGASET if ISDN_DRV_GIGASET config GIGASET_CAPI - bool "Gigaset CAPI support (EXPERIMENTAL)" - depends on EXPERIMENTAL + bool "Gigaset CAPI support" depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m') default ISDN_I4L='n' help @@ -27,6 +26,7 @@ config GIGASET_CAPI subsystem you'll have to enable the capidrv glue driver. (select ISDN_CAPI_CAPIDRV.) Say N to build the old native ISDN4Linux variant. + If unsure, say Y. 
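The capi_mutex and isdn_divert_mutex hunks above follow one common BKL
removal pattern; a minimal sketch of that pattern, using hypothetical
my_drv names rather than anything from this series:

    #include <linux/mutex.h>
    #include <linux/fs.h>

    static DEFINE_MUTEX(my_drv_mutex);      /* private lock replacing the BKL */

    static long my_drv_ioctl_unlocked(struct file *file, unsigned int cmd,
                                      unsigned long arg)
    {
            return 0;       /* the original, formerly BKL-protected ioctl body */
    }

    static long my_drv_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
    {
            long ret;

            mutex_lock(&my_drv_mutex);      /* was: lock_kernel() */
            ret = my_drv_ioctl_unlocked(file, cmd, arg);
            mutex_unlock(&my_drv_mutex);    /* was: unlock_kernel() */

            return ret;
    }

The private mutex serializes only the driver's own ioctl path; it is
narrower in scope than the old Big Kernel Lock and, unlike the BKL, is not
dropped when the holder sleeps.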
config GIGASET_I4L bool diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 47a5ffec55a393173de8fd01ef8019b2d7115bdd..0ded3640b926ea6edf86955a04978b6a4c78e776 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -1188,24 +1188,6 @@ static void write_iso_tasklet(unsigned long data) break; } } -#ifdef CONFIG_GIGASET_DEBUG - /* check assumption on remaining frames */ - for (; i < BAS_NUMFRAMES; i++) { - ifd = &urb->iso_frame_desc[i]; - if (ifd->status != -EINPROGRESS - || ifd->actual_length != 0) { - dev_warn(cs->dev, - "isochronous write: frame %d: %s, " - "%d of %d bytes sent\n", - i, get_usb_statmsg(ifd->status), - ifd->actual_length, ifd->length); - offset = (ifd->offset + - ifd->actual_length) - % BAS_OUTBUFSIZE; - break; - } - } -#endif break; case -EPIPE: /* stall - probably underrun */ dev_err(cs->dev, "isochronous write stalled\n"); @@ -1913,65 +1895,41 @@ static int start_cbsend(struct cardstate *cs) * USB transmission is started if necessary. * parameters: * cs controller state structure - * buf command string to send - * len number of bytes to send (max. IF_WRITEBUF) - * wake_tasklet tasklet to run when transmission is completed - * (NULL if none) + * cb command buffer structure * return value: * number of bytes queued on success * error code < 0 on error */ -static int gigaset_write_cmd(struct cardstate *cs, - const unsigned char *buf, int len, - struct tasklet_struct *wake_tasklet) +static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) { - struct cmdbuf_t *cb; unsigned long flags; int rc; gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? DEBUG_TRANSCMD : DEBUG_LOCKCMD, - "CMD Transmit", len, buf); - - if (len <= 0) { - /* nothing to do */ - rc = 0; - goto notqueued; - } + "CMD Transmit", cb->len, cb->buf); /* translate "+++" escape sequence sent as a single separate command * into "close AT channel" command for error recovery * The next command will reopen the AT channel automatically. */ - if (len == 3 && !memcmp(buf, "+++", 3)) { + if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { + kfree(cb); rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); - goto notqueued; + if (cb->wake_tasklet) + tasklet_schedule(cb->wake_tasklet); + return rc < 0 ? rc : cb->len; } - if (len > IF_WRITEBUF) - len = IF_WRITEBUF; - cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC); - if (!cb) { - dev_err(cs->dev, "%s: out of memory\n", __func__); - rc = -ENOMEM; - goto notqueued; - } - - memcpy(cb->buf, buf, len); - cb->len = len; - cb->offset = 0; - cb->next = NULL; - cb->wake_tasklet = wake_tasklet; - spin_lock_irqsave(&cs->cmdlock, flags); cb->prev = cs->lastcmdbuf; if (cs->lastcmdbuf) cs->lastcmdbuf->next = cb; else { cs->cmdbuf = cb; - cs->curlen = len; + cs->curlen = cb->len; } - cs->cmdbytes += len; + cs->cmdbytes += cb->len; cs->lastcmdbuf = cb; spin_unlock_irqrestore(&cs->cmdlock, flags); @@ -1988,12 +1946,7 @@ static int gigaset_write_cmd(struct cardstate *cs, } rc = start_cbsend(cs); spin_unlock_irqrestore(&cs->lock, flags); - return rc < 0 ? rc : len; - -notqueued: /* request handled without queuing */ - if (wake_tasklet) - tasklet_schedule(wake_tasklet); - return rc; + return rc < 0 ? 
rc : cb->len; } /* gigaset_write_room diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 6fbe8999c4191e6b5b7d1ac78512c1135803f4c5..e5ea344a551a14d32cbad7a6941720606af0a32d 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -45,6 +45,7 @@ #define CAPI_FACILITY_LI 0x0005 #define CAPI_SUPPSVC_GETSUPPORTED 0x0000 +#define CAPI_SUPPSVC_LISTEN 0x0001 /* missing from capiutil.h */ #define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9) @@ -270,9 +271,13 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag, kfree(dbgline); if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 && (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ || - CAPIMSG_SUBCOMMAND(data) == CAPI_IND) && - CAPIMSG_DATALEN(data) > 0) { + CAPIMSG_SUBCOMMAND(data) == CAPI_IND)) { l = CAPIMSG_DATALEN(data); + gig_dbg(level, " DataLength=%d", l); + if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA)) + return; + if (l > 64) + l = 64; /* arbitrary limit */ dbgline = kmalloc(3*l, GFP_ATOMIC); if (!dbgline) return; @@ -378,13 +383,13 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb) ++bcs->trans_up; if (!ap) { - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); return; } /* don't send further B3 messages if disconnected */ if (bcs->apconnstate < APCONN_ACTIVE) { - gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack"); + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); return; } @@ -422,13 +427,14 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) bcs->trans_down++; if (!ap) { - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); + dev_kfree_skb_any(skb); return; } /* don't send further B3 messages if disconnected */ if (bcs->apconnstate < APCONN_ACTIVE) { - gig_dbg(DEBUG_LLDATA, "disconnected, discarding data"); + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); dev_kfree_skb_any(skb); return; } @@ -454,7 +460,7 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) /* Data64 parameter not present */ /* emit message */ - dump_rawmsg(DEBUG_LLDATA, "DATA_B3_IND", skb->data); + dump_rawmsg(DEBUG_MCMD, __func__, skb->data); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } EXPORT_SYMBOL_GPL(gigaset_skb_rcvd); @@ -747,7 +753,7 @@ void gigaset_isdn_connD(struct bc_state *bcs) ap = bcs->ap; if (!ap) { spin_unlock_irqrestore(&bcs->aplock, flags); - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } if (bcs->apconnstate == APCONN_NONE) { @@ -843,7 +849,7 @@ void gigaset_isdn_connB(struct bc_state *bcs) ap = bcs->ap; if (!ap) { spin_unlock_irqrestore(&bcs->aplock, flags); - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } if (!bcs->apconnstate) { @@ -901,13 +907,12 @@ void gigaset_isdn_connB(struct bc_state *bcs) */ void gigaset_isdn_hupB(struct bc_state *bcs) { - struct cardstate *cs = bcs->cs; struct gigaset_capi_appl *ap = bcs->ap; /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? 
*/ if (!ap) { - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } @@ -978,6 +983,9 @@ static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, struct cardstate *cs = ctr->driverdata; struct gigaset_capi_appl *ap; + gig_dbg(DEBUG_CMD, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u", + __func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen); + list_for_each_entry(ap, &iif->appls, ctrlist) if (ap->id == appl) { dev_notice(cs->dev, @@ -1062,6 +1070,8 @@ static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl) struct gigaset_capi_appl *ap, *tmp; unsigned ch; + gig_dbg(DEBUG_CMD, "%s [%u]", __func__, appl); + list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist) if (ap->id == appl) { /* remove from any channels */ @@ -1142,7 +1152,7 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, case CAPI_FACILITY_SUPPSVC: /* decode Function parameter */ pparam = cmsg->FacilityRequestParameter; - if (pparam == NULL || *pparam < 2) { + if (pparam == NULL || pparam[0] < 2) { dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ", "Facility Request Parameter"); send_conf(iif, ap, skb, CapiIllMessageParmCoding); @@ -1159,8 +1169,32 @@ static void do_facility_req(struct gigaset_capi_ctr *iif, /* Supported Services: none */ capimsg_setu32(confparam, 6, 0); break; + case CAPI_SUPPSVC_LISTEN: + if (pparam[0] < 7 || pparam[3] < 4) { + dev_notice(cs->dev, "%s: %s missing\n", + "FACILITY_REQ", "Notification Mask"); + send_conf(iif, ap, skb, + CapiIllMessageParmCoding); + return; + } + if (CAPIMSG_U32(pparam, 4) != 0) { + dev_notice(cs->dev, + "%s: unsupported supplementary service notification mask 0x%x\n", + "FACILITY_REQ", CAPIMSG_U32(pparam, 4)); + info = CapiFacilitySpecificFunctionNotSupported; + confparam[3] = 2; /* length */ + capimsg_setu16(confparam, 4, + CapiSupplementaryServiceNotSupported); + } + info = CapiSuccess; + confparam[3] = 2; /* length */ + capimsg_setu16(confparam, 4, CapiSuccess); + break; /* ToDo: add supported services */ default: + dev_notice(cs->dev, + "%s: unsupported supplementary service function 0x%04x\n", + "FACILITY_REQ", function); info = CapiFacilitySpecificFunctionNotSupported; /* Supplementary Service specific parameter */ confparam[3] = 2; /* length */ @@ -1951,11 +1985,7 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif, u16 handle = CAPIMSG_HANDLE_REQ(skb->data); /* frequent message, avoid _cmsg overhead */ - dump_rawmsg(DEBUG_LLDATA, "DATA_B3_REQ", skb->data); - - gig_dbg(DEBUG_LLDATA, - "Receiving data from LL (ch: %d, flg: %x, sz: %d|%d)", - channel, flags, msglen, datalen); + dump_rawmsg(DEBUG_MCMD, __func__, skb->data); /* check parameters */ if (channel == 0 || channel > cs->channels || ncci != 1) { @@ -2064,7 +2094,7 @@ static void do_data_b3_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { - dump_rawmsg(DEBUG_LLDATA, __func__, skb->data); + dump_rawmsg(DEBUG_MCMD, __func__, skb->data); dev_kfree_skb_any(skb); } diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 5d4befb81057a1c7675cb86d05b8cb82737495ec..3ca561eccd9f68dc85ccc26c99f37a4562485392 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c @@ -791,8 +791,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, spin_unlock_irqrestore(&cs->lock, flags); setup_timer(&cs->timer, timer_tick, (unsigned long) cs); cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK); - /* FIXME: can jiffies increase 
too much until the timer is added? - * Same problem(?) with mod_timer() in timer_tick(). */ add_timer(&cs->timer); gig_dbg(DEBUG_INIT, "cs initialized"); diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index ceaef9a04a42410b876de74558a51301d3a89c3e..a14187605f5e477549b11143d5da129c55aec9b0 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -35,53 +35,40 @@ #define RT_RING 2 #define RT_NUMBER 3 #define RT_STRING 4 -#define RT_HEX 5 #define RT_ZCAU 6 /* Possible ASCII responses */ #define RSP_OK 0 -#define RSP_BUSY 1 -#define RSP_CONNECT 2 +#define RSP_ERROR 1 #define RSP_ZGCI 3 #define RSP_RING 4 -#define RSP_ZAOC 5 -#define RSP_ZCSTR 6 -#define RSP_ZCFGT 7 -#define RSP_ZCFG 8 -#define RSP_ZCCR 9 -#define RSP_EMPTY 10 -#define RSP_ZLOG 11 -#define RSP_ZCAU 12 -#define RSP_ZMWI 13 -#define RSP_ZABINFO 14 -#define RSP_ZSMLSTCHG 15 +#define RSP_ZVLS 5 +#define RSP_ZCAU 6 + +/* responses with values to store in at_state */ +/* - numeric */ #define RSP_VAR 100 #define RSP_ZSAU (RSP_VAR + VAR_ZSAU) #define RSP_ZDLE (RSP_VAR + VAR_ZDLE) -#define RSP_ZVLS (RSP_VAR + VAR_ZVLS) #define RSP_ZCTP (RSP_VAR + VAR_ZCTP) +/* - string */ #define RSP_STR (RSP_VAR + VAR_NUM) #define RSP_NMBR (RSP_STR + STR_NMBR) #define RSP_ZCPN (RSP_STR + STR_ZCPN) #define RSP_ZCON (RSP_STR + STR_ZCON) #define RSP_ZBC (RSP_STR + STR_ZBC) #define RSP_ZHLC (RSP_STR + STR_ZHLC) -#define RSP_ERROR -1 /* ERROR */ + #define RSP_WRONG_CID -2 /* unknown cid in cmd */ -#define RSP_UNKNOWN -4 /* unknown response */ -#define RSP_FAIL -5 /* internal error */ #define RSP_INVAL -6 /* invalid response */ +#define RSP_NODEV -9 /* device not connected */ #define RSP_NONE -19 #define RSP_STRING -20 #define RSP_NULL -21 -#define RSP_RETRYFAIL -22 -#define RSP_RETRY -23 -#define RSP_SKIP -24 #define RSP_INIT -27 #define RSP_ANY -26 #define RSP_LAST -28 -#define RSP_NODEV -9 /* actions for process_response */ #define ACT_NOTHING 0 @@ -259,13 +246,6 @@ struct reply_t gigaset_tab_nocid[] = /* misc. */ {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, -{RSP_ZCFGT, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZCFG, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZLOG, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZMWI, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZABINFO, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZSMLSTCHG, -1, -1, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, @@ -361,10 +341,6 @@ struct reply_t gigaset_tab_cid[] = /* misc. 
*/ {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZCCR, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZAOC, -1, -1, -1, -1, -1, {ACT_DEBUG} }, -{RSP_ZCSTR, -1, -1, -1, -1, -1, {ACT_DEBUG} }, - {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, @@ -387,20 +363,11 @@ static const struct resp_type_t { {"ZVLS", RSP_ZVLS, RT_NUMBER}, {"ZCTP", RSP_ZCTP, RT_NUMBER}, {"ZDLE", RSP_ZDLE, RT_NUMBER}, - {"ZCFGT", RSP_ZCFGT, RT_NUMBER}, - {"ZCCR", RSP_ZCCR, RT_NUMBER}, - {"ZMWI", RSP_ZMWI, RT_NUMBER}, {"ZHLC", RSP_ZHLC, RT_STRING}, {"ZBC", RSP_ZBC, RT_STRING}, {"NMBR", RSP_NMBR, RT_STRING}, {"ZCPN", RSP_ZCPN, RT_STRING}, {"ZCON", RSP_ZCON, RT_STRING}, - {"ZAOC", RSP_ZAOC, RT_STRING}, - {"ZCSTR", RSP_ZCSTR, RT_STRING}, - {"ZCFG", RSP_ZCFG, RT_HEX}, - {"ZLOG", RSP_ZLOG, RT_NOTHING}, - {"ZABINFO", RSP_ZABINFO, RT_NOTHING}, - {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING}, {NULL, 0, 0} }; @@ -418,64 +385,18 @@ static const struct zsau_resp_t { {NULL, ZSAU_UNKNOWN} }; -/* - * Get integer from char-pointer - */ -static int isdn_getnum(char *p) -{ - int v = -1; - - gig_dbg(DEBUG_EVENT, "string: %s", p); - - while (*p >= '0' && *p <= '9') - v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p++) - '0'); - if (*p) - v = -1; /* invalid Character */ - return v; -} - -/* - * Get integer from char-pointer - */ -static int isdn_gethex(char *p) -{ - int v = 0; - int c; - - gig_dbg(DEBUG_EVENT, "string: %s", p); - - if (!*p) - return -1; - - do { - if (v > (INT_MAX - 15) / 16) - return -1; - c = *p; - if (c >= '0' && c <= '9') - c -= '0'; - else if (c >= 'a' && c <= 'f') - c -= 'a' - 10; - else if (c >= 'A' && c <= 'F') - c -= 'A' - 10; - else - return -1; - v = v * 16 + c; - } while (*++p); - - return v; -} - /* retrieve CID from parsed response * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535 */ static int cid_of_response(char *s) { - int cid; + unsigned long cid; + int rc; if (s[-1] != ';') return 0; /* no CID separator */ - cid = isdn_getnum(s); - if (cid < 0) + rc = strict_strtoul(s, 10, &cid); + if (rc) return 0; /* CID not numeric */ if (cid < 1 || cid > 65535) return -1; /* CID out of range */ @@ -588,10 +509,10 @@ void gigaset_handle_modem_response(struct cardstate *cs) break; if (!rt->response) { - event->type = RSP_UNKNOWN; - dev_warn(cs->dev, - "unknown modem response: %s\n", - argv[curarg]); + event->type = RSP_NONE; + gig_dbg(DEBUG_EVENT, + "unknown modem response: '%s'\n", + argv[curarg]); break; } @@ -645,27 +566,27 @@ void gigaset_handle_modem_response(struct cardstate *cs) case RT_ZCAU: event->parameter = -1; if (curarg + 1 < params) { - i = isdn_gethex(argv[curarg]); - j = isdn_gethex(argv[curarg + 1]); - if (i >= 0 && i < 256 && j >= 0 && j < 256) - event->parameter = (unsigned) i << 8 - | j; - curarg += 2; + unsigned long type, value; + + i = strict_strtoul(argv[curarg++], 16, &type); + j = strict_strtoul(argv[curarg++], 16, &value); + + if (i == 0 && type < 256 && + j == 0 && value < 256) + event->parameter = (type << 8) | value; } else curarg = params - 1; break; case RT_NUMBER: - case RT_HEX: + event->parameter = -1; if (curarg < params) { - if (param_type == RT_HEX) - event->parameter = - isdn_gethex(argv[curarg]); - else - event->parameter = - isdn_getnum(argv[curarg]); - ++curarg; - } else - event->parameter = -1; + unsigned long res; + int rc; + + rc = strict_strtoul(argv[curarg++], 10, &res); + if (rc == 0) + event->parameter = res; + } gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter); break; } @@ 
-797,48 +718,27 @@ static void schedule_init(struct cardstate *cs, int state) static void send_command(struct cardstate *cs, const char *cmd, int cid, int dle, gfp_t kmallocflags) { - size_t cmdlen, buflen; - char *cmdpos, *cmdbuf, *cmdtail; + struct cmdbuf_t *cb; + size_t buflen; - cmdlen = strlen(cmd); - buflen = 11 + cmdlen; - if (unlikely(buflen <= cmdlen)) { - dev_err(cs->dev, "integer overflow in buflen\n"); + buflen = strlen(cmd) + 12; /* DLE ( A T 1 2 3 4 5 DLE ) \0 */ + cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, kmallocflags); + if (!cb) { + dev_err(cs->dev, "%s: out of memory\n", __func__); return; } - - cmdbuf = kmalloc(buflen, kmallocflags); - if (unlikely(!cmdbuf)) { - dev_err(cs->dev, "out of memory\n"); - return; - } - - cmdpos = cmdbuf + 9; - cmdtail = cmdpos + cmdlen; - memcpy(cmdpos, cmd, cmdlen); - - if (cid > 0 && cid <= 65535) { - do { - *--cmdpos = '0' + cid % 10; - cid /= 10; - ++cmdlen; - } while (cid); - } - - cmdlen += 2; - *--cmdpos = 'T'; - *--cmdpos = 'A'; - - if (dle) { - cmdlen += 4; - *--cmdpos = '('; - *--cmdpos = 0x10; - *cmdtail++ = 0x10; - *cmdtail++ = ')'; - } - - cs->ops->write_cmd(cs, cmdpos, cmdlen, NULL); - kfree(cmdbuf); + if (cid > 0 && cid <= 65535) + cb->len = snprintf(cb->buf, buflen, + dle ? "\020(AT%d%s\020)" : "AT%d%s", + cid, cmd); + else + cb->len = snprintf(cb->buf, buflen, + dle ? "\020(AT%s\020)" : "AT%s", + cmd); + cb->offset = 0; + cb->next = NULL; + cb->wake_tasklet = NULL; + cs->ops->write_cmd(cs, cb); } static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid) @@ -1240,8 +1140,22 @@ static void do_action(int action, struct cardstate *cs, break; case ACT_HUPMODEM: /* send "+++" (hangup in unimodem mode) */ - if (cs->connected) - cs->ops->write_cmd(cs, "+++", 3, NULL); + if (cs->connected) { + struct cmdbuf_t *cb; + + cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC); + if (!cb) { + dev_err(cs->dev, "%s: out of memory\n", + __func__); + return; + } + memcpy(cb->buf, "+++", 3); + cb->len = 3; + cb->offset = 0; + cb->next = NULL; + cb->wake_tasklet = NULL; + cs->ops->write_cmd(cs, cb); + } break; case ACT_RING: /* get fresh AT state structure for new CID */ @@ -1855,19 +1769,13 @@ static void process_command_flags(struct cardstate *cs) gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE"); cs->commands_pending = 1; return; -#ifdef GIG_MAYINITONDIAL case M_UNKNOWN: schedule_init(cs, MS_INIT); return; -#endif } bcs->at_state.pending_commands &= ~PC_CID; cs->curchannel = bcs->channel; -#ifdef GIG_RETRYCID cs->retry_count = 2; -#else - cs->retry_count = 1; -#endif schedule_sequence(cs, &cs->at_state, SEQ_CID); return; } diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 8738b0821fc9c29ea7ddcbec6a888db709766272..a69512fb11951d2930c97798c50c2c52acf7c069 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h @@ -46,13 +46,6 @@ #define RBUFSIZE 8192 -/* compile time options */ -#define GIG_MAJOR 0 - -#define GIG_MAYINITONDIAL -#define GIG_RETRYCID -#define GIG_X75 - #define GIG_TICK 100 /* in milliseconds */ /* timeout values (unit: 1 sec) */ @@ -193,9 +186,8 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, /* variables in struct at_state_t */ #define VAR_ZSAU 0 #define VAR_ZDLE 1 -#define VAR_ZVLS 2 -#define VAR_ZCTP 3 -#define VAR_NUM 4 +#define VAR_ZCTP 2 +#define VAR_NUM 3 #define STR_NMBR 0 #define STR_ZCPN 1 @@ -574,9 +566,7 @@ struct bas_bc_state { struct gigaset_ops { /* Called from ev-layer.c/interface.c for sending AT commands to the 
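/*
 * Illustration (not part of the patch): the allocation pattern that all
 * users of the reworked ->write_cmd() interface follow.  The cmdbuf_t
 * layout is assumed from the fields the hunks above initialize (len,
 * offset, next, wake_tasklet and a trailing buf[]); the real definition
 * lives in gigaset.h.  Ownership of the buffer passes to the hardware
 * driver, so the caller does not free it on success.
 */
static int example_queue_command(struct cardstate *cs, const char *cmd,
				 gfp_t flags)
{
	size_t len = strlen(cmd);
	struct cmdbuf_t *cb;

	cb = kmalloc(sizeof(struct cmdbuf_t) + len, flags);
	if (!cb)
		return -ENOMEM;		/* caller reports the error */
	memcpy(cb->buf, cmd, len);
	cb->len = len;
	cb->offset = 0;
	cb->next = NULL;
	cb->wake_tasklet = NULL;	/* or &cs->if_wake_tasklet to be woken */
	return cs->ops->write_cmd(cs, cb);
}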
device */ - int (*write_cmd)(struct cardstate *cs, - const unsigned char *buf, int len, - struct tasklet_struct *wake_tasklet); + int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb); /* Called from interface.c for additional device control */ int (*write_room)(struct cardstate *cs); diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index f01c3c2e2e46eb5f5194fa8afa8ba78e8d3b8532..34bca37d65b9b4a1cd15beb6d924f91a75a60dea 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c @@ -419,6 +419,8 @@ static int command_from_LL(isdn_ctrl *cntrl) dev_err(bcs->cs->dev, "out of memory\n"); for (i = 0; i < AT_NUM; ++i) kfree(commands[i]); + kfree(commands); + gigaset_free_channel(bcs); return -ENOMEM; } @@ -643,9 +645,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) iif->maxbufsize = MAX_BUF_SIZE; iif->features = ISDN_FEATURE_L2_TRANS | ISDN_FEATURE_L2_HDLC | -#ifdef GIG_X75 ISDN_FEATURE_L2_X75I | -#endif ISDN_FEATURE_L3_TRANS | ISDN_FEATURE_P_EURO; iif->hl_hdrlen = HW_HDR_LEN; /* Area for storing ack */ diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index c9f28dd40d5c463d1d12821e47965655da123097..bb710d16a5264a378e81305438007a29bc0303d8 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c @@ -339,7 +339,8 @@ static int if_tiocmset(struct tty_struct *tty, struct file *file, static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct cardstate *cs; - int retval = -ENODEV; + struct cmdbuf_t *cb; + int retval; cs = (struct cardstate *) tty->driver_data; if (!cs) { @@ -355,18 +356,39 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) if (!cs->connected) { gig_dbg(DEBUG_IF, "not connected"); retval = -ENODEV; - } else if (!cs->open_count) + goto done; + } + if (!cs->open_count) { dev_warn(cs->dev, "%s: device not opened\n", __func__); - else if (cs->mstate != MS_LOCKED) { + retval = -ENODEV; + goto done; + } + if (cs->mstate != MS_LOCKED) { dev_warn(cs->dev, "can't write to unlocked device\n"); retval = -EBUSY; - } else { - retval = cs->ops->write_cmd(cs, buf, count, - &cs->if_wake_tasklet); + goto done; + } + if (count <= 0) { + /* nothing to do */ + retval = 0; + goto done; } - mutex_unlock(&cs->mutex); + cb = kmalloc(sizeof(struct cmdbuf_t) + count, GFP_KERNEL); + if (!cb) { + dev_err(cs->dev, "%s: out of memory\n", __func__); + retval = -ENOMEM; + goto done; + } + memcpy(cb->buf, buf, count); + cb->len = count; + cb->offset = 0; + cb->next = NULL; + cb->wake_tasklet = &cs->if_wake_tasklet; + retval = cs->ops->write_cmd(cs, cb); +done: + mutex_unlock(&cs->mutex); return retval; } @@ -655,7 +677,6 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, goto enomem; tty->magic = TTY_DRIVER_MAGIC, - tty->major = GIG_MAJOR, tty->type = TTY_DRIVER_TYPE_SERIAL, tty->subtype = SERIAL_TYPE_NORMAL, tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index e96c0586886c14b5b626427f6199144d84ae4123..d151dcbf770d7dfe668643d65dad9574bc06716e 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -241,30 +241,13 @@ static void flush_send_queue(struct cardstate *cs) * return value: * number of bytes queued, or error code < 0 */ -static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, - int len, struct tasklet_struct *wake_tasklet) +static int 
gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) { - struct cmdbuf_t *cb; unsigned long flags; gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? DEBUG_TRANSCMD : DEBUG_LOCKCMD, - "CMD Transmit", len, buf); - - if (len <= 0) - return 0; - - cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC); - if (!cb) { - dev_err(cs->dev, "%s: out of memory!\n", __func__); - return -ENOMEM; - } - - memcpy(cb->buf, buf, len); - cb->len = len; - cb->offset = 0; - cb->next = NULL; - cb->wake_tasklet = wake_tasklet; + "CMD Transmit", cb->len, cb->buf); spin_lock_irqsave(&cs->cmdlock, flags); cb->prev = cs->lastcmdbuf; @@ -272,9 +255,9 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, cs->lastcmdbuf->next = cb; else { cs->cmdbuf = cb; - cs->curlen = len; + cs->curlen = cb->len; } - cs->cmdbytes += len; + cs->cmdbytes += cb->len; cs->lastcmdbuf = cb; spin_unlock_irqrestore(&cs->cmdlock, flags); @@ -282,7 +265,7 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, if (cs->connected) tasklet_schedule(&cs->write_tasklet); spin_unlock_irqrestore(&cs->lock, flags); - return len; + return cb->len; } /* diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 76dbb20f3065aeb23f710b99c401286c91050bfa..4a66338f4e7d6ae6dc8e8e62446a7b9ea5ef7a40 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -39,7 +39,8 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode"); #define GIGASET_MODULENAME "usb_gigaset" #define GIGASET_DEVNAME "ttyGU" -#define IF_WRITEBUF 2000 /* arbitrary limit */ +/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */ +#define IF_WRITEBUF 264 /* Values for the Gigaset M105 Data */ #define USB_M105_VENDOR_ID 0x0681 @@ -493,29 +494,13 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) } /* Send command to device. */ -static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, - int len, struct tasklet_struct *wake_tasklet) +static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) { - struct cmdbuf_t *cb; unsigned long flags; gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 
DEBUG_TRANSCMD : DEBUG_LOCKCMD, - "CMD Transmit", len, buf); - - if (len <= 0) - return 0; - cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC); - if (!cb) { - dev_err(cs->dev, "%s: out of memory\n", __func__); - return -ENOMEM; - } - - memcpy(cb->buf, buf, len); - cb->len = len; - cb->offset = 0; - cb->next = NULL; - cb->wake_tasklet = wake_tasklet; + "CMD Transmit", cb->len, cb->buf); spin_lock_irqsave(&cs->cmdlock, flags); cb->prev = cs->lastcmdbuf; @@ -523,9 +508,9 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, cs->lastcmdbuf->next = cb; else { cs->cmdbuf = cb; - cs->curlen = len; + cs->curlen = cb->len; } - cs->cmdbytes += len; + cs->cmdbytes += cb->len; cs->lastcmdbuf = cb; spin_unlock_irqrestore(&cs->cmdlock, flags); @@ -533,7 +518,7 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, if (cs->connected) tasklet_schedule(&cs->write_tasklet); spin_unlock_irqrestore(&cs->lock, flags); - return len; + return cb->len; } static int gigaset_write_room(struct cardstate *cs) diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c index 1e85f743214e3507a03eabd05a829539d86e300d..f1d464f1e107a12c0d8191c309ab124a9c30e934 100644 --- a/drivers/isdn/hardware/eicon/divamnt.c +++ b/drivers/isdn/hardware/eicon/divamnt.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include "platform.h" @@ -22,6 +22,7 @@ #include "divasync.h" #include "debug_if.h" +static DEFINE_MUTEX(maint_mutex); static char *main_revision = "$Revision: 1.32.6.10 $"; static int major; @@ -130,7 +131,7 @@ static int maint_open(struct inode *ino, struct file *filep) { int ret; - lock_kernel(); + mutex_lock(&maint_mutex); /* only one open is allowed, so we test it atomically */ if (test_and_set_bit(0, &opened)) @@ -139,7 +140,7 @@ static int maint_open(struct inode *ino, struct file *filep) filep->private_data = NULL; ret = nonseekable_open(ino, filep); } - unlock_kernel(); + mutex_unlock(&maint_mutex); return ret; } diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c index f577719ab3fa3a71d04bd7c81ee311e438efd1e0..42d3b83460346a18a51f1db5ffbb20606ff5140a 100644 --- a/drivers/isdn/hardware/eicon/divasi.c +++ b/drivers/isdn/hardware/eicon/divasi.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include "platform.h" @@ -402,7 +401,6 @@ static unsigned int um_idi_poll(struct file *file, poll_table * wait) static int um_idi_open(struct inode *inode, struct file *file) { - cycle_kernel_lock(); return (0); } diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index fbbcb27fb6819bcda103ba9e1d5697e6e89bb8c7..ed9c555067975cec3d6d443c06249fd027e791d5 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c @@ -21,7 +21,6 @@ #include #include #include -#include #include "platform.h" #undef ID_MASK @@ -113,41 +112,40 @@ typedef struct _diva_os_thread_dpc { This table should be sorted by PCI device ID */ static struct pci_device_id divas_pci_tbl[] = { -/* Diva Server BRI-2M PCI 0xE010 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_MAESTRA_PCI}, -/* Diva Server 4BRI-8M PCI 0xE012 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRAQ, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_Q_8M_PCI}, -/* Diva Server 4BRI-8M 2.0 PCI 0xE013 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRAQ_U, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CARDTYPE_DIVASRV_Q_8M_V2_PCI}, -/* Diva Server PRI-30M PCI 0xE014 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRAP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_P_30M_PCI}, -/* Diva Server PRI 2.0 adapter 0xE015 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_P_30M_V2_PCI}, -/* Diva Server Voice 4BRI-8M PCI 0xE016 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_4BRI_VOIP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_VOICE_Q_8M_PCI}, -/* Diva Server Voice 4BRI-8M 2.0 PCI 0xE017 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_4BRI_2_VOIP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI}, -/* Diva Server BRI-2M 2.0 PCI 0xE018 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_BRI2M_2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_B_2M_V2_PCI}, -/* Diva Server Voice PRI 2.0 PCI 0xE019 */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI}, -/* Diva Server 2FX 0xE01A */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_2F, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_B_2F_PCI}, -/* Diva Server Voice BRI-2M 2.0 PCI 0xE01B */ - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_BRI2M_2_VOIP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI}, - {0,} /* 0 terminated list. */ + /* Diva Server BRI-2M PCI 0xE010 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA), + CARDTYPE_MAESTRA_PCI }, + /* Diva Server 4BRI-8M PCI 0xE012 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAQ), + CARDTYPE_DIVASRV_Q_8M_PCI }, + /* Diva Server 4BRI-8M 2.0 PCI 0xE013 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAQ_U), + CARDTYPE_DIVASRV_Q_8M_V2_PCI }, + /* Diva Server PRI-30M PCI 0xE014 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP), + CARDTYPE_DIVASRV_P_30M_PCI }, + /* Diva Server PRI 2.0 adapter 0xE015 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2), + CARDTYPE_DIVASRV_P_30M_V2_PCI }, + /* Diva Server Voice 4BRI-8M PCI 0xE016 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_4BRI_VOIP), + CARDTYPE_DIVASRV_VOICE_Q_8M_PCI }, + /* Diva Server Voice 4BRI-8M 2.0 PCI 0xE017 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_4BRI_2_VOIP), + CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI }, + /* Diva Server BRI-2M 2.0 PCI 0xE018 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_BRI2M_2), + CARDTYPE_DIVASRV_B_2M_V2_PCI }, + /* Diva Server Voice PRI 2.0 PCI 0xE019 */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP), + CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI }, + /* Diva Server 2FX 0xE01A */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_2F), + CARDTYPE_DIVASRV_B_2F_PCI }, + /* Diva Server Voice BRI-2M 2.0 PCI 0xE01B */ + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_BRI2M_2_VOIP), + CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI }, + { 0, } /* 0 terminated list. 
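/*
 * For reference (not part of the patch): PCI_VDEVICE() only fills in the
 * vendor/device pair and wildcards the subsystem IDs, so every converted
 * entry matches exactly the same hardware as the open-coded original.
 * It is defined along these lines in <linux/pci.h>:
 *
 *	#define PCI_VDEVICE(vendor, device)		\
 *		PCI_VENDOR_ID_##vendor, (device),	\
 *		PCI_ANY_ID, PCI_ANY_ID, 0, 0
 *
 * so, for example,
 *
 *	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA), CARDTYPE_MAESTRA_PCI }
 *
 * expands to
 *
 *	{ PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_MAESTRA,
 *	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, CARDTYPE_MAESTRA_PCI }
 *
 * leaving only the driver_data value to be written out per entry.
 */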
*/ }; MODULE_DEVICE_TABLE(pci, divas_pci_tbl); @@ -581,7 +579,6 @@ xdi_copy_from_user(void *os_handle, void *dst, const void __user *src, int lengt */ static int divas_open(struct inode *inode, struct file *file) { - cycle_kernel_lock(); return (0); } diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 095ed76ebe8056e3d7d816accfc1e32aea1fdf84..987fb1824f08ed7d376a47bc7b41c9b94a4a87a3 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -5354,12 +5354,9 @@ static struct pci_device_id hfmultipci_ids[] __devinitdata = { { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD, PCI_SUBDEVICE_ID_CCD_JHSE1, 0, 0, H(25)}, /* Junghanns E1 */ - { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0}, - { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0}, - { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0}, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_HFC4S), 0 }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_HFC8S), 0 }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_HFCE1), 0 }, {0, } }; #undef H diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 5940a2c12074a285bce286fb6659eb73944dd96f..10757abac0ba3b45b24566208d25bc930ec56d99 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2188,52 +2188,52 @@ static const struct _hfc_map hfc_map[] = static struct pci_device_id hfc_ids[] = { - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[0]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]}, - {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]}, - {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]}, - {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]}, - {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]}, - {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]}, - {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]}, - 
{PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]}, - {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]}, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0), + (unsigned long) &hfc_map[0] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000), + (unsigned long) &hfc_map[1] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006), + (unsigned long) &hfc_map[2] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007), + (unsigned long) &hfc_map[3] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008), + (unsigned long) &hfc_map[4] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009), + (unsigned long) &hfc_map[5] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A), + (unsigned long) &hfc_map[6] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B), + (unsigned long) &hfc_map[7] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C), + (unsigned long) &hfc_map[8] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100), + (unsigned long) &hfc_map[9] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700), + (unsigned long) &hfc_map[10] }, + { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701), + (unsigned long) &hfc_map[11] }, + { PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1), + (unsigned long) &hfc_map[12] }, + { PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675), + (unsigned long) &hfc_map[13] }, + { PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT), + (unsigned long) &hfc_map[14] }, + { PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T), + (unsigned long) &hfc_map[15] }, + { PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575), + (unsigned long) &hfc_map[16] }, + { PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0), + (unsigned long) &hfc_map[17] }, + { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E), + (unsigned long) &hfc_map[18] }, + { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E), + (unsigned long) &hfc_map[19] }, + { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A), + (unsigned long) &hfc_map[20] }, + { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A), + (unsigned long) &hfc_map[21] }, + { PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2), + (unsigned long) &hfc_map[22] }, {}, }; diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index f5b3d2b26a0811455bccd1824face31364eaa372..2a2181d58debaa0a055dc70e633badeee15700cd 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -125,36 +125,25 @@ struct inf_hw { #define PCI_SUB_ID_SEDLBAUER 0x01 static struct pci_device_id infineon_ids[] __devinitdata = { - { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA20}, - { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20_U, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA20U}, - { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA201, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA201}, - { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA202, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA202}, + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20), INF_DIVA20 }, + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U), INF_DIVA20U }, + { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201), INF_DIVA201 }, + { 
PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA202), INF_DIVA202 }, { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100, PCI_SUBVENDOR_SEDLBAUER_PCI, PCI_SUB_ID_SEDLBAUER, 0, 0, - INF_SPEEDWIN}, + INF_SPEEDWIN }, { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100, - PCI_SUBVENDOR_HST_SAPHIR3, PCI_SUB_ID_SEDLBAUER, 0, 0, INF_SAPHIR3}, - { PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_MICROLINK, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_QS1000}, - { PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_QS3000, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_QS3000}, - { PCI_VENDOR_ID_SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_NICCY}, + PCI_SUBVENDOR_HST_SAPHIR3, PCI_SUB_ID_SEDLBAUER, 0, 0, INF_SAPHIR3 }, + { PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_MICROLINK), INF_QS1000 }, + { PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_QS3000), INF_QS3000 }, + { PCI_VDEVICE(SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY), INF_NICCY }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO, 0, 0, - INF_SCT_1}, - { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R685, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R685}, - { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R753, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R753}, - { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R753}, - { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_OLITEC, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R753}, + INF_SCT_1 }, + { PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R685), INF_GAZEL_R685 }, + { PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R753), INF_GAZEL_R753 }, + { PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO), INF_GAZEL_R753 }, + { PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_OLITEC), INF_GAZEL_R753 }, { } }; MODULE_DEVICE_TABLE(pci, infineon_ids); diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 544cf4b1cce3a1580b512d33d80dd92d428e754d..6f9afcd5ca4e9301a07c8ec5a4c0473e255363c3 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1909,68 +1909,68 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if) static struct pci_device_id hisax_pci_tbl[] __devinitdata = { #ifdef CONFIG_HISAX_FRITZPCI - {PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) }, #endif #ifdef CONFIG_HISAX_DIEHLDIVA - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20_U, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA201, PCI_ANY_ID, PCI_ANY_ID}, -//######################################################################################### - {PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA202, PCI_ANY_ID, PCI_ANY_ID}, -//######################################################################################### + {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20) }, + {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U) }, + {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201) }, +/*##########################################################################*/ + {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA202) }, +/*##########################################################################*/ #endif #ifdef CONFIG_HISAX_ELSA - {PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_MICROLINK, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_QS3000, PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_MICROLINK) }, + {PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_QS3000) }, #endif #ifdef CONFIG_HISAX_GAZEL - {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R685, 
PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R753, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_OLITEC, PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R685) }, + {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R753) }, + {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO) }, + {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_OLITEC) }, #endif #ifdef CONFIG_HISAX_SCT_QUADRO - {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_9050) }, #endif #ifdef CONFIG_HISAX_NICCY - {PCI_VENDOR_ID_SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY, PCI_ANY_ID,PCI_ANY_ID}, + {PCI_VDEVICE(SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY) }, #endif #ifdef CONFIG_HISAX_SEDLBAUER - {PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100, PCI_ANY_ID,PCI_ANY_ID}, + {PCI_VDEVICE(TIGERJET, PCI_DEVICE_ID_TIGERJET_100) }, #endif #if defined(CONFIG_HISAX_NETJET) || defined(CONFIG_HISAX_NETJET_U) - {PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300, PCI_ANY_ID,PCI_ANY_ID}, + {PCI_VDEVICE(TIGERJET, PCI_DEVICE_ID_TIGERJET_300) }, #endif #if defined(CONFIG_HISAX_TELESPCI) || defined(CONFIG_HISAX_SCT_QUADRO) - {PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, PCI_ANY_ID,PCI_ANY_ID}, + {PCI_VDEVICE(ZORAN, PCI_DEVICE_ID_ZORAN_36120) }, #endif #ifdef CONFIG_HISAX_W6692 - {PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, PCI_ANY_ID,PCI_ANY_ID}, - {PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, PCI_ANY_ID,PCI_ANY_ID}, + {PCI_VDEVICE(DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH) }, + {PCI_VDEVICE(WINBOND2, PCI_DEVICE_ID_WINBOND2_6692) }, #endif #ifdef CONFIG_HISAX_HFC_PCI - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, PCI_ANY_ID, PCI_ANY_ID}, - {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007) }, + {PCI_VDEVICE(CCD, 
PCI_DEVICE_ID_CCD_B008) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700) }, + {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701) }, + {PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1) }, + {PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675) }, + {PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT) }, + {PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T) }, + {PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575) }, + {PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0) }, + {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E) }, + {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E) }, + {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A) }, + {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A) }, #endif { } /* Terminating entry */ }; diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c index 8b853d58e8208e7ca64adacd2b197609a2ba3133..c0771f98fa11ae175e383692329d997e89f1724c 100644 --- a/drivers/isdn/hisax/q931.c +++ b/drivers/isdn/hisax/q931.c @@ -1152,20 +1152,11 @@ QuickHex(char *txt, u_char * p, int cnt) { register int i; register char *t = txt; - register u_char w; for (i = 0; i < cnt; i++) { *t++ = ' '; - w = (p[i] >> 4) & 0x0f; - if (w < 10) - *t++ = '0' + w; - else - *t++ = 'A' - 10 + w; - w = p[i] & 0x0f; - if (w < 10) - *t++ = '0' + w; - else - *t++ = 'A' - 10 + w; + *t++ = hex_asc_hi(p[i]); + *t++ = hex_asc_lo(p[i]); } *t++ = 0; return (t - txt); diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 80966462d6dcba0eb58caf57ba87768188a0a36b..96b3e39c3356b0ff08dbfc38577b66b982cba701 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c @@ -17,11 +17,12 @@ #include #include #include -#include +#include #include #include "hysdn_defs.h" +static DEFINE_MUTEX(hysdn_conf_mutex); static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $"; #define INFO_OUT_LEN 80 /* length of info line including lf */ @@ -234,7 +235,7 @@ hysdn_conf_open(struct inode *ino, struct file *filep) char *cp, *tmp; /* now search the addressed card */ - lock_kernel(); + mutex_lock(&hysdn_conf_mutex); card = card_root; while (card) { pd = card->procconf; @@ -243,7 +244,7 @@ hysdn_conf_open(struct inode *ino, struct file *filep) card = card->next; /* search next entry */ } if (!card) { - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return (-ENODEV); /* device is unknown/invalid */ } if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) @@ -255,7 +256,7 @@ hysdn_conf_open(struct inode *ino, struct file *filep) /* write only access -> write boot file or conf line */ if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) { - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return (-EFAULT); } cnf->card = card; @@ -267,7 +268,7 @@ hysdn_conf_open(struct inode *ino, struct file *filep) /* read access -> output card info data */ if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) { - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return (-EFAULT); /* out of memory */ } filep->private_data = tmp; /* start of string */ @@ -301,10 +302,10 @@ hysdn_conf_open(struct inode *ino, struct file *filep) *cp++ = '\n'; *cp = 0; /* end of string */ } else { /* simultaneous read/write access forbidden ! 
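/*
 * For reference (not part of the patch): the hex_asc helpers used in the
 * QuickHex() conversion above come from <linux/kernel.h> and are roughly
 *
 *	extern const char hex_asc[];		(= "0123456789abcdef")
 *	#define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]
 *	#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]
 *
 * i.e. they emit lower-case digits, so the Q.931 hex dump switches from
 * upper- to lower-case output while dropping the hand-rolled nibble code.
 */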
*/ - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return (-EPERM); /* no permission this time */ } - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return nonseekable_open(ino, filep); } /* hysdn_conf_open */ @@ -319,7 +320,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep) int retval = 0; struct proc_dir_entry *pd; - lock_kernel(); + mutex_lock(&hysdn_conf_mutex); /* search the addressed card */ card = card_root; while (card) { @@ -329,7 +330,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep) card = card->next; /* search next entry */ } if (!card) { - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return (-ENODEV); /* device is unknown/invalid */ } if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) @@ -352,7 +353,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep) kfree(filep->private_data); /* release memory */ } - unlock_kernel(); + mutex_unlock(&hysdn_conf_mutex); return (retval); } /* hysdn_conf_close */ diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index e83f6fda32fe797065fc07049e9175ce92b0f507..2ee93d04b2dd688ffd92690e6d4f4a6baaafee0b 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c @@ -15,13 +15,15 @@ #include #include #include -#include +#include +#include #include "hysdn_defs.h" /* the proc subdir for the interface is defined in the procconf module */ extern struct proc_dir_entry *hysdn_proc_entry; +static DEFINE_MUTEX(hysdn_log_mutex); static void put_log_buffer(hysdn_card * card, char *cp); /*************************************************/ @@ -154,10 +156,9 @@ static ssize_t hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t * off) { unsigned long u = 0; - int found = 0; - unsigned char *cp, valbuf[128]; - long base = 10; - hysdn_card *card = (hysdn_card *) file->private_data; + int rc; + unsigned char valbuf[128]; + hysdn_card *card = file->private_data; if (count > (sizeof(valbuf) - 1)) count = sizeof(valbuf) - 1; /* limit length */ @@ -165,32 +166,10 @@ hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t return (-EFAULT); /* copy failed */ valbuf[count] = 0; /* terminating 0 */ - cp = valbuf; - if ((count > 2) && (valbuf[0] == '0') && (valbuf[1] == 'x')) { - cp += 2; /* pointer after hex modifier */ - base = 16; - } - /* scan the input for debug flags */ - while (*cp) { - if ((*cp >= '0') && (*cp <= '9')) { - found = 1; - u *= base; /* adjust to next digit */ - u += *cp++ - '0'; - continue; - } - if (base != 16) - break; /* end of number */ - - if ((*cp >= 'a') && (*cp <= 'f')) { - found = 1; - u *= base; /* adjust to next digit */ - u += *cp++ - 'a' + 10; - continue; - } - break; /* terminated */ - } - if (found) { + rc = strict_strtoul(valbuf, 0, &u); + + if (rc == 0) { card->debug_flags = u; /* remember debug flags */ hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags); } @@ -251,7 +230,7 @@ hysdn_log_open(struct inode *ino, struct file *filep) struct procdata *pd = NULL; unsigned long flags; - lock_kernel(); + mutex_lock(&hysdn_log_mutex); card = card_root; while (card) { pd = card->proclog; @@ -260,7 +239,7 @@ hysdn_log_open(struct inode *ino, struct file *filep) card = card->next; /* search next entry */ } if (!card) { - unlock_kernel(); + mutex_unlock(&hysdn_log_mutex); return (-ENODEV); /* device is unknown/invalid */ } filep->private_data = card; /* remember our own card */ @@ -278,10 +257,10 @@ hysdn_log_open(struct inode *ino, struct file *filep) filep->private_data = 
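/*
 * For reference (not part of the patch): strict_strtoul() returns 0 on
 * success and -EINVAL if the string contains anything but a number (an
 * optional trailing newline is tolerated, which keeps "echo 0x1f > ..."
 * into the proc file working here).  With base 0 a leading "0x" selects
 * hex automatically, so the removed hand-rolled decimal/hex loop collapses
 * into a single call, just as in the Gigaset response parser above:
 */
static int example_parse_flags(const char *buf, unsigned long *val)
{
	return strict_strtoul(buf, 0, val);	/* "123", "0x7b", "0x7b\n" */
}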
&pd->log_head; spin_unlock_irqrestore(&card->hysdn_lock, flags); } else { /* simultaneous read/write access forbidden ! */ - unlock_kernel(); + mutex_unlock(&hysdn_log_mutex); return (-EPERM); /* no permission this time */ } - unlock_kernel(); + mutex_unlock(&hysdn_log_mutex); return nonseekable_open(ino, filep); } /* hysdn_log_open */ @@ -300,7 +279,7 @@ hysdn_log_close(struct inode *ino, struct file *filep) hysdn_card *card; int retval = 0; - lock_kernel(); + mutex_lock(&hysdn_log_mutex); if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { /* write only access -> write debug level written */ retval = 0; /* success */ @@ -339,7 +318,7 @@ hysdn_log_close(struct inode *ino, struct file *filep) kfree(inf); } } /* read access */ - unlock_kernel(); + mutex_unlock(&hysdn_log_mutex); return (retval); } /* hysdn_log_close */ diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index a44cdb492ea999568695428b9a8353090cea0e24..15632bd2f643cd8c48da2b7d2d6121b6f9d8df1a 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "isdn_common.h" #include "isdn_tty.h" #include "isdn_net.h" @@ -42,6 +42,7 @@ MODULE_LICENSE("GPL"); isdn_dev *dev; +static DEFINE_MUTEX(isdn_mutex); static char *isdn_revision = "$Revision: 1.1.2.3 $"; extern char *isdn_net_revision; @@ -1070,7 +1071,7 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t * off) int retval; char *p; - lock_kernel(); + mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { if (!file->private_data) { if (file->f_flags & O_NONBLOCK) { @@ -1163,7 +1164,7 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t * off) #endif retval = -ENODEV; out: - unlock_kernel(); + mutex_unlock(&isdn_mutex); return retval; } @@ -1180,7 +1181,7 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off if (!dev->drivers) return -ENODEV; - lock_kernel(); + mutex_lock(&isdn_mutex); if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_write minor %d obsolete!\n", minor); drvidx = isdn_minor2drv(minor); @@ -1225,7 +1226,7 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off #endif retval = -ENODEV; out: - unlock_kernel(); + mutex_unlock(&isdn_mutex); return retval; } @@ -1236,7 +1237,7 @@ isdn_poll(struct file *file, poll_table * wait) unsigned int minor = iminor(file->f_path.dentry->d_inode); int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); - lock_kernel(); + mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { poll_wait(file, &(dev->info_waitq), wait); /* mask = POLLOUT | POLLWRNORM; */ @@ -1266,7 +1267,7 @@ isdn_poll(struct file *file, poll_table * wait) #endif mask = POLLERR; out: - unlock_kernel(); + mutex_unlock(&isdn_mutex); return mask; } @@ -1727,9 +1728,9 @@ isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&isdn_mutex); ret = isdn_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&isdn_mutex); return ret; } @@ -1745,7 +1746,7 @@ isdn_open(struct inode *ino, struct file *filep) int chidx; int retval = -ENODEV; - lock_kernel(); + mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { infostruct *p; @@ -1796,7 +1797,7 @@ isdn_open(struct inode *ino, struct file *filep) #endif out: nonseekable_open(ino, filep); - unlock_kernel(); + mutex_unlock(&isdn_mutex); return retval; } @@ -1805,7 +1806,7 @@ isdn_close(struct inode *ino, struct file *filep) { 
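/*
 * Illustration (not part of the patch): the conversion pattern repeated in
 * the hysdn, i4l and mISDN hunks here.  A file-local mutex takes over from
 * the Big Kernel Lock, preserving mutual exclusion among this driver's own
 * open/ioctl/close paths without serializing against every other
 * lock_kernel() user in the system.
 */
static DEFINE_MUTEX(example_mutex);

static int example_open(struct inode *ino, struct file *filep)
{
	int ret;

	mutex_lock(&example_mutex);
	ret = nonseekable_open(ino, filep);
	mutex_unlock(&example_mutex);
	return ret;
}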
uint minor = iminor(ino); - lock_kernel(); + mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { infostruct *p = dev->infochain; infostruct *q = NULL; @@ -1839,7 +1840,7 @@ isdn_close(struct inode *ino, struct file *filep) #endif out: - unlock_kernel(); + mutex_unlock(&isdn_mutex); return 0; } diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 8c85d1e88cc6259c41dff0b4615f32681edd39ac..26d44c3ca1d8d8a1c1da84fb2df404e2d4995e75 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2924,16 +2924,17 @@ isdn_net_getcfg(isdn_net_ioctl_cfg * cfg) cfg->dialtimeout = lp->dialtimeout >= 0 ? lp->dialtimeout / HZ : -1; cfg->dialwait = lp->dialwait / HZ; if (lp->slave) { - if (strlen(lp->slave->name) > 8) + if (strlen(lp->slave->name) >= 10) strcpy(cfg->slave, "too-long"); else strcpy(cfg->slave, lp->slave->name); } else cfg->slave[0] = '\0'; if (lp->master) { - if (strlen(lp->master->name) > 8) + if (strlen(lp->master->name) >= 10) strcpy(cfg->master, "too-long"); - strcpy(cfg->master, lp->master->name); + else + strcpy(cfg->master, lp->master->name); } else cfg->master[0] = '\0'; return 0; diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index f37b8f68d0aa0d3b00f7405d1f7bb31c8ab5e6bb..fe824e0cbb252cedda78527fac18f2a40876a9df 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -449,14 +449,9 @@ static int get_filter(void __user *arg, struct sock_filter **p) /* uprog.len is unsigned short, so no overflow here */ len = uprog.len * sizeof(struct sock_filter); - code = kmalloc(len, GFP_KERNEL); - if (code == NULL) - return -ENOMEM; - - if (copy_from_user(code, uprog.filter, len)) { - kfree(code); - return -EFAULT; - } + code = memdup_user(uprog.filter, len); + if (IS_ERR(code)) + return PTR_ERR(code); err = sk_chk_filter(code, uprog.len); if (err) { @@ -482,7 +477,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) struct isdn_ppp_comp_data data; void __user *argp = (void __user *)arg; - is = (struct ippp_struct *) file->private_data; + is = file->private_data; lp = is->lp; if (is->debug & 0x1) diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index fc8454d2eea5f9343be87eaf0f3e867fb4294ca4..51dc60da333bcf17cadca7554283b60ce98b21d7 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -2636,12 +2636,6 @@ isdn_tty_modem_result(int code, modem_info * info) if ((info->flags & ISDN_ASYNC_CLOSING) || (!info->tty)) { return; } -#ifdef CONFIG_ISDN_AUDIO - if ( !info->vonline ) - tty_ldisc_flush(info->tty); -#else - tty_ldisc_flush(info->tty); -#endif if ((info->flags & ISDN_ASYNC_CHECK_CD) && (!((info->flags & ISDN_ASYNC_CALLOUT_ACTIVE) && (info->flags & ISDN_ASYNC_CALLOUT_NOHUP)))) { diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 81048b8ed8ad97b7890f631155c77264c35fd98d..de43c8c70ad082451bbce7e7545a234c0043b836 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -24,9 +24,10 @@ #include #include #include -#include +#include #include "core.h" +static DEFINE_MUTEX(mISDN_mutex); static u_int *debug; @@ -224,7 +225,7 @@ mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__, filep, cmd, arg); - lock_kernel(); + mutex_lock(&mISDN_mutex); switch (cmd) { case IMADDTIMER: if (get_user(tout, (int __user *)arg)) { @@ -256,7 +257,7 @@ mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned 
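/*
 * For reference (not part of the patch): memdup_user() bundles the
 * kmalloc(GFP_KERNEL) + copy_from_user() + error-unwind sequence that the
 * removed code open-coded.  It returns either the freshly allocated kernel
 * copy or an ERR_PTR() value (-ENOMEM or -EFAULT), hence the IS_ERR() /
 * PTR_ERR() checks in get_filter() above and in the pcbit and sc ioctl
 * paths below:
 */
static int example_copy_from_user(void __user *uptr, size_t len)
{
	char *buf = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	/* ... use buf ... */
	kfree(buf);
	return 0;
}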
long arg) default: ret = -EINVAL; } - unlock_kernel(); + mutex_unlock(&mISDN_mutex); return ret; } diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c index 123c1d6c43b45f82a3713c62e7d8f3357a64e947..1507d2e83fbb0d5acf9486e37d1fdf1583fd1a48 100644 --- a/drivers/isdn/pcbit/drv.c +++ b/drivers/isdn/pcbit/drv.c @@ -411,14 +411,10 @@ static int pcbit_writecmd(const u_char __user *buf, int len, int driver, int cha return -EINVAL; } - cbuf = kmalloc(len, GFP_KERNEL); - if (!cbuf) - return -ENOMEM; + cbuf = memdup_user(buf, len); + if (IS_ERR(cbuf)) + return PTR_ERR(cbuf); - if (copy_from_user(cbuf, buf, len)) { - kfree(cbuf); - return -EFAULT; - } memcpy_toio(dev->sh_mem, cbuf, len); kfree(cbuf); return len; diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c index 1081091bbfaf74e95285c02bc8bf85ab075e5018..43c5dc3516e5ac9c1d58844b38f4c776ad8ee90f 100644 --- a/drivers/isdn/sc/ioctl.c +++ b/drivers/isdn/sc/ioctl.c @@ -215,19 +215,13 @@ int sc_ioctl(int card, scs_ioctl *data) pr_debug("%s: DCBIOSETSPID: ioctl received\n", sc_adapter[card]->devicename); - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); - if(!spid) { - kfree(rcvmsg); - return -ENOMEM; - } - /* * Get the spid from user space */ - if (copy_from_user(spid, data->dataptr, SCIOC_SPIDSIZE)) { + spid = memdup_user(data->dataptr, SCIOC_SPIDSIZE); + if (IS_ERR(spid)) { kfree(rcvmsg); - kfree(spid); - return -EFAULT; + return PTR_ERR(spid); } pr_debug("%s: SCIOCSETSPID: setting channel %d spid to %s\n", @@ -296,18 +290,13 @@ int sc_ioctl(int card, scs_ioctl *data) pr_debug("%s: SCIOSETDN: ioctl received\n", sc_adapter[card]->devicename); - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); - if (!dn) { - kfree(rcvmsg); - return -ENOMEM; - } /* * Get the spid from user space */ - if (copy_from_user(dn, data->dataptr, SCIOC_DNSIZE)) { + dn = memdup_user(data->dataptr, SCIOC_DNSIZE); + if (IS_ERR(dn)) { kfree(rcvmsg); - kfree(dn); - return -EFAULT; + return PTR_ERR(dn); } pr_debug("%s: SCIOCSETDN: setting channel %d dn to %s\n", diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h index 75e28fef797b05164c500f3f9506f4c16868cbdf..d693b8d15cde21b93739885b049128ca163e6218 100644 --- a/drivers/net/3c527.h +++ b/drivers/net/3c527.h @@ -34,7 +34,7 @@ struct mc32_mailbox { u16 mbox; u16 data[1]; -} __attribute((packed)); +} __packed; struct skb_header { @@ -43,7 +43,7 @@ struct skb_header u16 next; /* Do not change! */ u16 length; u32 data; -} __attribute((packed)); +} __packed; struct mc32_stats { @@ -68,7 +68,7 @@ struct mc32_stats u32 dataA[6]; u16 dataB[5]; u32 dataC[14]; -} __attribute((packed)); +} __packed; #define STATUS_MASK 0x0F #define COMPLETED (1<<7) diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index d75803e6e527b634d24d6d45ed8bc0962b17e535..c754d88e5ec92d0af82d80094a4576256a50dc61 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -435,7 +435,6 @@ MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); First the windows. There are eight register windows, with the command and status registers available in each. */ -#define EL3WINDOW(win_num) iowrite16(SelectWindow + (win_num), ioaddr + EL3_CMD) #define EL3_CMD 0x0e #define EL3_STATUS 0x0e @@ -645,10 +644,51 @@ struct vortex_private { u16 deferred; /* Resend these interrupts when we * bale from the ISR */ u16 io_size; /* Size of PCI region (for release_region) */ - spinlock_t lock; /* Serialise access to device & its vortex_private */ - struct mii_if_info mii; /* MII lib hooks/info */ + + /* Serialises access to hardware other than MII and variables below. 
+ * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ + spinlock_t lock; + + spinlock_t mii_lock; /* Serialises access to MII */ + struct mii_if_info mii; /* MII lib hooks/info */ + spinlock_t window_lock; /* Serialises access to windowed regs */ + int window; /* Register window */ }; +static void window_set(struct vortex_private *vp, int window) +{ + if (window != vp->window) { + iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD); + vp->window = window; + } +} + +#define DEFINE_WINDOW_IO(size) \ +static u ## size \ +window_read ## size(struct vortex_private *vp, int window, int addr) \ +{ \ + unsigned long flags; \ + u ## size ret; \ + spin_lock_irqsave(&vp->window_lock, flags); \ + window_set(vp, window); \ + ret = ioread ## size(vp->ioaddr + addr); \ + spin_unlock_irqrestore(&vp->window_lock, flags); \ + return ret; \ +} \ +static void \ +window_write ## size(struct vortex_private *vp, u ## size value, \ + int window, int addr) \ +{ \ + unsigned long flags; \ + spin_lock_irqsave(&vp->window_lock, flags); \ + window_set(vp, window); \ + iowrite ## size(value, vp->ioaddr + addr); \ + spin_unlock_irqrestore(&vp->window_lock, flags); \ +} +DEFINE_WINDOW_IO(8) +DEFINE_WINDOW_IO(16) +DEFINE_WINDOW_IO(32) + #ifdef CONFIG_PCI #define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL) #else @@ -711,7 +751,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, static int vortex_up(struct net_device *dev); static void vortex_down(struct net_device *dev, int final); static int vortex_open(struct net_device *dev); -static void mdio_sync(void __iomem *ioaddr, int bits); +static void mdio_sync(struct vortex_private *vp, int bits); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *vp, int phy_id, int location, int value); static void vortex_timer(unsigned long arg); @@ -980,10 +1020,16 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, ioaddr = pci_iomap(pdev, pci_bar, 0); if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) { + pci_disable_device(pdev); + rc = -ENOMEM; + goto out; + } rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, ent->driver_data, unit); if (rc < 0) { + pci_iounmap(pdev, ioaddr); pci_disable_device(pdev); goto out; } @@ -1119,6 +1165,7 @@ static int __devinit vortex_probe1(struct device *gendev, vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; vp->io_size = vci->io_size; vp->card_idx = card_idx; + vp->window = -1; /* module list only for Compaq device */ if (gendev == NULL) { @@ -1154,6 +1201,8 @@ static int __devinit vortex_probe1(struct device *gendev, } spin_lock_init(&vp->lock); + spin_lock_init(&vp->mii_lock); + spin_lock_init(&vp->window_lock); vp->gendev = gendev; vp->mii.dev = dev; vp->mii.mdio_read = mdio_read; @@ -1205,7 +1254,6 @@ static int __devinit vortex_probe1(struct device *gendev, vp->mii.force_media = vp->full_duplex; vp->options = option; /* Read the station address from the EEPROM. */ - EL3WINDOW(0); { int base; @@ -1218,14 +1266,15 @@ static int __devinit vortex_probe1(struct device *gendev, for (i = 0; i < 0x40; i++) { int timer; - iowrite16(base + i, ioaddr + Wn0EepromCmd); + window_write16(vp, base + i, 0, Wn0EepromCmd); /* Pause for at least 162 us. for the read to take place. 
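/*
 * Illustration (not part of the patch): what DEFINE_WINDOW_IO(16) above
 * generates, written out long-hand.  Each former EL3WINDOW(n) plus
 * ioread16()/iowrite16() pair becomes a single window_read16()/
 * window_write16() call that selects the register window (only when it
 * actually changes) and performs the access under window_lock, so callers
 * no longer need to track which window is currently mapped.
 */
static u16 window_read16_expanded(struct vortex_private *vp, int window,
				  int addr)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&vp->window_lock, flags);
	window_set(vp, window);		/* writes SelectWindow + window if needed */
	ret = ioread16(vp->ioaddr + addr);
	spin_unlock_irqrestore(&vp->window_lock, flags);
	return ret;
}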
*/ for (timer = 10; timer >= 0; timer--) { udelay(162); - if ((ioread16(ioaddr + Wn0EepromCmd) & 0x8000) == 0) + if ((window_read16(vp, 0, Wn0EepromCmd) & + 0x8000) == 0) break; } - eeprom[i] = ioread16(ioaddr + Wn0EepromData); + eeprom[i] = window_read16(vp, 0, Wn0EepromData); } } for (i = 0; i < 0x18; i++) @@ -1250,9 +1299,8 @@ static int __devinit vortex_probe1(struct device *gendev, pr_err("*** EEPROM MAC address is invalid.\n"); goto free_ring; /* With every pack */ } - EL3WINDOW(2); for (i = 0; i < 6; i++) - iowrite8(dev->dev_addr[i], ioaddr + i); + window_write8(vp, dev->dev_addr[i], 2, i); if (print_info) pr_cont(", IRQ %d\n", dev->irq); @@ -1261,8 +1309,7 @@ static int __devinit vortex_probe1(struct device *gendev, pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n", dev->irq); - EL3WINDOW(4); - step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1; + step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; if (print_info) { pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], @@ -1285,17 +1332,15 @@ static int __devinit vortex_probe1(struct device *gendev, (unsigned long long)pci_resource_start(pdev, 2), vp->cb_fn_base); } - EL3WINDOW(2); - n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010; + n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; if (vp->drv_flags & INVERT_LED_PWR) n |= 0x10; if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; - iowrite16(n, ioaddr + Wn2_ResetOptions); + window_write16(vp, n, 2, Wn2_ResetOptions); if (vp->drv_flags & WNO_XCVR_PWR) { - EL3WINDOW(0); - iowrite16(0x0800, ioaddr); + window_write16(vp, 0x0800, 0, 0); } } @@ -1313,14 +1358,13 @@ static int __devinit vortex_probe1(struct device *gendev, { static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; unsigned int config; - EL3WINDOW(3); - vp->available_media = ioread16(ioaddr + Wn3_Options); + vp->available_media = window_read16(vp, 3, Wn3_Options); if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ vp->available_media = 0x40; - config = ioread32(ioaddr + Wn3_Config); + config = window_read32(vp, 3, Wn3_Config); if (print_info) { pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", - config, ioread16(ioaddr + Wn3_Options)); + config, window_read16(vp, 3, Wn3_Options)); pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", 8 << RAM_SIZE(config), RAM_WIDTH(config) ? "word" : "byte", @@ -1346,11 +1390,10 @@ static int __devinit vortex_probe1(struct device *gendev, if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { int phy, phy_idx = 0; - EL3WINDOW(4); mii_preamble_required++; if (vp->drv_flags & EXTRA_PREAMBLE) mii_preamble_required++; - mdio_sync(ioaddr, 32); + mdio_sync(vp, 32); mdio_read(dev, 24, MII_BMSR); for (phy = 0; phy < 32 && phy_idx < 1; phy++) { int mii_status, phyx; @@ -1478,18 +1521,17 @@ static void vortex_set_duplex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; pr_info("%s: setting %s-duplex.\n", dev->name, (vp->full_duplex) ? "full" : "half"); - EL3WINDOW(3); /* Set the full-duplex bit. */ - iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | - (vp->large_frames ? 0x40 : 0) | - ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? - 0x100 : 0), - ioaddr + Wn3_MAC_Ctrl); + window_write16(vp, + ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | + (vp->large_frames ? 
0x40 : 0) | + ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? + 0x100 : 0), + 3, Wn3_MAC_Ctrl); } static void vortex_check_media(struct net_device *dev, unsigned int init) @@ -1529,8 +1571,7 @@ vortex_up(struct net_device *dev) } /* Before initializing select the active media port. */ - EL3WINDOW(3); - config = ioread32(ioaddr + Wn3_Config); + config = window_read32(vp, 3, Wn3_Config); if (vp->media_override != 7) { pr_info("%s: Media override to transceiver %d (%s).\n", @@ -1577,10 +1618,9 @@ vortex_up(struct net_device *dev) config = BFINS(config, dev->if_port, 20, 4); if (vortex_debug > 6) pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); - iowrite32(config, ioaddr + Wn3_Config); + window_write32(vp, config, 3, Wn3_Config); if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { - EL3WINDOW(4); mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); @@ -1601,51 +1641,46 @@ vortex_up(struct net_device *dev) iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); if (vortex_debug > 1) { - EL3WINDOW(4); pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", - dev->name, dev->irq, ioread16(ioaddr + Wn4_Media)); + dev->name, dev->irq, window_read16(vp, 4, Wn4_Media)); } /* Set the station address and mask in window 2 each time opened. */ - EL3WINDOW(2); for (i = 0; i < 6; i++) - iowrite8(dev->dev_addr[i], ioaddr + i); + window_write8(vp, dev->dev_addr[i], 2, i); for (; i < 12; i+=2) - iowrite16(0, ioaddr + i); + window_write16(vp, 0, 2, i); if (vp->cb_fn_base) { - unsigned short n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010; + unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; if (vp->drv_flags & INVERT_LED_PWR) n |= 0x10; if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; - iowrite16(n, ioaddr + Wn2_ResetOptions); + window_write16(vp, n, 2, Wn2_ResetOptions); } if (dev->if_port == XCVR_10base2) /* Start the thinnet transceiver. We should really wait 50ms...*/ iowrite16(StartCoax, ioaddr + EL3_CMD); if (dev->if_port != XCVR_NWAY) { - EL3WINDOW(4); - iowrite16((ioread16(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) | - media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); + window_write16(vp, + (window_read16(vp, 4, Wn4_Media) & + ~(Media_10TP|Media_SQE)) | + media_tbl[dev->if_port].media_bits, + 4, Wn4_Media); } /* Switch to the stats window, and clear all stats by reading. */ iowrite16(StatsDisable, ioaddr + EL3_CMD); - EL3WINDOW(6); for (i = 0; i < 10; i++) - ioread8(ioaddr + i); - ioread16(ioaddr + 10); - ioread16(ioaddr + 12); + window_read8(vp, 6, i); + window_read16(vp, 6, 10); + window_read16(vp, 6, 12); /* New: On the Vortex we must also clear the BadSSD counter. */ - EL3WINDOW(4); - ioread8(ioaddr + 12); + window_read8(vp, 4, 12); /* ..and on the Boomerang we enable the extra statistics bits. */ - iowrite16(0x0040, ioaddr + Wn4_NetDiag); - - /* Switch to register set 7 for normal use. */ - EL3WINDOW(7); + window_write16(vp, 0x0040, 4, Wn4_NetDiag); if (vp->full_bus_master_rx) { /* Boomerang bus master. 
*/ vp->cur_rx = vp->dirty_rx = 0; @@ -1763,7 +1798,7 @@ vortex_timer(unsigned long data) void __iomem *ioaddr = vp->ioaddr; int next_tick = 60*HZ; int ok = 0; - int media_status, old_window; + int media_status; if (vortex_debug > 2) { pr_debug("%s: Media selection timer tick happened, %s.\n", @@ -1771,10 +1806,7 @@ vortex_timer(unsigned long data) pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); } - disable_irq_lockdep(dev->irq); - old_window = ioread16(ioaddr + EL3_CMD) >> 13; - EL3WINDOW(4); - media_status = ioread16(ioaddr + Wn4_Media); + media_status = window_read16(vp, 4, Wn4_Media); switch (dev->if_port) { case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: if (media_status & Media_LnkBeat) { @@ -1794,10 +1826,7 @@ vortex_timer(unsigned long data) case XCVR_MII: case XCVR_NWAY: { ok = 1; - /* Interrupts are already disabled */ - spin_lock(&vp->lock); vortex_check_media(dev, 0); - spin_unlock(&vp->lock); } break; default: /* Other media types handled by Tx timeouts. */ @@ -1816,6 +1845,8 @@ vortex_timer(unsigned long data) if (!ok) { unsigned int config; + spin_lock_irq(&vp->lock); + do { dev->if_port = media_tbl[dev->if_port].next; } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); @@ -1830,19 +1861,22 @@ vortex_timer(unsigned long data) dev->name, media_tbl[dev->if_port].name); next_tick = media_tbl[dev->if_port].wait; } - iowrite16((media_status & ~(Media_10TP|Media_SQE)) | - media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); + window_write16(vp, + (media_status & ~(Media_10TP|Media_SQE)) | + media_tbl[dev->if_port].media_bits, + 4, Wn4_Media); - EL3WINDOW(3); - config = ioread32(ioaddr + Wn3_Config); + config = window_read32(vp, 3, Wn3_Config); config = BFINS(config, dev->if_port, 20, 4); - iowrite32(config, ioaddr + Wn3_Config); + window_write32(vp, config, 3, Wn3_Config); iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax, ioaddr + EL3_CMD); if (vortex_debug > 1) pr_debug("wrote 0x%08x to Wn3_Config\n", config); /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ + + spin_unlock_irq(&vp->lock); } leave_media_alone: @@ -1850,8 +1884,6 @@ vortex_timer(unsigned long data) pr_debug("%s: Media selection timer finished, %s.\n", dev->name, media_tbl[dev->if_port].name); - EL3WINDOW(old_window); - enable_irq_lockdep(dev->irq); mod_timer(&vp->timer, RUN_AT(next_tick)); if (vp->deferred) iowrite16(FakeIntr, ioaddr + EL3_CMD); @@ -1865,12 +1897,11 @@ static void vortex_tx_timeout(struct net_device *dev) pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", dev->name, ioread8(ioaddr + TxStatus), ioread16(ioaddr + EL3_STATUS)); - EL3WINDOW(4); pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n", - ioread16(ioaddr + Wn4_NetDiag), - ioread16(ioaddr + Wn4_Media), + window_read16(vp, 4, Wn4_NetDiag), + window_read16(vp, 4, Wn4_Media), ioread32(ioaddr + PktStatus), - ioread16(ioaddr + Wn4_FIFODiag)); + window_read16(vp, 4, Wn4_FIFODiag)); /* Slight code bloat to be user friendly. */ if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88) pr_err("%s: Transmitter encountered 16 collisions --" @@ -1917,9 +1948,6 @@ static void vortex_tx_timeout(struct net_device *dev) /* Issue Tx Enable */ iowrite16(TxEnable, ioaddr + EL3_CMD); dev->trans_start = jiffies; /* prevent tx timeout */ - - /* Switch to register set 7 for normal use. 
*/ - EL3WINDOW(7); } /* @@ -1980,10 +2008,10 @@ vortex_error(struct net_device *dev, int status) ioread16(ioaddr + EL3_STATUS) & StatsFull) { pr_warning("%s: Updating statistics failed, disabling " "stats as an interrupt source.\n", dev->name); - EL3WINDOW(5); - iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD); + iowrite16(SetIntrEnb | + (window_read16(vp, 5, 10) & ~StatsFull), + ioaddr + EL3_CMD); vp->intr_enable &= ~StatsFull; - EL3WINDOW(7); DoneDidThat++; } } @@ -1993,8 +2021,7 @@ vortex_error(struct net_device *dev, int status) } if (status & HostError) { u16 fifo_diag; - EL3WINDOW(4); - fifo_diag = ioread16(ioaddr + Wn4_FIFODiag); + fifo_diag = window_read16(vp, 4, Wn4_FIFODiag); pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n", dev->name, fifo_diag); /* Adapter failure requires Tx/Rx reset and reinit. */ @@ -2043,9 +2070,13 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vp->bus_master) { /* Set the bus-master controller to transfer the packet. */ int len = (skb->len + 3) & ~3; - iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), - ioaddr + Wn7_MasterAddr); + vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, + PCI_DMA_TODEVICE); + spin_lock_irq(&vp->window_lock); + window_set(vp, 7); + iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); iowrite16(len, ioaddr + Wn7_MasterLen); + spin_unlock_irq(&vp->window_lock); vp->tx_skb = skb; iowrite16(StartDMADown, ioaddr + EL3_CMD); /* netif_wake_queue() will be called at the DMADone interrupt. */ @@ -2217,6 +2248,9 @@ vortex_interrupt(int irq, void *dev_id) pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", dev->name, status, ioread8(ioaddr + Timer)); + spin_lock(&vp->window_lock); + window_set(vp, 7); + do { if (vortex_debug > 5) pr_debug("%s: In interrupt loop, status %4.4x.\n", @@ -2275,6 +2309,8 @@ vortex_interrupt(int irq, void *dev_id) iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + spin_unlock(&vp->window_lock); + if (vortex_debug > 4) pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); @@ -2760,85 +2796,58 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev) static void update_stats(void __iomem *ioaddr, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); - int old_window = ioread16(ioaddr + EL3_CMD); - if (old_window == 0xffff) /* Chip suspended or ejected. */ - return; /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. 
*/ - EL3WINDOW(6); - dev->stats.tx_carrier_errors += ioread8(ioaddr + 0); - dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); - dev->stats.tx_window_errors += ioread8(ioaddr + 4); - dev->stats.rx_fifo_errors += ioread8(ioaddr + 5); - dev->stats.tx_packets += ioread8(ioaddr + 6); - dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; - /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */ + dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); + dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); + dev->stats.tx_window_errors += window_read8(vp, 6, 4); + dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); + dev->stats.tx_packets += window_read8(vp, 6, 6); + dev->stats.tx_packets += (window_read8(vp, 6, 9) & + 0x30) << 4; + /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. */ - dev->stats.rx_bytes += ioread16(ioaddr + 10); - dev->stats.tx_bytes += ioread16(ioaddr + 12); + dev->stats.rx_bytes += window_read16(vp, 6, 10); + dev->stats.tx_bytes += window_read16(vp, 6, 12); /* Extra stats for get_ethtool_stats() */ - vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); - vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); - vp->xstats.tx_deferred += ioread8(ioaddr + 8); - EL3WINDOW(4); - vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); + vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2); + vp->xstats.tx_single_collisions += window_read8(vp, 6, 3); + vp->xstats.tx_deferred += window_read8(vp, 6, 8); + vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12); dev->stats.collisions = vp->xstats.tx_multiple_collisions + vp->xstats.tx_single_collisions + vp->xstats.tx_max_collisions; { - u8 up = ioread8(ioaddr + 13); + u8 up = window_read8(vp, 4, 13); dev->stats.rx_bytes += (up & 0x0f) << 16; dev->stats.tx_bytes += (up & 0xf0) << 12; } - - EL3WINDOW(old_window >> 13); } static int vortex_nway_reset(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - unsigned long flags; - int rc; - spin_lock_irqsave(&vp->lock, flags); - EL3WINDOW(4); - rc = mii_nway_restart(&vp->mii); - spin_unlock_irqrestore(&vp->lock, flags); - return rc; + return mii_nway_restart(&vp->mii); } static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - unsigned long flags; - int rc; - spin_lock_irqsave(&vp->lock, flags); - EL3WINDOW(4); - rc = mii_ethtool_gset(&vp->mii, cmd); - spin_unlock_irqrestore(&vp->lock, flags); - return rc; + return mii_ethtool_gset(&vp->mii, cmd); } static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - unsigned long flags; - int rc; - spin_lock_irqsave(&vp->lock, flags); - EL3WINDOW(4); - rc = mii_ethtool_sset(&vp->mii, cmd); - spin_unlock_irqrestore(&vp->lock, flags); - return rc; + return mii_ethtool_sset(&vp->mii, cmd); } static u32 vortex_get_msglevel(struct net_device *dev) @@ -2909,6 +2918,36 @@ static void vortex_get_drvinfo(struct net_device *dev, } } +static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct vortex_private *vp = netdev_priv(dev); + + spin_lock_irq(&vp->lock); + wol->supported = WAKE_MAGIC; + + wol->wolopts = 0; + if (vp->enable_wol) + wol->wolopts |= WAKE_MAGIC; + 
spin_unlock_irq(&vp->lock); +} + +static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct vortex_private *vp = netdev_priv(dev); + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + spin_lock_irq(&vp->lock); + if (wol->wolopts & WAKE_MAGIC) + vp->enable_wol = 1; + else + vp->enable_wol = 0; + acpi_set_WOL(dev); + spin_unlock_irq(&vp->lock); + + return 0; +} + static const struct ethtool_ops vortex_ethtool_ops = { .get_drvinfo = vortex_get_drvinfo, .get_strings = vortex_get_strings, @@ -2920,6 +2959,8 @@ static const struct ethtool_ops vortex_ethtool_ops = { .set_settings = vortex_set_settings, .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, + .get_wol = vortex_get_wol, + .set_wol = vortex_set_wol, }; #ifdef CONFIG_PCI @@ -2930,7 +2971,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int err; struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; unsigned long flags; pci_power_t state = 0; @@ -2942,7 +2982,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if(state != 0) pci_set_power_state(VORTEX_PCI(vp), PCI_D0); spin_lock_irqsave(&vp->lock, flags); - EL3WINDOW(4); err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); spin_unlock_irqrestore(&vp->lock, flags); if(state != 0) @@ -2985,8 +3024,6 @@ static void set_rx_mode(struct net_device *dev) static void set_8021q_mode(struct net_device *dev, int enable) { struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int old_window = ioread16(ioaddr + EL3_CMD); int mac_ctrl; if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { @@ -2997,28 +3034,23 @@ static void set_8021q_mode(struct net_device *dev, int enable) if (enable) max_pkt_size += 4; /* 802.1Q VLAN tag */ - EL3WINDOW(3); - iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize); + window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); /* set VlanEtherType to let the hardware checksumming treat tagged frames correctly */ - EL3WINDOW(7); - iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType); + window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); } else { /* on older cards we have to enable large frames */ vp->large_frames = dev->mtu > 1500 || enable; - EL3WINDOW(3); - mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl); + mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); if (vp->large_frames) mac_ctrl |= 0x40; else mac_ctrl &= ~0x40; - iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl); + window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); } - - EL3WINDOW(old_window); } #else @@ -3037,7 +3069,10 @@ static void set_8021q_mode(struct net_device *dev, int enable) /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues. */ -#define mdio_delay() ioread32(mdio_addr) +static void mdio_delay(struct vortex_private *vp) +{ + window_read32(vp, 4, Wn4_PhysicalMgmt); +} #define MDIO_SHIFT_CLK 0x01 #define MDIO_DIR_WRITE 0x04 @@ -3048,16 +3083,15 @@ static void set_8021q_mode(struct net_device *dev, int enable) /* Generate the preamble required for initial synchronization and a few older transceivers. */ -static void mdio_sync(void __iomem *ioaddr, int bits) +static void mdio_sync(struct vortex_private *vp, int bits) { - void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt; - /* Establish sync by sending at least 32 logic ones. 
*/ while (-- bits >= 0) { - iowrite16(MDIO_DATA_WRITE1, mdio_addr); - mdio_delay(); - iowrite16(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); - mdio_delay(); + window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt); + mdio_delay(vp); + window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, + 4, Wn4_PhysicalMgmt); + mdio_delay(vp); } } @@ -3065,59 +3099,70 @@ static int mdio_read(struct net_device *dev, int phy_id, int location) { int i; struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; unsigned int retval = 0; - void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt; + + spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) - mdio_sync(ioaddr, 32); + mdio_sync(vp, 32); /* Shift the read command bits out. */ for (i = 14; i >= 0; i--) { int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; - iowrite16(dataval, mdio_addr); - mdio_delay(); - iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr); - mdio_delay(); + window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); + mdio_delay(vp); + window_write16(vp, dataval | MDIO_SHIFT_CLK, + 4, Wn4_PhysicalMgmt); + mdio_delay(vp); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { - iowrite16(MDIO_ENB_IN, mdio_addr); - mdio_delay(); - retval = (retval << 1) | ((ioread16(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); - iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); - mdio_delay(); + window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); + mdio_delay(vp); + retval = (retval << 1) | + ((window_read16(vp, 4, Wn4_PhysicalMgmt) & + MDIO_DATA_READ) ? 1 : 0); + window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, + 4, Wn4_PhysicalMgmt); + mdio_delay(vp); } + + spin_unlock_bh(&vp->mii_lock); + return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; - void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt; int i; + spin_lock_bh(&vp->mii_lock); + if (mii_preamble_required) - mdio_sync(ioaddr, 32); + mdio_sync(vp, 32); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; - iowrite16(dataval, mdio_addr); - mdio_delay(); - iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr); - mdio_delay(); + window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); + mdio_delay(vp); + window_write16(vp, dataval | MDIO_SHIFT_CLK, + 4, Wn4_PhysicalMgmt); + mdio_delay(vp); } /* Leave the interface idle. */ for (i = 1; i >= 0; i--) { - iowrite16(MDIO_ENB_IN, mdio_addr); - mdio_delay(); - iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); - mdio_delay(); + window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); + mdio_delay(vp); + window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, + 4, Wn4_PhysicalMgmt); + mdio_delay(vp); } + + spin_unlock_bh(&vp->mii_lock); } /* ACPI: Advanced Configuration and Power Interface. */ @@ -3131,8 +3176,7 @@ static void acpi_set_WOL(struct net_device *dev) if (vp->enable_wol) { /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ - EL3WINDOW(7); - iowrite16(2, ioaddr + 0x0c); + window_write16(vp, 2, 7, 0x0c); /* The RxFilter must accept the WOL frames. 
*/ iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); iowrite16(RxEnable, ioaddr + EL3_CMD); diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 284a5f4a63ac8a48c955125c64e67ae8b89e16f9..4a4f6b81e32de9ae8c85f9dd6aa379927d0ffaa0 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -322,7 +322,7 @@ struct cp_dma_stats { __le32 rx_ok_mcast; __le16 tx_abort; __le16 tx_underrun; -} __attribute__((packed)); +} __packed; struct cp_extra_stats { unsigned long rx_frags; diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 97d8068b372b228d6764055ad1d0529439c940af..f5166dccd8dfa6b0436eb6397b20e7f05adee58e 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -662,7 +662,7 @@ static const struct ethtool_ops rtl8139_ethtool_ops; /* read MMIO register */ #define RTL_R8(reg) ioread8 (ioaddr + (reg)) #define RTL_R16(reg) ioread16 (ioaddr + (reg)) -#define RTL_R32(reg) ((unsigned long) ioread32 (ioaddr + (reg))) +#define RTL_R32(reg) ioread32 (ioaddr + (reg)) static const u16 rtl8139_intr_mask = @@ -862,7 +862,7 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) /* if unknown chip, assume array element #0, original RTL-8139 in this case */ i = 0; dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); - dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig)); + dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig)); tp->chipset = 0; match: @@ -1643,7 +1643,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", tp->cur_tx, tp->dirty_tx); for (i = 0; i < NUM_TX_DESC; i++) - netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n", + netdev_dbg(dev, "Tx descriptor %d is %08x%s\n", i, RTL_R32(TxStatus0 + (i * 4)), i == tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : ""); @@ -2487,7 +2487,7 @@ static void __set_rx_mode (struct net_device *dev) int rx_mode; u32 tmp; - netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n", + netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n", dev->flags, RTL_R32(RxConfig)); /* Note: do not reorder, GCC is clever about common statements. 
*/ diff --git a/drivers/net/82596.c b/drivers/net/82596.c index dd8dc15556cbda41d26691fdbd42308e08a5c911..e2c9c5b949f97f5f67ef481fd32021969889e6f9 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -525,7 +525,21 @@ static irqreturn_t i596_error(int irq, void *dev_id) } #endif -static inline void init_rx_bufs(struct net_device *dev) +static inline void remove_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_rbd *rbd; + int i; + + for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { + if (rbd->skb == NULL) + break; + dev_kfree_skb(rbd->skb); + rbd->skb = NULL; + } +} + +static inline int init_rx_bufs(struct net_device *dev) { struct i596_private *lp = dev->ml_priv; int i; @@ -537,8 +551,11 @@ static inline void init_rx_bufs(struct net_device *dev) for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); - if (skb == NULL) - panic("82596: alloc_skb() failed"); + if (skb == NULL) { + remove_rx_bufs(dev); + return -ENOMEM; + } + skb->dev = dev; rbd->v_next = rbd+1; rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); @@ -574,19 +591,8 @@ static inline void init_rx_bufs(struct net_device *dev) rfd->v_next = lp->rfds; rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds)); rfd->cmd = CMD_EOL|CMD_FLEX; -} -static inline void remove_rx_bufs(struct net_device *dev) -{ - struct i596_private *lp = dev->ml_priv; - struct i596_rbd *rbd; - int i; - - for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { - if (rbd->skb == NULL) - break; - dev_kfree_skb(rbd->skb); - } + return 0; } @@ -1009,20 +1015,35 @@ static int i596_open(struct net_device *dev) } #ifdef ENABLE_MVME16x_NET if (MACH_IS_MVME16x) { - if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) - return -EAGAIN; + if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) { + res = -EAGAIN; + goto err_irq_dev; + } } #endif - init_rx_bufs(dev); + res = init_rx_bufs(dev); + if (res) + goto err_irq_56; netif_start_queue(dev); - /* Initialize the 82596 memory */ if (init_i596_mem(dev)) { res = -EAGAIN; - free_irq(dev->irq, dev); + goto err_queue; } + return 0; + +err_queue: + netif_stop_queue(dev); + remove_rx_bufs(dev); +err_irq_56: +#ifdef ENABLE_MVME16x_NET + free_irq(0x56, dev); +err_irq_dev: +#endif + free_irq(dev->irq, dev); + return res; } @@ -1488,6 +1509,9 @@ static int i596_close(struct net_device *dev) } #endif +#ifdef ENABLE_MVME16x_NET + free_irq(0x56, dev); +#endif free_irq(dev->irq, dev); remove_rx_bufs(dev); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index ce2fcdd4ab90521af8a8fb9a3f873b796b0a2bd9..ebe68395ecf8c98cd292052d6e78cf5ad9f4ff31 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -530,14 +530,15 @@ config SH_ETH depends on SUPERH && \ (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ - CPU_SUBTYPE_SH7724) + CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757) select CRC32 select MII select MDIO_BITBANG select PHYLIB help Renesas SuperH Ethernet device driver. - This driver support SH7710, SH7712, SH7763, SH7619, and SH7724. + This driver supporting CPUs are: + - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757. config SUNLANCE tristate "Sun LANCE support" @@ -1463,7 +1464,7 @@ config FORCEDETH config CS89x0 tristate "CS89x0 support" depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \ - || ARCH_IXDP2X01 || ARCH_PNX010X || MACH_MX31ADS) + || ARCH_IXDP2X01 || MACH_MX31ADS) ---help--- Support for CS89x0 chipset based Ethernet cards. 
If you have a network (Ethernet) card of this type, say Y and read the @@ -1477,7 +1478,7 @@ config CS89x0 config CS89x0_NONISA_IRQ def_bool y depends on CS89x0 != n - depends on MACH_IXDP2351 || ARCH_IXDP2X01 || ARCH_PNX010X || MACH_MX31ADS + depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS config TC35815 tristate "TOSHIBA TC35815 Ethernet support" @@ -1659,6 +1660,7 @@ config R6040 depends on NET_PCI && PCI select CRC32 select MII + select PHYLIB help This is a driver for the R6040 Fast Ethernet MACs found in the the RDC R-321x System-on-chips. @@ -1748,11 +1750,12 @@ config TLAN Please email feedback to . config KS8842 - tristate "Micrel KSZ8842" - depends on HAS_IOMEM + tristate "Micrel KSZ8841/42 with generic bus interface" + depends on HAS_IOMEM && DMA_ENGINE help - This platform driver is for Micrel KSZ8842 / KS8842 - 2-port ethernet switch chip (managed, VLAN, QoS). + This platform driver is for KSZ8841(1-port) / KS8842(2-port) + ethernet switch chip (managed, VLAN, QoS) from Micrel or + Timberdale(FPGA). config KS8851 tristate "Micrel KS8851 SPI" @@ -2601,6 +2604,29 @@ config CHELSIO_T4 To compile this driver as a module choose M here; the module will be called cxgb4. +config CHELSIO_T4VF_DEPENDS + tristate + depends on PCI && INET + default y + +config CHELSIO_T4VF + tristate "Chelsio Communications T4 Virtual Function Ethernet support" + depends on CHELSIO_T4VF_DEPENDS + help + This driver supports Chelsio T4-based gigabit and 10Gb Ethernet + adapters with PCI-E SR-IOV Virtual Functions. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called cxgb4vf. + config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS && INET && SPARSEMEM @@ -2614,7 +2640,6 @@ config EHEA config ENIC tristate "Cisco VIC Ethernet NIC Support" depends on PCI && INET - select INET_LRO help This enables the support for the Cisco VIC Ethernet card. 
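The Kconfig hunk above adds the new CHELSIO_T4VF option, but the option only builds code once the top-level net Makefile is told to descend into the new cxgb4vf directory; the Makefile hunk just below adds exactly that hook. As a rough sketch of how such a tristate option is conventionally wired up in kbuild (the contents of drivers/net/cxgb4vf/Makefile are not part of this series excerpt, so the object list shown here is only an assumed illustration):

	# drivers/net/Makefile (hook added by this patch)
	obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/

	# drivers/net/cxgb4vf/Makefile (hypothetical layout, for illustration only)
	obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
	cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o

With CONFIG_CHELSIO_T4VF=m the directory is built as the cxgb4vf module, with =y it is built into the kernel image, and when the option (or its CHELSIO_T4VF_DEPENDS helper symbol) is not set the directory is skipped entirely.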
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 0a0512ae77da45c424e09e763545d6d8c7c14ca0..56e8c27f77cebe9ef3b9ac3c1284f7e2975f8448 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ obj-$(CONFIG_CHELSIO_T4) += cxgb4/ +obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_BONDING) += bonding/ @@ -83,8 +84,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BNX2) += bnx2.o obj-$(CONFIG_CNIC) += cnic.o -obj-$(CONFIG_BNX2X) += bnx2x.o -bnx2x-objs := bnx2x_main.o bnx2x_link.o +obj-$(CONFIG_BNX2X) += bnx2x/ spidernet-y += spider_net.o spider_net_ethtool.o obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o obj-$(CONFIG_GELIC_NET) += ps3_gelic.o @@ -275,7 +275,7 @@ obj-$(CONFIG_USB_USBNET) += usb/ obj-$(CONFIG_USB_ZD1201) += usb/ obj-$(CONFIG_USB_IPHETH) += usb/ -obj-y += wireless/ +obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_NET_TULIP) += tulip/ obj-$(CONFIG_HAMRADIO) += hamradio/ obj-$(CONFIG_IRDA) += irda/ diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 3b79c6cf21a3c6f3e495f4d8fd0efbb16c263f6e..9bb405bd664e38fd571e72375224a1012d76dcd3 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -218,12 +218,6 @@ static struct devprobe2 isa_probes[] __initdata = { #ifdef CONFIG_EL1 /* 3c501 */ {el1_probe, 0}, #endif -#ifdef CONFIG_WAVELAN /* WaveLAN */ - {wavelan_probe, 0}, -#endif -#ifdef CONFIG_ARLAN /* Aironet */ - {arlan_probe, 0}, -#endif #ifdef CONFIG_EL16 /* 3c507 */ {el16_probe, 0}, #endif diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c index b9115a776fddb31d998d1e24712214ef326c1343..5181e932211946b1f688d3075cc0dc95264709dc 100644 --- a/drivers/net/ac3200.c +++ b/drivers/net/ac3200.c @@ -211,7 +211,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev) retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { printk (" nothing! 
Unable to get IRQ %d.\n", dev->irq); - goto out1; + goto out; } printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]); diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 355797f7004808938ef9684df1693457c5b0a7ca..42fce91b71fc9c613fa894e910ceecd4fd9eb037 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -37,69 +37,6 @@ #define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n" - -static void rx(struct net_device *dev, int bufnum, - struct archdr *pkthdr, int length); -static int build_header(struct sk_buff *skb, - struct net_device *dev, - unsigned short type, - uint8_t daddr); -static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, - int bufnum); -static int ack_tx(struct net_device *dev, int acked); - - -static struct ArcProto capmode_proto = -{ - 'r', - XMTU, - 0, - rx, - build_header, - prepare_tx, - NULL, - ack_tx -}; - - -static void arcnet_cap_init(void) -{ - int count; - - for (count = 1; count <= 8; count++) - if (arc_proto_map[count] == arc_proto_default) - arc_proto_map[count] = &capmode_proto; - - /* for cap mode, we only set the bcast proto if there's no better one */ - if (arc_bcast_proto == arc_proto_default) - arc_bcast_proto = &capmode_proto; - - arc_proto_default = &capmode_proto; - arc_raw_proto = &capmode_proto; -} - - -#ifdef MODULE - -static int __init capmode_module_init(void) -{ - printk(VERSION); - arcnet_cap_init(); - return 0; -} - -static void __exit capmode_module_exit(void) -{ - arcnet_unregister_proto(&capmode_proto); -} -module_init(capmode_module_init); -module_exit(capmode_module_exit); - -MODULE_LICENSE("GPL"); -#endif /* MODULE */ - - - /* packet receiver */ static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) @@ -231,65 +168,107 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", length,ofs); - // Copy the arcnet-header + the protocol byte down: + /* Copy the arcnet-header + the protocol byte down: */ lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, sizeof(pkt->soft.cap.proto)); - // Skip the extra integer we have written into it as a cookie - // but write the rest of the message: + /* Skip the extra integer we have written into it as a cookie + but write the rest of the message: */ lp->hw.copy_to_card(dev, bufnum, ofs+1, ((unsigned char*)&pkt->soft.cap.mes),length-1); lp->lastload_dest = hard->dest; - return 1; /* done */ + return 1; /* done */ } - static int ack_tx(struct net_device *dev, int acked) { - struct arcnet_local *lp = netdev_priv(dev); - struct sk_buff *ackskb; - struct archdr *ackpkt; - int length=sizeof(struct arc_cap); + struct arcnet_local *lp = netdev_priv(dev); + struct sk_buff *ackskb; + struct archdr *ackpkt; + int length=sizeof(struct arc_cap); - BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", - lp->outgoing.skb->protocol, acked); + BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", + lp->outgoing.skb->protocol, acked); - BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); + BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); - /* Now alloc a skb to send back up through the layers: */ - ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); - if (ackskb == NULL) { - BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); - goto free_outskb; - } + /* Now alloc a skb to send back up through the 
layers: */ + ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); + if (ackskb == NULL) { + BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); + goto free_outskb; + } + + skb_put(ackskb, length + ARC_HDR_SIZE ); + ackskb->dev = dev; + + skb_reset_mac_header(ackskb); + ackpkt = (struct archdr *)skb_mac_header(ackskb); + /* skb_pull(ackskb, ARC_HDR_SIZE); */ - skb_put(ackskb, length + ARC_HDR_SIZE ); - ackskb->dev = dev; + skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, + ARC_HDR_SIZE + sizeof(struct arc_cap)); + ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ + ackpkt->soft.cap.mes.ack=acked; - skb_reset_mac_header(ackskb); - ackpkt = (struct archdr *)skb_mac_header(ackskb); - /* skb_pull(ackskb, ARC_HDR_SIZE); */ + BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", + *((int*)&ackpkt->soft.cap.cookie[0])); + ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); - skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, - ARC_HDR_SIZE + sizeof(struct arc_cap)); - ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ - ackpkt->soft.cap.mes.ack=acked; + BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); + netif_rx(ackskb); - BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", - *((int*)&ackpkt->soft.cap.cookie[0])); +free_outskb: + dev_kfree_skb_irq(lp->outgoing.skb); + lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ - ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); + return 0; +} - BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); - netif_rx(ackskb); +static struct ArcProto capmode_proto = +{ + 'r', + XMTU, + 0, + rx, + build_header, + prepare_tx, + NULL, + ack_tx +}; - free_outskb: - dev_kfree_skb_irq(lp->outgoing.skb); - lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ +static void arcnet_cap_init(void) +{ + int count; - return 0; + for (count = 1; count <= 8; count++) + if (arc_proto_map[count] == arc_proto_default) + arc_proto_map[count] = &capmode_proto; + + /* for cap mode, we only set the bcast proto if there's no better one */ + if (arc_bcast_proto == arc_proto_default) + arc_bcast_proto = &capmode_proto; + + arc_proto_default = &capmode_proto; + arc_raw_proto = &capmode_proto; } + +static int __init capmode_module_init(void) +{ + printk(VERSION); + arcnet_cap_init(); + return 0; +} + +static void __exit capmode_module_exit(void) +{ + arcnet_unregister_proto(&capmode_proto); +} +module_init(capmode_module_init); +module_exit(capmode_module_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index 0402da30a4ed22c9781a051a2b65cda98767bae2..37272827ee555cf87f6712069f2ba1410f6dbcee 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -90,14 +90,14 @@ static int __init com20020isa_probe(struct net_device *dev) outb(0, _INTMASK); dev->irq = probe_irq_off(airqmask); - if (dev->irq <= 0) { + if ((int)dev->irq <= 0) { BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed first time\n"); airqmask = probe_irq_on(); outb(NORXflag, _INTMASK); udelay(5); outb(0, _INTMASK); dev->irq = probe_irq_off(airqmask); - if (dev->irq <= 0) { + if ((int)dev->irq <= 0) { BUGMSG(D_NORMAL, "Autoprobe IRQ failed.\n"); err = -ENODEV; goto out; diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 4cb401813b7e7ec9af98ec10334ec8a99b53069f..eb27976dab373134478409f64b9f65c6a6ea4219 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -213,7 +213,7 @@ static int __init 
com90io_probe(struct net_device *dev) outb(0, _INTMASK); dev->irq = probe_irq_off(airqmask); - if (dev->irq <= 0) { + if ((int)dev->irq <= 0) { BUGMSG(D_INIT_REASONS, "Autoprobe IRQ failed\n"); goto err_out; } diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 24df0325090c8ae8075f09afcc3a60bb67ce972c..4f1cc7164ad9f41435e338a31286c4746120f90b 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -738,6 +738,17 @@ static void eth_set_mcast_list(struct net_device *dev) struct netdev_hw_addr *ha; u8 diffs[ETH_ALEN], *addr; int i; + static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; + + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < ETH_ALEN; i++) { + __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); + __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); + } + __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, + &port->regs->rx_control[0]); + return; + } if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, @@ -771,7 +782,8 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) if (!netif_running(dev)) return -EINVAL; - return phy_mii_ioctl(port->phydev, if_mii(req), cmd); + + return phy_mii_ioctl(port->phydev, req, cmd); } /* ethtool support */ diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c index 2e852463382b3e166db62dfde492ba7d5141dd78..4545d5a06c24f9e9cea64a70db01327a1d52ad21 100644 --- a/drivers/net/arm/w90p910_ether.c +++ b/drivers/net/arm/w90p910_ether.c @@ -822,6 +822,9 @@ static int w90p910_ether_open(struct net_device *dev) w90p910_set_global_maccmd(dev); w90p910_enable_rx(dev, 1); + clk_enable(ether->rmiiclk); + clk_enable(ether->clk); + ether->rx_packets = 0x0; ether->rx_bytes = 0x0; diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 93185f5f09accd10ec947b2b30179071703e4f8e..89876897a6fed5244358740795e98207507799a6 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -811,10 +811,8 @@ static int net_close(struct net_device *dev) /* No statistic counters on the chip to update. */ /* Disable the IRQ on boards of fmv18x where it is feasible. */ - if (lp->jumpered) { + if (lp->jumpered) outb(0x00, ioaddr + IOCONFIG1); - free_irq(dev->irq, dev); - } /* Power-down the chip. Green, green, green! 
*/ outb(0x00, ioaddr + CONFIG_1); diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h index 84ae905bf732c4ac0e80ec18fbf83825d0b945f3..52abbbdf8a08c46df73b8e83d2566fe741f8043d 100644 --- a/drivers/net/atl1c/atl1c.h +++ b/drivers/net/atl1c/atl1c.h @@ -73,7 +73,8 @@ #define FULL_DUPLEX 2 #define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) -#define MAX_JUMBO_FRAME_SIZE (9*1024) +#define MAX_JUMBO_FRAME_SIZE (6*1024) +#define MAX_TSO_FRAME_SIZE (7*1024) #define MAX_TX_OFFLOAD_THRESH (9*1024) #define AT_MAX_RECEIVE_QUEUE 4 @@ -87,10 +88,11 @@ #define AT_MAX_INT_WORK 5 #define AT_TWSI_EEPROM_TIMEOUT 100 #define AT_HW_MAX_IDLE_DELAY 10 -#define AT_SUSPEND_LINK_TIMEOUT 28 +#define AT_SUSPEND_LINK_TIMEOUT 100 #define AT_ASPM_L0S_TIMER 6 #define AT_ASPM_L1_TIMER 12 +#define AT_LCKDET_TIMER 12 #define ATL1C_PCIE_L0S_L1_DISABLE 0x01 #define ATL1C_PCIE_PHY_RESET 0x02 @@ -316,6 +318,7 @@ enum atl1c_nic_type { athr_l2c_b, athr_l2c_b2, athr_l1d, + athr_l1d_2, }; enum atl1c_trans_queue { @@ -392,6 +395,8 @@ struct atl1c_hw { u16 subsystem_id; u16 subsystem_vendor_id; u8 revision_id; + u16 phy_id1; + u16 phy_id2; u32 intr_mask; u8 dmaw_dly_cnt; diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index f1389d664a21b47634f39deebcad13383536b754..d8501f060957afac49afd7726158020aa96de4b2 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c @@ -37,6 +37,9 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw) if (data & TWSI_DEBUG_DEV_EXIST) return 1; + AT_READ_REG(hw, REG_MASTER_CTRL, &data); + if (data & MASTER_CTRL_OTP_SEL) + return 1; return 0; } @@ -69,6 +72,8 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) u32 i; u32 otp_ctrl_data; u32 twsi_ctrl_data; + u32 ltssm_ctrl_data; + u32 wol_data; u8 eth_addr[ETH_ALEN]; u16 phy_data; bool raise_vol = false; @@ -104,6 +109,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) udelay(20); raise_vol = true; } + /* close open bit of ReadOnly*/ + AT_READ_REG(hw, REG_LTSSM_ID_CTRL, <ssm_ctrl_data); + ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO; + AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data); + + /* clear any WOL settings */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + AT_READ_REG(hw, REG_WOL_CTRL, &wol_data); + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; @@ -119,17 +133,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) } /* Disable OTP_CLK */ if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { - if (otp_ctrl_data & OTP_CTRL_CLK_EN) { - otp_ctrl_data &= ~OTP_CTRL_CLK_EN; - AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); - AT_WRITE_FLUSH(hw); - msleep(1); - } + otp_ctrl_data &= ~OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + msleep(1); } if (raise_vol) { if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d) { + hw->nic_type == athr_l1d || + hw->nic_type == athr_l1d_2) { atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) goto out; @@ -456,14 +468,22 @@ int atl1c_phy_reset(struct atl1c_hw *hw) if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || - hw->nic_type == athr_l1d) { + hw->nic_type == athr_l1d || + hw->nic_type == athr_l1d_2) { atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); msleep(20); } - - /*Enable PHY LinkChange Interrupt */ + if (hw->nic_type == athr_l1d) { + atl1c_write_phy_reg(hw, 
MII_DBG_ADDR, 0x29); + atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D); + } + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2 + || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) { + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); + atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); + } err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); if (err) { if (netif_msg_hw(adapter)) @@ -482,12 +502,10 @@ int atl1c_phy_init(struct atl1c_hw *hw) struct pci_dev *pdev = adapter->pdev; int ret_val; u16 mii_bmcr_data = BMCR_RESET; - u16 phy_id1, phy_id2; - if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) || - (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) { - if (netif_msg_link(adapter)) - dev_err(&pdev->dev, "Error get phy ID\n"); + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || + (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { + dev_err(&pdev->dev, "Error get phy ID\n"); return -1; } switch (hw->media_type) { @@ -572,6 +590,65 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) return 0; } +int atl1c_phy_power_saving(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret = 0; + u16 autoneg_advertised = ADVERTISED_10baseT_Half; + u16 save_autoneg_advertised; + u16 phy_data; + u16 mii_lpa_data; + u16 speed = SPEED_0; + u16 duplex = FULL_DUPLEX; + int i; + + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data); + if (mii_lpa_data & LPA_10FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else if (mii_lpa_data & LPA_10HALF) + autoneg_advertised = ADVERTISED_10baseT_Half; + else if (mii_lpa_data & LPA_100HALF) + autoneg_advertised = ADVERTISED_100baseT_Half; + else if (mii_lpa_data & LPA_100FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + + save_autoneg_advertised = hw->autoneg_advertised; + hw->phy_configured = false; + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + dev_dbg(&pdev->dev, "phy autoneg failed\n"); + ret = -1; + } + hw->autoneg_advertised = save_autoneg_advertised; + + if (mii_lpa_data) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + mdelay(100); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + if (atl1c_get_speed_and_duplex(hw, &speed, + &duplex) != 0) + dev_dbg(&pdev->dev, + "get speed and duplex failed\n"); + break; + } + } + } + } else { + speed = SPEED_10; + duplex = HALF_DUPLEX; + } + adapter->link_speed = speed; + adapter->link_duplex = duplex; + + return ret; +} + int atl1c_restart_autoneg(struct atl1c_hw *hw) { int err = 0; diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h index 1eeb3ed9f0cbd10139e1850f13742bce567e6a5c..3dd675979aa17ebfcfeda3d345981355a924c497 100644 --- a/drivers/net/atl1c/atl1c_hw.h +++ b/drivers/net/atl1c/atl1c_hw.h @@ -42,7 +42,7 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value); int atl1c_phy_init(struct atl1c_hw *hw); int atl1c_check_eeprom_exist(struct atl1c_hw *hw); int atl1c_restart_autoneg(struct atl1c_hw *hw); - +int atl1c_phy_power_saving(struct atl1c_hw *hw); /* register definition */ #define REG_DEVICE_CAP 0x5C #define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 @@ -120,6 +120,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); #define REG_PCIE_PHYMISC 0x1000 #define PCIE_PHYMISC_FORCE_RCV_DET 
0x4 +#define REG_PCIE_PHYMISC2 0x1004 +#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3 +#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16 +#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3 +#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18 + #define REG_TWSI_DEBUG 0x1108 #define TWSI_DEBUG_DEV_EXIST 0x20000000 @@ -150,24 +156,28 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); #define PM_CTRL_ASPM_L0S_EN 0x00001000 #define PM_CTRL_CLK_SWH_L1 0x00002000 #define PM_CTRL_CLK_PWM_VER1_1 0x00004000 -#define PM_CTRL_PCIE_RECV 0x00008000 +#define PM_CTRL_RCVR_WT_TIMER 0x00008000 #define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF #define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 #define PM_CTRL_PM_REQ_TIMER_MASK 0xF #define PM_CTRL_PM_REQ_TIMER_SHIFT 20 -#define PM_CTRL_LCKDET_TIMER_MASK 0x3F +#define PM_CTRL_LCKDET_TIMER_MASK 0xF #define PM_CTRL_LCKDET_TIMER_SHIFT 24 #define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 #define PM_CTRL_SA_DLY_EN 0x20000000 #define PM_CTRL_MAC_ASPM_CHK 0x40000000 #define PM_CTRL_HOTRST 0x80000000 +#define REG_LTSSM_ID_CTRL 0x12FC +#define LTSSM_ID_EN_WRO 0x1000 /* Selene Master Control Register */ #define REG_MASTER_CTRL 0x1400 #define MASTER_CTRL_SOFT_RST 0x1 #define MASTER_CTRL_TEST_MODE_MASK 0x3 #define MASTER_CTRL_TEST_MODE_SHIFT 2 #define MASTER_CTRL_BERT_START 0x10 +#define MASTER_CTRL_OOB_DIS_OFF 0x40 +#define MASTER_CTRL_SA_TIMER_EN 0x80 #define MASTER_CTRL_MTIMER_EN 0x100 #define MASTER_CTRL_MANUAL_INT 0x200 #define MASTER_CTRL_TX_ITIMER_EN 0x400 @@ -220,6 +230,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); GPHY_CTRL_PWDOWN_HW |\ GPHY_CTRL_PHY_IDDQ) +#define GPHY_CTRL_POWER_SAVING ( \ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_EN |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_PWDOWN_HW |\ + GPHY_CTRL_PHY_IDDQ) /* Block IDLE Status Register */ #define REG_IDLE_STATUS 0x1410 #define IDLE_STATUS_MASK 0x00FF @@ -287,6 +303,14 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); #define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. 
This signal * comes from Analog SerDes */ #define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ +#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE +#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3 +#define SERDES_OVCLK_18_25 0x0 +#define SERDES_OVCLK_12_18 0x1 +#define SERDES_OVCLK_0_4 0x2 +#define SERDES_OVCLK_4_12 0x3 +#define SERDES_MAC_CLK_SLOWDOWN 0x20000 +#define SERDES_PYH_CLK_SLOWDOWN 0x40000 /* MAC Control Register */ #define REG_MAC_CTRL 0x1480 @@ -693,6 +717,21 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); #define REG_MAC_TX_STATUS_BIN 0x1760 #define REG_MAC_TX_STATUS_END 0x17c0 +#define REG_CLK_GATING_CTRL 0x1814 +#define CLK_GATING_DMAW_EN 0x0001 +#define CLK_GATING_DMAR_EN 0x0002 +#define CLK_GATING_TXQ_EN 0x0004 +#define CLK_GATING_RXQ_EN 0x0008 +#define CLK_GATING_TXMAC_EN 0x0010 +#define CLK_GATING_RXMAC_EN 0x0020 + +#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\ + CLK_GATING_DMAR_EN |\ + CLK_GATING_TXQ_EN |\ + CLK_GATING_RXQ_EN |\ + CLK_GATING_TXMAC_EN|\ + CLK_GATING_RXMAC_EN) + /* DEBUG ADDR */ #define REG_DEBUG_DATA0 0x1900 #define REG_DEBUG_DATA1 0x1904 @@ -734,6 +773,10 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw); #define MII_PHYSID1 0x02 #define MII_PHYSID2 0x03 +#define L1D_MPW_PHYID1 0xD01C /* V7 */ +#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ +#define L1D_MPW_PHYID3 0xD01E /* V8 */ + /* Autoneg Advertisement Register */ #define MII_ADVERTISE 0x04 diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 1c3c046d5f34d610a05caed24fd69cff28042397..c7b8ef507ebd4d71fd653240c1a1f26183c4daf1 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -21,7 +21,7 @@ #include "atl1c.h" -#define ATL1C_DRV_VERSION "1.0.0.2-NAPI" +#define ATL1C_DRV_VERSION "1.0.1.0-NAPI" char atl1c_driver_name[] = "atl1c"; char atl1c_driver_version[] = ATL1C_DRV_VERSION; #define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 @@ -29,7 +29,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION; #define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ #define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ #define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ - +#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */ #define L2CB_V10 0xc0 #define L2CB_V11 0xc1 @@ -97,7 +97,28 @@ static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] = static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; +static void atl1c_pcie_patch(struct atl1c_hw *hw) +{ + u32 data; + AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); + data |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); + + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { + AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); + + data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK << + PCIE_PHYMISC2_SERDES_CDR_SHIFT); + data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; + data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK << + PCIE_PHYMISC2_SERDES_TH_SHIFT); + data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); + } +} + +/* FIXME: no need any more ? 
*/ /* * atl1c_init_pcie - init PCIE module */ @@ -127,6 +148,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) data &= ~PCIE_UC_SERVRITY_FCP; AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); + AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); + data &= ~LTSSM_ID_EN_WRO; + AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data); + + atl1c_pcie_patch(hw); if (flag & ATL1C_PCIE_L0S_L1_DISABLE) atl1c_disable_l0s_l1(hw); if (flag & ATL1C_PCIE_PHY_RESET) @@ -135,7 +161,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); - msleep(1); + msleep(5); } /* @@ -159,6 +185,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) { atomic_inc(&adapter->irq_sem); AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); AT_WRITE_FLUSH(&adapter->hw); synchronize_irq(adapter->pdev->irq); } @@ -231,15 +258,15 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) if ((phy_data & BMSR_LSTATUS) == 0) { /* link down */ - if (netif_carrier_ok(netdev)) { - hw->hibernate = true; - if (atl1c_stop_mac(hw) != 0) - if (netif_msg_hw(adapter)) - dev_warn(&pdev->dev, - "stop mac failed\n"); - atl1c_set_aspm(hw, false); - } + hw->hibernate = true; + if (atl1c_stop_mac(hw) != 0) + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, "stop mac failed\n"); + atl1c_set_aspm(hw, false); netif_carrier_off(netdev); + netif_stop_queue(netdev); + atl1c_phy_reset(hw); + atl1c_phy_init(&adapter->hw); } else { /* Link Up */ hw->hibernate = false; @@ -308,6 +335,7 @@ static void atl1c_common_task(struct work_struct *work) netdev = adapter->netdev; if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { + adapter->work_event &= ~ATL1C_WORK_EVENT_RESET; netif_device_detach(netdev); atl1c_down(adapter); atl1c_up(adapter); @@ -315,8 +343,11 @@ static void atl1c_common_task(struct work_struct *work) return; } - if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) + if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) { + adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE; atl1c_check_link_status(adapter); + } + return; } @@ -476,6 +507,13 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; adapter->hw.max_frame_size = new_mtu; atl1c_set_rxbufsize(adapter, netdev); + if (new_mtu > MAX_TSO_FRAME_SIZE) { + adapter->netdev->features &= ~NETIF_F_TSO; + adapter->netdev->features &= ~NETIF_F_TSO6; + } else { + adapter->netdev->features |= NETIF_F_TSO; + adapter->netdev->features |= NETIF_F_TSO6; + } atl1c_down(adapter); atl1c_up(adapter); clear_bit(__AT_RESETTING, &adapter->flags); @@ -613,6 +651,9 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw) case PCI_DEVICE_ID_ATHEROS_L1D: hw->nic_type = athr_l1d; break; + case PCI_DEVICE_ID_ATHEROS_L1D_2_0: + hw->nic_type = athr_l1d_2; + break; default: break; } @@ -627,9 +668,7 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); - hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ | - ATL1C_INTR_MODRT_ENABLE | - ATL1C_RX_IPV6_CHKSUM | + hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | ATL1C_TXQ_MODE_ENHANCE; if (link_ctrl_data & LINK_CTRL_L0S_EN) hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; @@ -637,12 +676,12 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; if (link_ctrl_data & LINK_CTRL_EXT_SYNC) hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; + hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; if 
(hw->nic_type == athr_l1c || - hw->nic_type == athr_l1d) { - hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; + hw->nic_type == athr_l1d || + hw->nic_type == athr_l1d_2) hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; - } return 0; } /* @@ -657,6 +696,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; + u32 revision; + adapter->wol = 0; adapter->link_speed = SPEED_0; @@ -669,7 +710,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; - + AT_READ_REG(hw, PCI_CLASS_REVISION, &revision); + hw->revision_id = revision & 0xFF; /* before link up, we assume hibernate is true */ hw->hibernate = true; hw->media_type = MEDIA_TYPE_AUTO_SENSOR; @@ -974,6 +1016,7 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; int i; + u32 data; /* TPD */ AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, @@ -1017,6 +1060,23 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); + if (hw->nic_type == athr_l2c_b) { + AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); + AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L); + AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L); + AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L); + AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ + AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ + } + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) { + /* Power Saving for L2c_B */ + AT_READ_REG(hw, REG_SERDES_LOCK, &data); + data |= SERDES_MAC_CLK_SLOWDOWN; + data |= SERDES_PYH_CLK_SLOWDOWN; + AT_WRITE_REG(hw, REG_SERDES_LOCK, data); + } /* Load all of base address above */ AT_WRITE_REG(hw, REG_LOAD_PTR, 1); } @@ -1029,6 +1089,7 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter) u16 tx_offload_thresh; u32 txq_ctrl_data; u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ + u32 max_pay_load_data; extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; @@ -1046,8 +1107,11 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter) TXQ_NUM_TPD_BURST_SHIFT; if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) txq_ctrl_data |= TXQ_CTRL_ENH_MODE; - txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] & + max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] & TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) + max_pay_load_data >>= 1; + txq_ctrl_data |= max_pay_load_data; AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); } @@ -1078,7 +1142,7 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter) rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << RSS_HASH_BITS_SHIFT; if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) - rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M & + rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M & ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); @@ -1198,21 +1262,23 @@ static int atl1c_reset_mac(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = (struct atl1c_adapter 
*)hw->adapter; struct pci_dev *pdev = adapter->pdev; - int ret; + u32 master_ctrl_data = 0; AT_WRITE_REG(hw, REG_IMR, 0); AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); - ret = atl1c_stop_mac(hw); - if (ret) - return ret; + atl1c_stop_mac(hw); /* * Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. */ - AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF; + AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST) + & 0xFFFF)); + AT_WRITE_FLUSH(hw); msleep(10); /* Wait at least 10ms for All module to be Idle */ @@ -1253,42 +1319,39 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) { u32 pm_ctrl_data; u32 link_ctrl_data; + u32 link_l1_timer = 0xF; AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); - pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; + pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << PM_CTRL_L1_ENTRY_TIMER_SHIFT); pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << - PM_CTRL_LCKDET_TIMER_SHIFT); - - pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; - pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; - pm_ctrl_data |= PM_CTRL_RBER_EN; - pm_ctrl_data |= PM_CTRL_SDES_EN; + PM_CTRL_LCKDET_TIMER_SHIFT); + pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT; - if (hw->nic_type == athr_l2c_b || - hw->nic_type == athr_l1d || - hw->nic_type == athr_l2c_b2) { + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { - if (hw->nic_type == athr_l2c_b && - hw->revision_id == L2CB_V10) + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) link_ctrl_data |= LINK_CTRL_EXT_SYNC; } AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); - pm_ctrl_data |= PM_CTRL_PCIE_RECV; - pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT; - pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S; + pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER; + pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK << + PM_CTRL_PM_REQ_TIMER_SHIFT); + pm_ctrl_data |= AT_ASPM_L1_TIMER << + PM_CTRL_PM_REQ_TIMER_SHIFT; pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; pm_ctrl_data &= ~PM_CTRL_HOTRST; pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; } - + pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; if (linkup) { pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; @@ -1297,27 +1360,26 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; - if (hw->nic_type == athr_l2c_b || - hw->nic_type == athr_l1d || - hw->nic_type == athr_l2c_b2) { + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { if (hw->nic_type == athr_l2c_b) if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) - pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN; + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; - if (hw->adapter->link_speed == SPEED_100 || - hw->adapter->link_speed == SPEED_1000) { - pm_ctrl_data &= - 
~(PM_CTRL_L1_ENTRY_TIMER_MASK << - PM_CTRL_L1_ENTRY_TIMER_SHIFT); - if (hw->nic_type == athr_l1d) - pm_ctrl_data |= 0xF << - PM_CTRL_L1_ENTRY_TIMER_SHIFT; - else - pm_ctrl_data |= 7 << - PM_CTRL_L1_ENTRY_TIMER_SHIFT; + if (hw->adapter->link_speed == SPEED_100 || + hw->adapter->link_speed == SPEED_1000) { + pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << + PM_CTRL_L1_ENTRY_TIMER_SHIFT); + if (hw->nic_type == athr_l2c_b) + link_l1_timer = 7; + else if (hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d_2) + link_l1_timer = 4; + pm_ctrl_data |= link_l1_timer << + PM_CTRL_L1_ENTRY_TIMER_SHIFT; } } else { pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; @@ -1326,24 +1388,12 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; - } - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); - if (hw->adapter->link_speed == SPEED_10) - if (hw->nic_type == athr_l1d) - atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D); - else - atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); - else if (hw->adapter->link_speed == SPEED_100) - atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD); - else - atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD); + } } else { - pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; - pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) @@ -1351,8 +1401,9 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) else pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; } - AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); + + return; } static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) @@ -1391,7 +1442,8 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; - if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) { + if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d_2) { mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; } @@ -1409,6 +1461,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter) struct atl1c_hw *hw = &adapter->hw; u32 master_ctrl_data = 0; u32 intr_modrt_data; + u32 data; /* clear interrupt status */ AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); @@ -1418,6 +1471,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter) * HW will enable self to assert interrupt event to system after * waiting x-time for software to notify it accept interrupt. 
*/ + + data = CLK_GATING_EN_ALL; + if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { + if (hw->nic_type == athr_l2c_b) + data &= ~CLK_GATING_RXMAC_EN; + } else + data = 0; + AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data); + AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, hw->ict & INT_RETRIG_TIMER_MASK); @@ -1436,6 +1498,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter) if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) master_ctrl_data |= MASTER_CTRL_INT_RDCLR; + master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { @@ -1624,11 +1687,9 @@ static irqreturn_t atl1c_intr(int irq, void *data) "atl1c hardware error (status = 0x%x)\n", status & ISR_ERROR); /* reset MAC */ - hw->intr_mask &= ~ISR_ERROR; - AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); adapter->work_event |= ATL1C_WORK_EVENT_RESET; schedule_work(&adapter->common_task); - break; + return IRQ_HANDLED; } if (status & ISR_OVER) @@ -2303,7 +2364,6 @@ void atl1c_down(struct atl1c_adapter *adapter) napi_disable(&adapter->napi); atl1c_irq_disable(adapter); atl1c_free_irq(adapter); - AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); /* reset MAC to disable all RX/TX */ atl1c_reset_mac(&adapter->hw); msleep(1); @@ -2387,79 +2447,68 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; - u32 ctrl; - u32 mac_ctrl_data; - u32 master_ctrl_data; + u32 mac_ctrl_data = 0; + u32 master_ctrl_data = 0; u32 wol_ctrl_data = 0; - u16 mii_bmsr_data; - u16 save_autoneg_advertised; - u16 mii_intr_status_data; + u16 mii_intr_status_data = 0; u32 wufc = adapter->wol; - u32 i; int retval = 0; + atl1c_disable_l0s_l1(hw); if (netif_running(netdev)) { WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); atl1c_down(adapter); } netif_device_detach(netdev); - atl1c_disable_l0s_l1(hw); retval = pci_save_state(pdev); if (retval) return retval; + + if (wufc) + if (atl1c_phy_power_saving(hw) != 0) + dev_dbg(&pdev->dev, "phy power saving failed"); + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data); + + master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; + mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT); + mac_ctrl_data |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT); + mac_ctrl_data &= ~MAC_CTRL_DUPLX; + if (wufc) { - AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); - master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; - - /* get link status */ - atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); - atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); - save_autoneg_advertised = hw->autoneg_advertised; - hw->autoneg_advertised = ADVERTISED_10baseT_Half; - if (atl1c_restart_autoneg(hw) != 0) - if (netif_msg_link(adapter)) - dev_warn(&pdev->dev, "phy autoneg failed\n"); - hw->phy_configured = false; /* re-init PHY when resume */ - hw->autoneg_advertised = save_autoneg_advertised; + mac_ctrl_data |= MAC_CTRL_RX_EN; + if (adapter->link_speed == SPEED_1000 || + adapter->link_speed == SPEED_0) { + mac_ctrl_data |= atl1c_mac_speed_1000 << + MAC_CTRL_SPEED_SHIFT; + mac_ctrl_data |= MAC_CTRL_DUPLX; + } else + mac_ctrl_data |= atl1c_mac_speed_10_100 << + MAC_CTRL_SPEED_SHIFT; + + if (adapter->link_duplex == DUPLEX_FULL) + mac_ctrl_data |= MAC_CTRL_DUPLX; + /* turn on magic packet 
wol */ if (wufc & AT_WUFC_MAG) - wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; if (wufc & AT_WUFC_LNKC) { - for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { - msleep(100); - atl1c_read_phy_reg(hw, MII_BMSR, - (u16 *)&mii_bmsr_data); - if (mii_bmsr_data & BMSR_LSTATUS) - break; - } - if ((mii_bmsr_data & BMSR_LSTATUS) == 0) - if (netif_msg_link(adapter)) - dev_warn(&pdev->dev, - "%s: Link may change" - "when suspend\n", - atl1c_driver_name); wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; /* only link up can wake up */ if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { - if (netif_msg_link(adapter)) - dev_err(&pdev->dev, - "%s: read write phy " - "register failed.\n", - atl1c_driver_name); - goto wol_dis; + dev_dbg(&pdev->dev, "%s: read write phy " + "register failed.\n", + atl1c_driver_name); } } /* clear phy interrupt */ atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); /* Config MAC Ctrl register */ - mac_ctrl_data = MAC_CTRL_RX_EN; - /* set to 10/100M halt duplex */ - mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT; - mac_ctrl_data |= (((u32)adapter->hw.preamble_len & - MAC_CTRL_PRMLEN_MASK) << - MAC_CTRL_PRMLEN_SHIFT); - if (adapter->vlgrp) mac_ctrl_data |= MAC_CTRL_RMV_VLAN; @@ -2467,37 +2516,30 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) if (wufc & AT_WUFC_MAG) mac_ctrl_data |= MAC_CTRL_BC_EN; - if (netif_msg_hw(adapter)) - dev_dbg(&pdev->dev, - "%s: suspend MAC=0x%x\n", - atl1c_driver_name, mac_ctrl_data); + dev_dbg(&pdev->dev, + "%s: suspend MAC=0x%x\n", + atl1c_driver_name, mac_ctrl_data); AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); /* pcie patch */ - AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); - ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; - AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + device_set_wakeup_enable(&pdev->dev, 1); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); - goto suspend_exit; + AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | + GPHY_CTRL_EXT_RESET); + pci_prepare_to_sleep(pdev); + } else { + AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING); + master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS; + mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT; + mac_ctrl_data |= MAC_CTRL_DUPLX; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + hw->phy_configured = false; /* re-init PHY when resume */ + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); } -wol_dis: - - /* WOL disabled */ - AT_WRITE_REG(hw, REG_WOL_CTRL, 0); - - /* pcie patch */ - AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); - ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; - AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); - - atl1c_phy_disable(hw); - hw->phy_configured = false; /* re-init PHY when resume */ - - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); -suspend_exit: pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); @@ -2516,9 +2558,19 @@ static int atl1c_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3cold, 0); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | + ATL1C_PCIE_PHY_RESET); atl1c_phy_reset(&adapter->hw); atl1c_reset_mac(&adapter->hw); + atl1c_phy_init(&adapter->hw); + +#if 0 + AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data); + pm_data &= ~PM_CTRLSTAT_PME_EN; + 
AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data); +#endif + netif_device_attach(netdev); if (netif_running(netdev)) atl1c_up(adapter); diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 146372fd66832843996b05216dd70d1e1d612c85..9c0ddb273ac81a38c0de24fa315c4c9d5519cde4 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -436,8 +436,8 @@ struct rx_free_desc { __le16 buf_len; /* Size of the receive buffer in host memory */ u16 coalese; /* Update consumer index to host after the * reception of this frame */ - /* __attribute__ ((packed)) is required */ -} __attribute__ ((packed)); + /* __packed is required */ +} __packed; /* * The L1 transmit packet descriptor is comprised of four 32-bit words. diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index ece6128bef146aa330f98f486d23f2bed1c15ed5..386d4feec652b4d33ec0b5648c176b3b2048d0bf 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -978,7 +978,7 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!aup->phy_dev) return -EINVAL; /* PHY not controllable */ - return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); + return phy_mii_ioctl(aup->phy_dev, rq, cmd); } static const struct net_device_ops au1000_netdev_ops = { diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 55c9958043c43266c0d5b67350648f03f58143ee..20e946b1e744d9d0d4f4802969d05e8c640e5b17 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -481,8 +481,10 @@ static int ax_open(struct net_device *dev) return ret; ret = ax_ei_open(dev); - if (ret) + if (ret) { + free_irq(dev->irq, dev); return ret; + } /* turn the phy on (if turned off) */ diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 293f9c16e7860b87944812c104a50804245326fe..37617abc164769aa5f8be38a797ea4337a3585e0 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *); static void b44_init_hw(struct b44 *, int); -static int dma_desc_align_mask; static int dma_desc_sync_size; static int instance; @@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, unsigned long offset, enum dma_data_direction dir) { - ssb_dma_sync_single_range_for_device(sdev, dma_base, - offset & dma_desc_align_mask, - dma_desc_sync_size, dir); + dma_sync_single_for_device(sdev->dma_dev, dma_base + offset, + dma_desc_sync_size, dir); } static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, @@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, unsigned long offset, enum dma_data_direction dir) { - ssb_dma_sync_single_range_for_cpu(sdev, dma_base, - offset & dma_desc_align_mask, - dma_desc_sync_size, dir); + dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset, + dma_desc_sync_size, dir); } static inline unsigned long br32(const struct b44 *bp, unsigned long reg) @@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp) BUG_ON(skb == NULL); - ssb_dma_unmap_single(bp->sdev, - rp->mapping, - skb->len, - DMA_TO_DEVICE); + dma_unmap_single(bp->sdev->dma_dev, + rp->mapping, + skb->len, + DMA_TO_DEVICE); rp->skb = NULL; dev_kfree_skb_irq(skb); } @@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) if (skb == NULL) return -ENOMEM; - mapping = ssb_dma_map_single(bp->sdev, skb->data, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); /* Hardware bug work-around, 
the chip is unable to do PCI DMA to/from anything above 1GB :-( */ - if (ssb_dma_mapping_error(bp->sdev, mapping) || + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { /* Sigh... */ - if (!ssb_dma_mapping_error(bp->sdev, mapping)) - ssb_dma_unmap_single(bp->sdev, mapping, + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); if (skb == NULL) return -ENOMEM; - mapping = ssb_dma_map_single(bp->sdev, skb->data, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); - if (ssb_dma_mapping_error(bp->sdev, mapping) || - mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { - if (!ssb_dma_mapping_error(bp->sdev, mapping)) - ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); dev_kfree_skb_any(skb); return -ENOMEM; } @@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) dest_idx * sizeof(*dest_desc), DMA_BIDIRECTIONAL); - ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); + dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); } static int b44_rx(struct b44 *bp, int budget) @@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int budget) struct rx_header *rh; u16 len; - ssb_dma_sync_single_for_cpu(bp->sdev, map, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(bp->sdev->dma_dev, map, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); rh = (struct rx_header *) skb->data; len = le16_to_cpu(rh->len); if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || @@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int budget) skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); if (skb_size < 0) goto drop_it; - ssb_dma_unmap_single(bp->sdev, map, - skb_size, DMA_FROM_DEVICE); + dma_unmap_single(bp->sdev->dma_dev, map, + skb_size, DMA_FROM_DEVICE); /* Leave out rx_header */ skb_put(skb, len + RX_PKT_OFFSET); skb_pull(skb, RX_PKT_OFFSET); @@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) goto err_out; } - mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE); - if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { struct sk_buff *bounce_skb; /* Chip can't handle DMA to/from >1GB, use bounce buffer */ - if (!ssb_dma_mapping_error(bp->sdev, mapping)) - ssb_dma_unmap_single(bp->sdev, mapping, len, + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, len, DMA_TO_DEVICE); bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); if (!bounce_skb) goto err_out; - mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data, - len, DMA_TO_DEVICE); - if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) { - if (!ssb_dma_mapping_error(bp->sdev, mapping)) - ssb_dma_unmap_single(bp->sdev, mapping, + mapping = 
dma_map_single(bp->sdev->dma_dev, bounce_skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, len, DMA_TO_DEVICE); dev_kfree_skb_any(bounce_skb); goto err_out; @@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *bp) if (rp->skb == NULL) continue; - ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); dev_kfree_skb_any(rp->skb); rp->skb = NULL; } @@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *bp) if (rp->skb == NULL) continue; - ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len, - DMA_TO_DEVICE); + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, + DMA_TO_DEVICE); dev_kfree_skb_any(rp->skb); rp->skb = NULL; } @@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *bp) memset(bp->tx_ring, 0, B44_TX_RING_BYTES); if (bp->flags & B44_FLAG_RX_RING_HACK) - ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); if (bp->flags & B44_FLAG_TX_RING_HACK) - ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, DMA_TO_DEVICE); for (i = 0; i < bp->rx_pending; i++) { if (b44_alloc_rx_skb(bp, -1, i) < 0) @@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b44 *bp) bp->tx_buffers = NULL; if (bp->rx_ring) { if (bp->flags & B44_FLAG_RX_RING_HACK) { - ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); kfree(bp->rx_ring); } else - ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES, - bp->rx_ring, bp->rx_ring_dma, - GFP_KERNEL); + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, + bp->rx_ring, bp->rx_ring_dma); bp->rx_ring = NULL; bp->flags &= ~B44_FLAG_RX_RING_HACK; } if (bp->tx_ring) { if (bp->flags & B44_FLAG_TX_RING_HACK) { - ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, DMA_TO_DEVICE); kfree(bp->tx_ring); } else - ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES, - bp->tx_ring, bp->tx_ring_dma, - GFP_KERNEL); + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, + bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; bp->flags &= ~B44_FLAG_TX_RING_HACK; } @@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) goto out_err; size = DMA_TABLE_BYTES; - bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp); + bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, + &bp->rx_ring_dma, gfp); if (!bp->rx_ring) { /* Allocation may have failed due to pci_alloc_consistent insisting on use of GFP_DMA, which is more restrictive @@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) if (!rx_ring) goto out_err; - rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); - if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) || + if 
(dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || rx_ring_dma + size > DMA_BIT_MASK(30)) { kfree(rx_ring); goto out_err; @@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) bp->flags |= B44_FLAG_RX_RING_HACK; } - bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp); + bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, + &bp->tx_ring_dma, gfp); if (!bp->tx_ring) { /* Allocation may have failed due to ssb_dma_alloc_consistent insisting on use of GFP_DMA, which is more restrictive @@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) if (!tx_ring) goto out_err; - tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); - if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) || + if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || tx_ring_dma + size > DMA_BIT_MASK(30)) { kfree(tx_ring); goto out_err; @@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev, "Failed to powerup the bus\n"); goto err_out_free_dev; } - err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30)); - if (err) { + + if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || + dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) { dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n"); goto err_out_powerdown; } + err = b44_get_invariants(bp); if (err) { dev_err(sdev->dev, @@ -2344,7 +2339,6 @@ static int __init b44_init(void) int err; /* Setup paramaters for syncing RX/TX DMA descriptors */ - dma_desc_align_mask = ~(dma_desc_align_size - 1); dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); err = b44_pci_init(); diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c index faf5add894d7788ebb8b1621951f164d772505d8..0d2c5da08937d33ad4a381e8a08ef04de378df86 100644 --- a/drivers/net/bcm63xx_enet.c +++ b/drivers/net/bcm63xx_enet.c @@ -1496,7 +1496,7 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (priv->has_phy) { if (!priv->phydev) return -ENODEV; - return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); + return phy_mii_ioctl(priv->phydev, rq, cmd); } else { struct mii_if_info mii; diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index b46be490cd2aab76bf5cf84a496d18632b10cb59..99197bd54da558ef26cf8a62a54af19aaa02e0f8 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -33,7 +33,7 @@ #include "be_hw.h" -#define DRV_VER "2.102.147u" +#define DRV_VER "2.103.175u" #define DRV_NAME "be2net" #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" @@ -220,7 +220,16 @@ struct be_rx_obj { struct be_rx_page_info page_info_tbl[RX_Q_LEN]; }; +struct be_vf_cfg { + unsigned char vf_mac_addr[ETH_ALEN]; + u32 vf_if_handle; + u32 vf_pmac_id; + u16 vf_vlan_tag; + u32 vf_tx_rate; +}; + #define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */ +#define BE_INVALID_PMAC_ID 0xffffffff struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -276,23 +285,26 @@ struct be_adapter { u32 port_num; bool promiscuous; bool wol; - u32 cap; + u32 function_mode; u32 rx_fc; /* Rx flow control */ u32 tx_fc; /* Tx flow control */ + bool ue_detected; + bool stats_ioctl_sent; int link_speed; u8 port_type; u8 transceiver; + u8 autoneg; u8 generation; /* BladeEngine ASIC generation */ u32 flash_status; struct completion flash_compl; bool 
sriov_enabled; - u32 vf_if_handle[BE_MAX_VF]; - u32 vf_pmac_id[BE_MAX_VF]; + struct be_vf_cfg vf_cfg[BE_MAX_VF]; u8 base_eq_id; + u8 is_virtfn; }; -#define be_physfn(adapter) (!adapter->pdev->is_virtfn) +#define be_physfn(adapter) (!adapter->is_virtfn) /* BladeEngine Generation numbers */ #define BE_GEN2 2 @@ -392,6 +404,15 @@ static inline u8 is_udp_pkt(struct sk_buff *skb) return val; } +static inline void be_check_sriov_fn_type(struct be_adapter *adapter) +{ + u8 data; + + pci_write_config_byte(adapter->pdev, 0xFE, 0xAA); + pci_read_config_byte(adapter->pdev, 0xFE, &data); + adapter->is_virtfn = (data != 0xAA); +} + extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped); extern void be_link_status_update(struct be_adapter *adapter, bool link_up); diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index b9ad799c719f1d5b541a33d93d095318e6c362fc..3d305494a6066fb987510abebe34a1332f80c114 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -25,6 +25,8 @@ static void be_mcc_notify(struct be_adapter *adapter) val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; + + wmb(); iowrite32(val, adapter->db + DB_MCCQ_OFFSET); } @@ -73,8 +75,10 @@ static int be_mcc_compl_process(struct be_adapter *adapter, be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); netdev_stats_update(adapter); + adapter->stats_ioctl_sent = false; } - } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) { + } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && + (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK; dev_warn(&adapter->pdev->dev, @@ -186,7 +190,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) { - int cnt = 0, wait = 5; + int msecs = 0; u32 ready; do { @@ -201,15 +205,15 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) if (ready) break; - if (cnt > 4000000) { + if (msecs > 4000) { dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); + be_dump_ue(adapter); return -1; } - if (cnt > 50) - wait = 200; - cnt += wait; - udelay(wait); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(1)); + msecs++; } while (true); return 0; @@ -948,6 +952,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(adapter); + adapter->stats_ioctl_sent = true; err: spin_unlock_bh(&adapter->mcc_lock); @@ -1256,7 +1261,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } /* Uses mbox */ -int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) +int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode) { struct be_mcc_wrb *wrb; struct be_cmd_req_query_fw_cfg *req; @@ -1277,7 +1282,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap) if (!status) { struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); *port_num = le32_to_cpu(resp->phys_port); - *cap = le32_to_cpu(resp->function_cap); + *mode = le32_to_cpu(resp->function_mode); } spin_unlock(&adapter->mbox_lock); @@ -1694,3 +1699,71 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, spin_unlock_bh(&adapter->mcc_lock); return status; } + +int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd) +{ + struct be_mcc_wrb *wrb; + struct 
be_cmd_req_get_phy_info *req; + struct be_sge *sge; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd->va; + sge = nonembedded_sgl(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, + OPCODE_COMMON_GET_PHY_DETAILS); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_PHY_DETAILS, + sizeof(*req)); + + sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma)); + sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(cmd->size); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_qos *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, + OPCODE_COMMON_SET_QOS); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_QOS, sizeof(*req)); + + req->hdr.domain = domain; + req->valid_bits = BE_QOS_BITS_NIC; + req->max_bps_nic = bps; + + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 763dc199e337255bbb55f30c8f612c96f723abb5..bdc10a28cfda9feb11ffdecee141a6e333a9e92d 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -124,6 +124,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_CQ_CREATE 12 #define OPCODE_COMMON_EQ_CREATE 13 #define OPCODE_COMMON_MCC_CREATE 21 +#define OPCODE_COMMON_SET_QOS 28 #define OPCODE_COMMON_SEEPROM_READ 30 #define OPCODE_COMMON_NTWK_RX_FILTER 34 #define OPCODE_COMMON_GET_FW_VERSION 35 @@ -144,6 +145,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 #define OPCODE_COMMON_GET_BEACON_STATE 70 #define OPCODE_COMMON_READ_TRANSRECV_DATA 73 +#define OPCODE_COMMON_GET_PHY_DETAILS 102 #define OPCODE_ETH_ACPI_CONFIG 2 #define OPCODE_ETH_PROMISCUOUS 3 @@ -747,7 +749,7 @@ struct be_cmd_resp_query_fw_cfg { u32 be_config_number; u32 asic_revision; u32 phys_port; - u32 function_cap; + u32 function_mode; u32 rsvd[26]; }; @@ -869,6 +871,46 @@ struct be_cmd_resp_seeprom_read { u8 seeprom_data[BE_READ_SEEPROM_LEN]; }; +enum { + PHY_TYPE_CX4_10GB = 0, + PHY_TYPE_XFP_10GB, + PHY_TYPE_SFP_1GB, + PHY_TYPE_SFP_PLUS_10GB, + PHY_TYPE_KR_10GB, + PHY_TYPE_KX4_10GB, + PHY_TYPE_BASET_10GB, + PHY_TYPE_BASET_1GB, + PHY_TYPE_DISABLED = 255 +}; + +struct be_cmd_req_get_phy_info { + struct be_cmd_req_hdr hdr; + u8 rsvd0[24]; +}; +struct be_cmd_resp_get_phy_info { + struct be_cmd_req_hdr hdr; + u16 phy_type; + u16 interface_type; + u32 misc_params; + u32 future_use[4]; +}; + +/*********************** Set QOS ***********************/ + +#define BE_QOS_BITS_NIC 1 + +struct be_cmd_req_set_qos { + struct be_cmd_req_hdr hdr; + u32 valid_bits; + u32 max_bps_nic; + u32 rsvd[7]; +}; + +struct be_cmd_resp_set_qos { + struct be_cmd_resp_hdr hdr; + u32 rsvd; +}; + extern int be_pci_fnum_get(struct be_adapter *adapter); extern int be_cmd_POST(struct be_adapter *adapter); extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, @@ -947,4 +989,8 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, u8 
loopback_type, u8 enable); +extern int be_cmd_get_phy_info(struct be_adapter *adapter, + struct be_dma_mem *cmd); +extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); +extern void be_dump_ue(struct be_adapter *adapter); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 200e98515909453a559e20926dc5090ded20a287..cd16243c7c364a858d849ac3f70ac10bbbfcb966 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -314,15 +314,19 @@ static int be_get_sset_count(struct net_device *netdev, int stringset) static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); - u8 mac_speed = 0, connector = 0; + struct be_dma_mem phy_cmd; + struct be_cmd_resp_get_phy_info *resp; + u8 mac_speed = 0; u16 link_speed = 0; bool link_up = false; int status; + u16 intf_type; - if (adapter->link_speed < 0) { + if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed); + be_link_status_update(adapter, link_up); /* link_speed is in units of 10 Mbps */ if (link_speed) { ecmd->speed = link_speed*10; @@ -337,40 +341,57 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } } - status = be_cmd_read_port_type(adapter, adapter->port_num, - &connector); + phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info); + phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size, + &phy_cmd.dma); + if (!phy_cmd.va) { + dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); + return -ENOMEM; + } + status = be_cmd_get_phy_info(adapter, &phy_cmd); if (!status) { - switch (connector) { - case 7: + resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va; + intf_type = le16_to_cpu(resp->interface_type); + + switch (intf_type) { + case PHY_TYPE_XFP_10GB: + case PHY_TYPE_SFP_1GB: + case PHY_TYPE_SFP_PLUS_10GB: ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - break; - case 0: - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_EXTERNAL; break; default: ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; break; } - } else { - ecmd->port = PORT_AUI; + + switch (intf_type) { + case PHY_TYPE_KR_10GB: + case PHY_TYPE_KX4_10GB: + ecmd->autoneg = AUTONEG_ENABLE; ecmd->transceiver = XCVR_INTERNAL; + break; + default: + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_EXTERNAL; + break; + } } /* Save for future use */ adapter->link_speed = ecmd->speed; adapter->port_type = ecmd->port; adapter->transceiver = ecmd->transceiver; + adapter->autoneg = ecmd->autoneg; + pci_free_consistent(adapter->pdev, phy_cmd.size, + phy_cmd.va, phy_cmd.dma); } else { ecmd->speed = adapter->link_speed; ecmd->port = adapter->port_type; ecmd->transceiver = adapter->transceiver; + ecmd->autoneg = adapter->autoneg; } ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; ecmd->phy_address = adapter->port_num; switch (ecmd->port) { case PORT_FIBRE: @@ -384,6 +405,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) break; } + if (ecmd->autoneg) { + ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full); + } + return 0; } diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 063026de49571ae638dad9f604175bfd51ddfebf..6c8f9bb8bfe67cfec9618e4690a6a068ee32855f 100644 --- a/drivers/net/benet/be_hw.h +++ 
b/drivers/net/benet/be_hw.h @@ -56,6 +56,16 @@ #define PCICFG_PM_CONTROL_OFFSET 0x44 #define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */ +/********* Online Control Registers *******/ +#define PCICFG_ONLINE0 0xB0 +#define PCICFG_ONLINE1 0xB4 + +/********* UE Status and Mask Registers ***/ +#define PCICFG_UE_STATUS_LOW 0xA0 +#define PCICFG_UE_STATUS_HIGH 0xA4 +#define PCICFG_UE_STATUS_LOW_MASK 0xA8 +#define PCICFG_UE_STATUS_HI_MASK 0xAC + /********* ISR0 Register offset **********/ #define CEV_ISR0_OFFSET 0xC18 #define CEV_ISR_SIZE 4 @@ -192,7 +202,7 @@ struct amap_eth_hdr_wrb { u8 event; u8 crc; u8 forward; - u8 ipsec; + u8 lso6; u8 mgmt; u8 ipcs; u8 udpcs; diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 54b14272f333330edc9cddbce502d7e0068103ba..74e146f470c60e9df5ff01806623a0aaaaa0ec82 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -40,6 +40,76 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); +/* UE Status Low CSR */ +static char *ue_status_low_desc[] = { + "CEV", + "CTX", + "DBUF", + "ERX", + "Host", + "MPU", + "NDMA", + "PTC ", + "RDMA ", + "RXF ", + "RXIPS ", + "RXULP0 ", + "RXULP1 ", + "RXULP2 ", + "TIM ", + "TPOST ", + "TPRE ", + "TXIPS ", + "TXULP0 ", + "TXULP1 ", + "UC ", + "WDMA ", + "TXULP2 ", + "HOST1 ", + "P0_OB_LINK ", + "P1_OB_LINK ", + "HOST_GPIO ", + "MBOX ", + "AXGMAC0", + "AXGMAC1", + "JTAG", + "MPU_INTPEND" +}; +/* UE Status High CSR */ +static char *ue_status_hi_desc[] = { + "LPCMEMHOST", + "MGMT_MAC", + "PCS0ONLINE", + "MPU_IRAM", + "PCS1ONLINE", + "PCTL0", + "PCTL1", + "PMEM", + "RR", + "TXPB", + "RXPP", + "XAUI", + "TXP", + "ARM", + "IPC", + "HOST2", + "HOST3", + "HOST4", + "HOST5", + "HOST6", + "HOST7", + "HOST8", + "HOST9", + "NETC", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown", + "Unknown" +}; static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { @@ -89,6 +159,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) u32 val = 0; val |= qid & DB_RQ_RING_ID_MASK; val |= posted << DB_RQ_NUM_POSTED_SHIFT; + + wmb(); iowrite32(val, adapter->db + DB_RQ_OFFSET); } @@ -97,6 +169,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted) u32 val = 0; val |= qid & DB_TXULP_RING_ID_MASK; val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; + + wmb(); iowrite32(val, adapter->db + DB_TXULP1_OFFSET); } @@ -373,10 +447,12 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); - if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, hdr, skb_shinfo(skb)->gso_size); + if (skb_is_gso_v6(skb)) + AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (is_tcp_pkt(skb)) AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); @@ -546,11 +622,18 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. * If the user configures more, place BE in vlan promiscuous mode. 
*/ -static int be_vid_config(struct be_adapter *adapter) +static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) { u16 vtag[BE_NUM_VLANS_SUPPORTED]; u16 ntags = 0, i; int status = 0; + u32 if_handle; + + if (vf) { + if_handle = adapter->vf_cfg[vf_num].vf_if_handle; + vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag); + status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0); + } if (adapter->vlans_added <= adapter->max_vlans) { /* Construct VLAN Table to give to HW */ @@ -566,6 +649,7 @@ static int be_vid_config(struct be_adapter *adapter) status = be_cmd_vlan_config(adapter, adapter->if_handle, NULL, 0, 1, 1); } + return status; } @@ -586,27 +670,28 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + adapter->vlans_added++; if (!be_physfn(adapter)) return; adapter->vlan_tag[vid] = 1; - adapter->vlans_added++; if (adapter->vlans_added <= (adapter->max_vlans + 1)) - be_vid_config(adapter); + be_vid_config(adapter, false, 0); } static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + adapter->vlans_added--; + vlan_group_set_device(adapter->vlan_grp, vid, NULL); + if (!be_physfn(adapter)) return; adapter->vlan_tag[vid] = 0; - vlan_group_set_device(adapter->vlan_grp, vid, NULL); - adapter->vlans_added--; if (adapter->vlans_added <= adapter->max_vlans) - be_vid_config(adapter); + be_vid_config(adapter, false, 0); } static void be_set_multicast_list(struct net_device *netdev) @@ -650,14 +735,93 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) return -EINVAL; - status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf], - adapter->vf_pmac_id[vf]); + if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) + status = be_cmd_pmac_del(adapter, + adapter->vf_cfg[vf].vf_if_handle, + adapter->vf_cfg[vf].vf_pmac_id); - status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf], - &adapter->vf_pmac_id[vf]); - if (!status) + status = be_cmd_pmac_add(adapter, mac, + adapter->vf_cfg[vf].vf_if_handle, + &adapter->vf_cfg[vf].vf_pmac_id); + + if (status) dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", mac, vf); + else + memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN); + + return status; +} + +static int be_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *vi) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (!adapter->sriov_enabled) + return -EPERM; + + if (vf >= num_vfs) + return -EINVAL; + + vi->vf = vf; + vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate; + vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag; + vi->qos = 0; + memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN); + + return 0; +} + +static int be_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; + + if (!adapter->sriov_enabled) + return -EPERM; + + if ((vf >= num_vfs) || (vlan > 4095)) + return -EINVAL; + + if (vlan) { + adapter->vf_cfg[vf].vf_vlan_tag = vlan; + adapter->vlans_added++; + } else { + adapter->vf_cfg[vf].vf_vlan_tag = 0; + adapter->vlans_added--; + } + + status = be_vid_config(adapter, true, vf); + + if (status) + dev_info(&adapter->pdev->dev, + "VLAN %d config on VF %d failed\n", vlan, vf); + return status; +} + +static int be_set_vf_tx_rate(struct net_device *netdev, + int vf, int rate) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int 
status = 0; + + if (!adapter->sriov_enabled) + return -EPERM; + + if ((vf >= num_vfs) || (rate < 0)) + return -EINVAL; + + if (rate > 10000) + rate = 10000; + + adapter->vf_cfg[vf].vf_tx_rate = rate; + status = be_cmd_set_qos(adapter, rate / 10, vf); + + if (status) + dev_info(&adapter->pdev->dev, + "tx rate %d on VF %d failed\n", rate, vf); return status; } @@ -869,7 +1033,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, /* vlanf could be wrongly set in some cards. * ignore if vtm is not set */ - if ((adapter->cap & 0x400) && !vtm) + if ((adapter->function_mode & 0x400) && !vtm) vlanf = 0; if (unlikely(vlanf)) { @@ -909,7 +1073,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, /* vlanf could be wrongly set in some cards. * ignore if vtm is not set */ - if ((adapter->cap & 0x400) && !vtm) + if ((adapter->function_mode & 0x400) && !vtm) vlanf = 0; skb = napi_get_frags(&eq_obj->napi); @@ -971,6 +1135,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) return NULL; + rmb(); be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); queue_tail_inc(&adapter->rx_obj.cq); @@ -1064,6 +1229,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) return NULL; + rmb(); be_dws_le_to_cpu(txcp, sizeof(*txcp)); txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; @@ -1111,6 +1277,7 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) if (!eqe->evt) return NULL; + rmb(); eqe->evt = le32_to_cpu(eqe->evt); queue_tail_inc(&eq_obj->q); return eqe; @@ -1576,12 +1743,66 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) return 1; } +static inline bool be_detect_ue(struct be_adapter *adapter) +{ + u32 online0 = 0, online1 = 0; + + pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0); + + pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1); + + if (!online0 || !online1) { + adapter->ue_detected = true; + dev_err(&adapter->pdev->dev, + "UE Detected!! 
online0=%d online1=%d\n", + online0, online1); + return true; + } + + return false; +} + +void be_dump_ue(struct be_adapter *adapter) +{ + u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; + u32 i; + + pci_read_config_dword(adapter->pdev, + PCICFG_UE_STATUS_LOW, &ue_status_lo); + pci_read_config_dword(adapter->pdev, + PCICFG_UE_STATUS_HIGH, &ue_status_hi); + pci_read_config_dword(adapter->pdev, + PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask); + pci_read_config_dword(adapter->pdev, + PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask); + + ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); + ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); + + if (ue_status_lo) { + for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { + if (ue_status_lo & 1) + dev_err(&adapter->pdev->dev, + "UE: %s bit set\n", ue_status_low_desc[i]); + } + } + if (ue_status_hi) { + for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) { + if (ue_status_hi & 1) + dev_err(&adapter->pdev->dev, + "UE: %s bit set\n", ue_status_hi_desc[i]); + } + } + +} + static void be_worker(struct work_struct *work) { struct be_adapter *adapter = container_of(work, struct be_adapter, work.work); - be_cmd_get_stats(adapter, &adapter->stats.cmd); + if (!adapter->stats_ioctl_sent) + be_cmd_get_stats(adapter, &adapter->stats.cmd); /* Set EQ delay */ be_rx_eqd_update(adapter); @@ -1593,6 +1814,10 @@ static void be_worker(struct work_struct *work) adapter->rx_post_starved = false; be_post_rx_frags(adapter); } + if (!adapter->ue_detected) { + if (be_detect_ue(adapter)) + be_dump_ue(adapter); + } schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } @@ -1620,9 +1845,11 @@ static void be_msix_enable(struct be_adapter *adapter) static void be_sriov_enable(struct be_adapter *adapter) { + be_check_sriov_fn_type(adapter); #ifdef CONFIG_PCI_IOV - int status; if (be_physfn(adapter) && num_vfs) { + int status; + status = pci_enable_sriov(adapter->pdev, num_vfs); adapter->sriov_enabled = status ? false : true; } @@ -1735,6 +1962,44 @@ static void be_irq_unregister(struct be_adapter *adapter) adapter->isr_registered = false; } +static int be_close(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_eq_obj *rx_eq = &adapter->rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + int vec; + + cancel_delayed_work_sync(&adapter->work); + + be_async_mcc_disable(adapter); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + adapter->link_up = false; + + be_intr_set(adapter, false); + + if (adapter->msix_enabled) { + vec = be_msix_vec_get(adapter, tx_eq->q.id); + synchronize_irq(vec); + vec = be_msix_vec_get(adapter, rx_eq->q.id); + synchronize_irq(vec); + } else { + synchronize_irq(netdev->irq); + } + be_irq_unregister(adapter); + + napi_disable(&rx_eq->napi); + napi_disable(&tx_eq->napi); + + /* Wait for all pending tx completions to arrive so that + * all tx skbs are freed. 
+ */ + be_tx_compl_clean(adapter); + + return 0; +} + static int be_open(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -1765,27 +2030,29 @@ static int be_open(struct net_device *netdev) /* Now that interrupts are on we can process async mcc */ be_async_mcc_enable(adapter); + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); + status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed); if (status) - goto ret_sts; + goto err; be_link_status_update(adapter, link_up); - if (be_physfn(adapter)) - status = be_vid_config(adapter); - if (status) - goto ret_sts; - if (be_physfn(adapter)) { + status = be_vid_config(adapter, false, 0); + if (status) + goto err; + status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) - goto ret_sts; + goto err; } - schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); -ret_sts: - return status; + return 0; +err: + be_close(adapter->netdev); + return -EIO; } static int be_setup_wol(struct be_adapter *adapter, bool enable) @@ -1853,13 +2120,15 @@ static int be_setup(struct be_adapter *adapter) cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; status = be_cmd_if_create(adapter, cap_flags, en_flags, - mac, true, &adapter->vf_if_handle[vf], + mac, true, + &adapter->vf_cfg[vf].vf_if_handle, NULL, vf+1); if (status) { dev_err(&adapter->pdev->dev, "Interface Create failed for VF %d\n", vf); goto if_destroy; } + adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID; vf++; } } else if (!be_physfn(adapter)) { @@ -1893,8 +2162,9 @@ static int be_setup(struct be_adapter *adapter) be_tx_queues_destroy(adapter); if_destroy: for (vf = 0; vf < num_vfs; vf++) - if (adapter->vf_if_handle[vf]) - be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]); + if (adapter->vf_cfg[vf].vf_if_handle) + be_cmd_if_destroy(adapter, + adapter->vf_cfg[vf].vf_if_handle); be_cmd_if_destroy(adapter, adapter->if_handle); do_none: return status; @@ -1913,43 +2183,6 @@ static int be_clear(struct be_adapter *adapter) return 0; } -static int be_close(struct net_device *netdev) -{ - struct be_adapter *adapter = netdev_priv(netdev); - struct be_eq_obj *rx_eq = &adapter->rx_eq; - struct be_eq_obj *tx_eq = &adapter->tx_eq; - int vec; - - cancel_delayed_work_sync(&adapter->work); - - be_async_mcc_disable(adapter); - - netif_stop_queue(netdev); - netif_carrier_off(netdev); - adapter->link_up = false; - - be_intr_set(adapter, false); - - if (adapter->msix_enabled) { - vec = be_msix_vec_get(adapter, tx_eq->q.id); - synchronize_irq(vec); - vec = be_msix_vec_get(adapter, rx_eq->q.id); - synchronize_irq(vec); - } else { - synchronize_irq(netdev->irq); - } - be_irq_unregister(adapter); - - napi_disable(&rx_eq->napi); - napi_disable(&tx_eq->napi); - - /* Wait for all pending tx completions to arrive so that - * all tx skbs are freed. - */ - be_tx_compl_clean(adapter); - - return 0; -} #define FW_FILE_HDR_SIGN "ServerEngines Corp. 
" char flash_cookie[2][16] = {"*** SE FLAS", @@ -2174,7 +2407,10 @@ static struct net_device_ops be_netdev_ops = { .ndo_vlan_rx_register = be_vlan_register, .ndo_vlan_rx_add_vid = be_vlan_add_vid, .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, - .ndo_set_vf_mac = be_set_vf_mac + .ndo_set_vf_mac = be_set_vf_mac, + .ndo_set_vf_vlan = be_set_vf_vlan, + .ndo_set_vf_tx_rate = be_set_vf_tx_rate, + .ndo_get_vf_config = be_get_vf_config }; static void be_netdev_init(struct net_device *netdev) @@ -2183,7 +2419,7 @@ static void be_netdev_init(struct net_device *netdev) netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | - NETIF_F_GRO; + NETIF_F_GRO | NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; @@ -2393,7 +2629,7 @@ static int be_get_config(struct be_adapter *adapter) return status; status = be_cmd_query_fw_cfg(adapter, - &adapter->port_num, &adapter->cap); + &adapter->port_num, &adapter->function_mode); if (status) return status; @@ -2413,7 +2649,7 @@ static int be_get_config(struct be_adapter *adapter) memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); } - if (adapter->cap & 0x400) + if (adapter->function_mode & 0x400) adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4; else adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 368f33313fb6619635383ca9799ae7cf8d5e6376..012613fde3f4d61f1217603a3bd811673f43d324 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev) # define bfin_tx_hwtstamp(dev, skb) #endif -static void adjust_tx_list(void) +static inline void _tx_reclaim_skb(void) +{ + do { + tx_list_head->desc_a.config &= ~DMAEN; + tx_list_head->status.status_word = 0; + if (tx_list_head->skb) { + dev_kfree_skb(tx_list_head->skb); + tx_list_head->skb = NULL; + } + tx_list_head = tx_list_head->next; + + } while (tx_list_head->status.status_word != 0); +} + +static void tx_reclaim_skb(struct bfin_mac_local *lp) { int timeout_cnt = MAX_TIMEOUT_CNT; - if (tx_list_head->status.status_word != 0 && - current_tx_ptr != tx_list_head) { - goto adjust_head; /* released something, just return; */ - } + if (tx_list_head->status.status_word != 0) + _tx_reclaim_skb(); - /* - * if nothing released, check wait condition - * current's next can not be the head, - * otherwise the dma will not stop as we want - */ - if (current_tx_ptr->next->next == tx_list_head) { + if (current_tx_ptr->next == tx_list_head) { while (tx_list_head->status.status_word == 0) { + /* slow down polling to avoid too many queue stop. */ udelay(10); - if (tx_list_head->status.status_word != 0 || - !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) { - goto adjust_head; - } - if (timeout_cnt-- < 0) { - printk(KERN_ERR DRV_NAME - ": wait for adjust tx list head timeout\n"); + /* reclaim skb if DMA is not running. 
*/ + if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) + break; + if (timeout_cnt-- < 0) break; - } - } - if (tx_list_head->status.status_word != 0) { - goto adjust_head; } + + if (timeout_cnt >= 0) + _tx_reclaim_skb(); + else + netif_stop_queue(lp->ndev); } - return; + if (current_tx_ptr->next != tx_list_head && + netif_queue_stopped(lp->ndev)) + netif_wake_queue(lp->ndev); + + if (tx_list_head != current_tx_ptr) { + /* shorten the timer interval if tx queue is stopped */ + if (netif_queue_stopped(lp->ndev)) + lp->tx_reclaim_timer.expires = + jiffies + (TX_RECLAIM_JIFFIES >> 4); + else + lp->tx_reclaim_timer.expires = + jiffies + TX_RECLAIM_JIFFIES; + + mod_timer(&lp->tx_reclaim_timer, + lp->tx_reclaim_timer.expires); + } -adjust_head: - do { - tx_list_head->desc_a.config &= ~DMAEN; - tx_list_head->status.status_word = 0; - if (tx_list_head->skb) { - dev_kfree_skb(tx_list_head->skb); - tx_list_head->skb = NULL; - } else { - printk(KERN_ERR DRV_NAME - ": no sk_buff in a transmitted frame!\n"); - } - tx_list_head = tx_list_head->next; - } while (tx_list_head->status.status_word != 0 && - current_tx_ptr != tx_list_head); return; +} +static void tx_reclaim_skb_timeout(unsigned long lp) +{ + tx_reclaim_skb((struct bfin_mac_local *)lp); } static int bfin_mac_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { + struct bfin_mac_local *lp = netdev_priv(dev); u16 *data; u32 data_align = (unsigned long)(skb->data) & 0x3; union skb_shared_tx *shtx = skb_tx(skb); @@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb, skb->len); current_tx_ptr->desc_a.start_addr = (u32)current_tx_ptr->packet; - if (current_tx_ptr->status.status_word != 0) - current_tx_ptr->status.status_word = 0; blackfin_dcache_flush_range( (u32)current_tx_ptr->packet, (u32)(current_tx_ptr->packet + skb->len + 2)); @@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb, */ SSYNC(); + /* always clear status buffer before start tx dma */ + current_tx_ptr->status.status_word = 0; + /* enable this packet's dma */ current_tx_ptr->desc_a.config |= DMAEN; @@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb, bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); out: - adjust_tx_list(); - bfin_tx_hwtstamp(dev, skb); current_tx_ptr = current_tx_ptr->next; dev->stats.tx_packets++; dev->stats.tx_bytes += (skb->len); + + tx_reclaim_skb(lp); + return NETDEV_TX_OK; } @@ -1167,8 +1181,11 @@ static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id) #ifdef CONFIG_NET_POLL_CONTROLLER static void bfin_mac_poll(struct net_device *dev) { + struct bfin_mac_local *lp = netdev_priv(dev); + disable_irq(IRQ_MAC_RX); bfin_mac_interrupt(IRQ_MAC_RX, dev); + tx_reclaim_skb(lp); enable_irq(IRQ_MAC_RX); } #endif /* CONFIG_NET_POLL_CONTROLLER */ @@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void) /* Our watchdog timed out. 
Called by the networking layer */ static void bfin_mac_timeout(struct net_device *dev) { + struct bfin_mac_local *lp = netdev_priv(dev); + pr_debug("%s: %s\n", dev->name, __func__); bfin_mac_disable(); - /* reset tx queue */ - tx_list_tail = tx_list_head->next; + del_timer(&lp->tx_reclaim_timer); + + /* reset tx queue and free skb */ + while (tx_list_head != current_tx_ptr) { + tx_list_head->desc_a.config &= ~DMAEN; + tx_list_head->status.status_word = 0; + if (tx_list_head->skb) { + dev_kfree_skb(tx_list_head->skb); + tx_list_head->skb = NULL; + } + tx_list_head = tx_list_head->next; + } + + if (netif_queue_stopped(lp->ndev)) + netif_wake_queue(lp->ndev); bfin_mac_enable(); @@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, ndev); lp = netdev_priv(ndev); + lp->ndev = ndev; /* Grab the MAC address in the MAC */ *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); @@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) ndev->netdev_ops = &bfin_mac_netdev_ops; ndev->ethtool_ops = &bfin_mac_ethtool_ops; + init_timer(&lp->tx_reclaim_timer); + lp->tx_reclaim_timer.data = (unsigned long)lp; + lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout; + spin_lock_init(&lp->lock); /* now, enable interrupts */ diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index 1ae7b82ceeee716b98c18f7f42d4cd6a6f8ccc74..04e4050df18b91fca21cf39880b654422f848b12 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h @@ -13,9 +13,12 @@ #include #include #include +#include #define BFIN_MAC_CSUM_OFFLOAD +#define TX_RECLAIM_JIFFIES (HZ / 5) + struct dma_descriptor { struct dma_descriptor *next_dma_desc; unsigned long start_addr; @@ -68,6 +71,8 @@ struct bfin_mac_local { int wol; /* Wake On Lan */ int irq_wake_requested; + struct timer_list tx_reclaim_timer; + struct net_device *ndev; /* MII and PHY stuffs */ int old_link; /* used by bf537_adjust_link */ diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 117432222a09e8b3c7521c6bf237621afe938fd9..e6a803f1c507f848ee8b4ac28ddb4ce8fc138c44 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -58,8 +58,8 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.0.15" -#define DRV_MODULE_RELDATE "May 4, 2010" +#define DRV_MODULE_VERSION "2.0.17" +#define DRV_MODULE_RELDATE "July 18, 2010" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw" @@ -253,7 +253,8 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) { u32 diff; - smp_mb(); + /* Tell compiler to fetch tx_prod and tx_cons from memory. */ + barrier(); /* The ring uses 256 indices for 255 entries, one of them * needs to be skipped. 
@@ -692,9 +693,9 @@ bnx2_free_tx_mem(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; if (txr->tx_desc_ring) { - pci_free_consistent(bp->pdev, TXBD_RING_SIZE, - txr->tx_desc_ring, - txr->tx_desc_mapping); + dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE, + txr->tx_desc_ring, + txr->tx_desc_mapping); txr->tx_desc_ring = NULL; } kfree(txr->tx_buf_ring); @@ -714,9 +715,9 @@ bnx2_free_rx_mem(struct bnx2 *bp) for (j = 0; j < bp->rx_max_ring; j++) { if (rxr->rx_desc_ring[j]) - pci_free_consistent(bp->pdev, RXBD_RING_SIZE, - rxr->rx_desc_ring[j], - rxr->rx_desc_mapping[j]); + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, + rxr->rx_desc_ring[j], + rxr->rx_desc_mapping[j]); rxr->rx_desc_ring[j] = NULL; } vfree(rxr->rx_buf_ring); @@ -724,9 +725,9 @@ bnx2_free_rx_mem(struct bnx2 *bp) for (j = 0; j < bp->rx_max_pg_ring; j++) { if (rxr->rx_pg_desc_ring[j]) - pci_free_consistent(bp->pdev, RXBD_RING_SIZE, - rxr->rx_pg_desc_ring[j], - rxr->rx_pg_desc_mapping[j]); + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, + rxr->rx_pg_desc_ring[j], + rxr->rx_pg_desc_mapping[j]); rxr->rx_pg_desc_ring[j] = NULL; } vfree(rxr->rx_pg_ring); @@ -748,8 +749,8 @@ bnx2_alloc_tx_mem(struct bnx2 *bp) return -ENOMEM; txr->tx_desc_ring = - pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE, - &txr->tx_desc_mapping); + dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, + &txr->tx_desc_mapping, GFP_KERNEL); if (txr->tx_desc_ring == NULL) return -ENOMEM; } @@ -776,8 +777,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) for (j = 0; j < bp->rx_max_ring; j++) { rxr->rx_desc_ring[j] = - pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE, - &rxr->rx_desc_mapping[j]); + dma_alloc_coherent(&bp->pdev->dev, + RXBD_RING_SIZE, + &rxr->rx_desc_mapping[j], + GFP_KERNEL); if (rxr->rx_desc_ring[j] == NULL) return -ENOMEM; @@ -795,8 +798,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) for (j = 0; j < bp->rx_max_pg_ring; j++) { rxr->rx_pg_desc_ring[j] = - pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE, - &rxr->rx_pg_desc_mapping[j]); + dma_alloc_coherent(&bp->pdev->dev, + RXBD_RING_SIZE, + &rxr->rx_pg_desc_mapping[j], + GFP_KERNEL); if (rxr->rx_pg_desc_ring[j] == NULL) return -ENOMEM; @@ -816,16 +821,16 @@ bnx2_free_mem(struct bnx2 *bp) for (i = 0; i < bp->ctx_pages; i++) { if (bp->ctx_blk[i]) { - pci_free_consistent(bp->pdev, BCM_PAGE_SIZE, - bp->ctx_blk[i], - bp->ctx_blk_mapping[i]); + dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE, + bp->ctx_blk[i], + bp->ctx_blk_mapping[i]); bp->ctx_blk[i] = NULL; } } if (bnapi->status_blk.msi) { - pci_free_consistent(bp->pdev, bp->status_stats_size, - bnapi->status_blk.msi, - bp->status_blk_mapping); + dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, + bnapi->status_blk.msi, + bp->status_blk_mapping); bnapi->status_blk.msi = NULL; bp->stats_blk = NULL; } @@ -846,8 +851,8 @@ bnx2_alloc_mem(struct bnx2 *bp) bp->status_stats_size = status_blk_size + sizeof(struct statistics_block); - status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size, - &bp->status_blk_mapping); + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); if (status_blk == NULL) goto alloc_mem_err; @@ -860,7 +865,7 @@ bnx2_alloc_mem(struct bnx2 *bp) bnapi->hw_rx_cons_ptr = &bnapi->status_blk.msi->status_rx_quick_consumer_index0; if (bp->flags & BNX2_FLAG_MSIX_CAP) { - for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) { + for (i = 1; i < bp->irq_nvecs; i++) { struct status_block_msix *sblk; bnapi = &bp->bnx2_napi[i]; @@ -885,9 +890,10 @@ bnx2_alloc_mem(struct bnx2 *bp) if 
(bp->ctx_pages == 0) bp->ctx_pages = 1; for (i = 0; i < bp->ctx_pages; i++) { - bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev, + bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, BCM_PAGE_SIZE, - &bp->ctx_blk_mapping[i]); + &bp->ctx_blk_mapping[i], + GFP_KERNEL); if (bp->ctx_blk[i] == NULL) goto alloc_mem_err; } @@ -1446,7 +1452,8 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) static void bnx2_enable_forced_2g5(struct bnx2 *bp) { - u32 bmcr; + u32 uninitialized_var(bmcr); + int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) return; @@ -1456,22 +1463,28 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); - bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); - val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; - val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G; - bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { + val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; + val |= MII_BNX2_SD_MISC1_FORCE | + MII_BNX2_SD_MISC1_FORCE_2_5G; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + } bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - bmcr |= BCM5708S_BMCR_FORCE_2500; + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (!err) + bmcr |= BCM5708S_BMCR_FORCE_2500; } else { return; } + if (err) + return; + if (bp->autoneg & AUTONEG_SPEED) { bmcr &= ~BMCR_ANENABLE; if (bp->req_duplex == DUPLEX_FULL) @@ -1483,7 +1496,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) static void bnx2_disable_forced_2g5(struct bnx2 *bp) { - u32 bmcr; + u32 uninitialized_var(bmcr); + int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) return; @@ -1493,21 +1507,26 @@ bnx2_disable_forced_2g5(struct bnx2 *bp) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); - bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); - val &= ~MII_BNX2_SD_MISC1_FORCE; - bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { + val &= ~MII_BNX2_SD_MISC1_FORCE; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + } bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - bmcr &= ~BCM5708S_BMCR_FORCE_2500; + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (!err) + bmcr &= ~BCM5708S_BMCR_FORCE_2500; } else { return; } + if (err) + return; + if (bp->autoneg & AUTONEG_SPEED) bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; bnx2_write_phy(bp, bp->mii_bmcr, bmcr); @@ -2651,19 +2670,19 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) } static inline int -bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) +bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) { dma_addr_t mapping; struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; struct rx_bd *rxbd = &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; - struct page *page = alloc_page(GFP_ATOMIC); + struct page *page = alloc_page(gfp); if (!page) return -ENOMEM; - mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE, + mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(bp->pdev, 
mapping)) { + if (dma_mapping_error(&bp->pdev->dev, mapping)) { __free_page(page); return -EIO; } @@ -2684,15 +2703,15 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) if (!page) return; - pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), + PAGE_SIZE, PCI_DMA_FROMDEVICE); __free_page(page); rx_pg->page = NULL; } static inline int -bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) +bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) { struct sk_buff *skb; struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; @@ -2700,7 +2719,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; unsigned long align; - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp); if (skb == NULL) { return -ENOMEM; } @@ -2708,9 +2727,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) skb_reserve(skb, BNX2_RX_ALIGN - align); - mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(bp->pdev, mapping)) { + mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { dev_kfree_skb(skb); return -EIO; } @@ -2816,7 +2835,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } } - pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping), + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); tx_buf->skb = NULL; @@ -2825,7 +2844,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) for (i = 0; i < last; i++) { sw_cons = NEXT_TX_BD(sw_cons); - pci_unmap_page(bp->pdev, + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr( &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], mapping), @@ -2932,7 +2951,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf = &rxr->rx_buf_ring[prod]; - pci_dma_sync_single_for_device(bp->pdev, + dma_sync_single_for_device(&bp->pdev->dev, dma_unmap_addr(cons_rx_buf, mapping), BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); @@ -2961,7 +2980,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, int err; u16 prod = ring_idx & 0xffff; - err = bnx2_alloc_rx_skb(bp, rxr, prod); + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC); if (unlikely(err)) { bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod); if (hdr_len) { @@ -2974,7 +2993,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, } skb_reserve(skb, BNX2_RX_OFFSET); - pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size, + dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); if (hdr_len == 0) { @@ -3026,7 +3045,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, rx_pg->page = NULL; err = bnx2_alloc_rx_page(bp, rxr, - RX_PG_RING_IDX(pg_prod)); + RX_PG_RING_IDX(pg_prod), + GFP_ATOMIC); if (unlikely(err)) { rxr->rx_pg_cons = pg_cons; rxr->rx_pg_prod = pg_prod; @@ -3035,7 +3055,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, return err; } 
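These hunks also thread a gfp_t argument through the RX buffer allocators, so the same helper can allocate with GFP_ATOMIC from the NAPI receive/refill path (as in the calls above) and with GFP_KERNEL from ring setup in the bnx2_init_rx_ring() hunk that follows, where sleeping is allowed. A hedged sketch of that pattern, using a made-up helper name rather than a bnx2 function:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch only: my_alloc_rx_buf() is illustrative, not driver code. */
    static int my_alloc_rx_buf(struct net_device *dev, struct sk_buff **slot,
                               unsigned int size, gfp_t gfp)
    {
            struct sk_buff *skb = __netdev_alloc_skb(dev, size, gfp);

            if (!skb)
                    return -ENOMEM;
            *slot = skb;
            return 0;
    }

Ring initialisation would then pass GFP_KERNEL while the softirq refill path passes GFP_ATOMIC, which is exactly the split the GFP_KERNEL and GFP_ATOMIC call sites in this patch make.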
- pci_unmap_page(bp->pdev, mapping_old, + dma_unmap_page(&bp->pdev->dev, mapping_old, PAGE_SIZE, PCI_DMA_FROMDEVICE); frag_size -= frag_len; @@ -3106,7 +3126,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_addr = dma_unmap_addr(rx_buf, mapping); - pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); @@ -3206,6 +3226,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) L2_FHDR_ERRORS_UDP_XSUM)) == 0)) skb->ip_summed = CHECKSUM_UNNECESSARY; } + if ((bp->dev->features & NETIF_F_RXHASH) && + ((status & L2_FHDR_STATUS_USE_RXHASH) == + L2_FHDR_STATUS_USE_RXHASH)) + skb->rxhash = rx_hdr->l2_fhdr_hash; skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); @@ -5162,7 +5186,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) ring_prod = prod = rxr->rx_pg_prod; for (i = 0; i < bp->rx_pg_ring_size; i++) { - if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) { + if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) { netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", ring_num, i, bp->rx_pg_ring_size); break; @@ -5174,7 +5198,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) ring_prod = prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { - if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) { + if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) { netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", ring_num, i, bp->rx_ring_size); break; @@ -5320,7 +5344,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) continue; } - pci_unmap_single(bp->pdev, + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); @@ -5331,7 +5355,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) j++; for (k = 0; k < last; k++, j++) { tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; - pci_unmap_page(bp->pdev, + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_shinfo(skb)->frags[k].size, PCI_DMA_TODEVICE); @@ -5361,7 +5385,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) if (skb == NULL) continue; - pci_unmap_single(bp->pdev, + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); @@ -5714,9 +5738,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) for (i = 14; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); - map = pci_map_single(bp->pdev, skb->data, pkt_size, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(bp->pdev, map)) { + map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; } @@ -5754,7 +5778,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) udelay(5); - pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); dev_kfree_skb(skb); if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) @@ -5771,7 +5795,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) rx_hdr = rx_buf->desc; skb_reserve(rx_skb, BNX2_RX_OFFSET); - pci_dma_sync_single_for_cpu(bp->pdev, + dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bp->rx_buf_size, PCI_DMA_FROMDEVICE); @@ -6129,7 +6153,7 @@ bnx2_free_irq(struct bnx2 *bp) static void bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) { - int i, rc; + int i, total_vecs, rc; struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; struct net_device *dev = bp->dev; const int len = sizeof(bp->irq_tbl[0].name); 
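The bulk of the bnx2 hunks above convert the legacy pci_*_consistent()/pci_map_*() wrappers to the generic DMA API, passing &pdev->dev instead of the pci_dev and an explicit GFP flag for coherent allocations. A hedged before/after sketch of the coherent-allocation part; alloc_ring()/free_ring() and their parameters are illustrative, not bnx2 functions:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Sketch only -- placeholder helpers, not driver code. */
    static void *alloc_ring(struct pci_dev *pdev, size_t size,
                            dma_addr_t *mapping)
    {
            /* was: pci_alloc_consistent(pdev, size, mapping); */
            return dma_alloc_coherent(&pdev->dev, size, mapping, GFP_KERNEL);
    }

    static void free_ring(struct pci_dev *pdev, size_t size,
                          void *addr, dma_addr_t mapping)
    {
            /* was: pci_free_consistent(pdev, size, addr, mapping); */
            dma_free_coherent(&pdev->dev, size, addr, mapping);
    }

The streaming mappings in the hunks above follow the same shape: pci_map_single()/pci_unmap_page() become dma_map_single()/dma_unmap_page() on &pdev->dev, and pci_dma_mapping_error() becomes dma_mapping_error().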
@@ -6148,13 +6172,29 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) msix_ent[i].vector = 0; } - rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC); + total_vecs = msix_vecs; +#ifdef BCM_CNIC + total_vecs++; +#endif + rc = -ENOSPC; + while (total_vecs >= BNX2_MIN_MSIX_VEC) { + rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); + if (rc <= 0) + break; + if (rc > 0) + total_vecs = rc; + } + if (rc != 0) return; + msix_vecs = total_vecs; +#ifdef BCM_CNIC + msix_vecs--; +#endif bp->irq_nvecs = msix_vecs; bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; - for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + for (i = 0; i < total_vecs; i++) { bp->irq_tbl[i].vector = msix_ent[i].vector; snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i); bp->irq_tbl[i].handler = bnx2_msi_1shot; @@ -6172,7 +6212,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) bp->irq_nvecs = 1; bp->irq_tbl[0].vector = bp->pdev->irq; - if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1) + if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) bnx2_enable_msix(bp, msix_vecs); if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && @@ -6296,9 +6336,14 @@ static void bnx2_dump_state(struct bnx2 *bp) { struct net_device *dev = bp->dev; - u32 mcp_p0, mcp_p1; - - netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem)); + u32 mcp_p0, mcp_p1, val1, val2; + + pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1); + netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n", + atomic_read(&bp->intr_sem), val1); + pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1); + pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); + netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", REG_RD(bp, BNX2_EMAC_TX_STATUS), REG_RD(bp, BNX2_EMAC_RX_STATUS)); @@ -6434,8 +6479,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } else mss = 0; - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(bp->pdev, mapping)) { + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -6463,9 +6508,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) txbd = &txr->tx_desc_ring[ring_prod]; len = frag->size; - mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, - len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(bp->pdev, mapping)) + mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset, + len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) goto dma_error; dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, mapping); @@ -6490,6 +6535,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * tx index in bnx2_tx_avail() below, because in + * bnx2_tx_int(), we update tx index before checking for + * netif_tx_queue_stopped(). 
+ */ + smp_mb(); if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) netif_tx_wake_queue(txq); } @@ -6504,7 +6556,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) ring_prod = TX_RING_IDX(prod); tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; - pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping), + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); /* unmap remaining mapped pages */ @@ -6512,7 +6564,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) prod = NEXT_TX_BD(prod); ring_prod = TX_RING_IDX(prod); tx_buf = &txr->tx_buf_ring[ring_prod]; - pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping), + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE); } @@ -6567,36 +6619,25 @@ bnx2_save_stats(struct bnx2 *bp) temp_stats[i] += hw_stats[i]; } -#define GET_64BIT_NET_STATS64(ctr) \ - (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ - (unsigned long) (ctr##_lo) - -#define GET_64BIT_NET_STATS32(ctr) \ - (ctr##_lo) +#define GET_64BIT_NET_STATS64(ctr) \ + (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo)) -#if (BITS_PER_LONG == 64) #define GET_64BIT_NET_STATS(ctr) \ GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \ GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr) -#else -#define GET_64BIT_NET_STATS(ctr) \ - GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \ - GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr) -#endif #define GET_32BIT_NET_STATS(ctr) \ (unsigned long) (bp->stats_blk->ctr + \ bp->temp_stats_blk->ctr) -static struct net_device_stats * -bnx2_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 * +bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); - struct net_device_stats *net_stats = &dev->stats; - if (bp->stats_blk == NULL) { + if (bp->stats_blk == NULL) return net_stats; - } + net_stats->rx_packets = GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) + @@ -6614,7 +6655,7 @@ bnx2_get_stats(struct net_device *dev) GET_64BIT_NET_STATS(stat_IfHCOutOctets); net_stats->multicast = - GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts); + GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts); net_stats->collisions = GET_32BIT_NET_STATS(stat_EtherStatsCollisions); @@ -7545,6 +7586,12 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data) return (ethtool_op_set_tx_csum(dev, data)); } +static int +bnx2_set_flags(struct net_device *dev, u32 data) +{ + return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH); +} + static const struct ethtool_ops bnx2_ethtool_ops = { .get_settings = bnx2_get_settings, .set_settings = bnx2_set_settings, @@ -7574,6 +7621,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = { .phys_id = bnx2_phys_id, .get_ethtool_stats = bnx2_get_ethtool_stats, .get_sset_count = bnx2_get_sset_count, + .set_flags = bnx2_set_flags, + .get_flags = ethtool_op_get_flags, }; /* Called with rtnl_lock */ @@ -8259,7 +8308,7 @@ static const struct net_device_ops bnx2_netdev_ops = { .ndo_open = bnx2_open, .ndo_start_xmit = bnx2_start_xmit, .ndo_stop = bnx2_close, - .ndo_get_stats = bnx2_get_stats, + .ndo_get_stats64 = bnx2_get_stats64, .ndo_set_rx_mode = bnx2_set_rx_mode, .ndo_do_ioctl = bnx2_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -8320,7 +8369,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) memcpy(dev->dev_addr, bp->mac_addr, 6); memcpy(dev->perm_addr, bp->mac_addr, 6); - dev->features |= NETIF_F_IP_CSUM 
| NETIF_F_SG | NETIF_F_GRO; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO | + NETIF_F_RXHASH; vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); if (CHIP_NUM(bp) == CHIP_NUM_5709) { dev->features |= NETIF_F_IPV6_CSUM; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index ddaa3fc99876bbc183a02a2d5769131a525eac64..2104c1005d023fee037d49ee5c8cd6595ad93acb 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -295,6 +295,9 @@ struct l2_fhdr { #define L2_FHDR_ERRORS_TCP_XSUM (1<<28) #define L2_FHDR_ERRORS_UDP_XSUM (1<<31) + #define L2_FHDR_STATUS_USE_RXHASH \ + (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH) + u32 l2_fhdr_hash; #if defined(__BIG_ENDIAN) u16 l2_fhdr_pkt_len; @@ -6634,9 +6637,12 @@ struct flash_spec { #define BNX2_MAX_MSIX_HW_VEC 9 #define BNX2_MAX_MSIX_VEC 9 -#define BNX2_BASE_VEC 0 -#define BNX2_TX_VEC 1 -#define BNX2_TX_INT_NUM (BNX2_TX_VEC << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT) +#ifdef BCM_CNIC +#define BNX2_MIN_MSIX_VEC 2 +#else +#define BNX2_MIN_MSIX_VEC 1 +#endif + struct bnx2_irq { irq_handler_t handler; diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..084afce89ae9f2b8b7181045189d1c102a100cde --- /dev/null +++ b/drivers/net/bnx2x/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for Broadcom 10-Gigabit ethernet driver +# + +obj-$(CONFIG_BNX2X) += bnx2x.o + +bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x/bnx2x.h similarity index 84% rename from drivers/net/bnx2x.h rename to drivers/net/bnx2x/bnx2x.h index bb0872a633157157501b5c2312cc6a73d642ed1b..53af9c93e75c3bca661abfb8dce7aabe96589f52 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -20,6 +20,10 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ +#define DRV_MODULE_VERSION "1.52.53-3" +#define DRV_MODULE_RELDATE "2010/18/04" +#define BNX2X_BC_VER 0x040200 + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define BCM_VLAN 1 #endif @@ -32,7 +36,7 @@ #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) #define BCM_CNIC 1 -#include "cnic_if.h" +#include "../cnic_if.h" #endif @@ -45,10 +49,12 @@ #endif #include +#include #include "bnx2x_reg.h" #include "bnx2x_fw_defs.h" #include "bnx2x_hsi.h" #include "bnx2x_link.h" +#include "bnx2x_stats.h" /* error/debug prints */ @@ -106,6 +112,7 @@ do { \ dev_info(&bp->pdev->dev, __fmt, ##__args); \ } while (0) +void bnx2x_panic_dump(struct bnx2x *bp); #ifdef BNX2X_STOP_ON_ERROR #define bnx2x_panic() do { \ @@ -248,43 +255,6 @@ union db_prod { #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) -struct bnx2x_eth_q_stats { - u32 total_bytes_received_hi; - u32 total_bytes_received_lo; - u32 total_bytes_transmitted_hi; - u32 total_bytes_transmitted_lo; - u32 total_unicast_packets_received_hi; - u32 total_unicast_packets_received_lo; - u32 total_multicast_packets_received_hi; - u32 total_multicast_packets_received_lo; - u32 total_broadcast_packets_received_hi; - u32 total_broadcast_packets_received_lo; - u32 total_unicast_packets_transmitted_hi; - u32 total_unicast_packets_transmitted_lo; - u32 total_multicast_packets_transmitted_hi; - u32 total_multicast_packets_transmitted_lo; - u32 total_broadcast_packets_transmitted_hi; - u32 total_broadcast_packets_transmitted_lo; - u32 valid_bytes_received_hi; - u32 valid_bytes_received_lo; - - u32 error_bytes_received_hi; - u32 error_bytes_received_lo; - u32 
etherstatsoverrsizepkts_hi; - u32 etherstatsoverrsizepkts_lo; - u32 no_buff_discard_hi; - u32 no_buff_discard_lo; - - u32 driver_xoff; - u32 rx_err_discard_pkt; - u32 rx_skb_alloc_failed; - u32 hw_csum_err; -}; - -#define BNX2X_NUM_Q_STATS 13 -#define Q_STATS_OFFSET32(stat_name) \ - (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) - struct bnx2x_fastpath { struct napi_struct napi; @@ -593,27 +563,6 @@ struct bnx2x_common { /* port */ -struct nig_stats { - u32 brb_discard; - u32 brb_packet; - u32 brb_truncate; - u32 flow_ctrl_discard; - u32 flow_ctrl_octets; - u32 flow_ctrl_packet; - u32 mng_discard; - u32 mng_octet_inp; - u32 mng_octet_out; - u32 mng_packet_inp; - u32 mng_packet_out; - u32 pbf_octets; - u32 pbf_packet; - u32 safc_inp; - u32 egress_mac_pkt0_lo; - u32 egress_mac_pkt0_hi; - u32 egress_mac_pkt1_lo; - u32 egress_mac_pkt1_hi; -}; - struct bnx2x_port { u32 pmf; @@ -641,156 +590,6 @@ struct bnx2x_port { /* end of port */ -enum bnx2x_stats_event { - STATS_EVENT_PMF = 0, - STATS_EVENT_LINK_UP, - STATS_EVENT_UPDATE, - STATS_EVENT_STOP, - STATS_EVENT_MAX -}; - -enum bnx2x_stats_state { - STATS_STATE_DISABLED = 0, - STATS_STATE_ENABLED, - STATS_STATE_MAX -}; - -struct bnx2x_eth_stats { - u32 total_bytes_received_hi; - u32 total_bytes_received_lo; - u32 total_bytes_transmitted_hi; - u32 total_bytes_transmitted_lo; - u32 total_unicast_packets_received_hi; - u32 total_unicast_packets_received_lo; - u32 total_multicast_packets_received_hi; - u32 total_multicast_packets_received_lo; - u32 total_broadcast_packets_received_hi; - u32 total_broadcast_packets_received_lo; - u32 total_unicast_packets_transmitted_hi; - u32 total_unicast_packets_transmitted_lo; - u32 total_multicast_packets_transmitted_hi; - u32 total_multicast_packets_transmitted_lo; - u32 total_broadcast_packets_transmitted_hi; - u32 total_broadcast_packets_transmitted_lo; - u32 valid_bytes_received_hi; - u32 valid_bytes_received_lo; - - u32 error_bytes_received_hi; - u32 error_bytes_received_lo; - u32 etherstatsoverrsizepkts_hi; - u32 etherstatsoverrsizepkts_lo; - u32 no_buff_discard_hi; - u32 no_buff_discard_lo; - - u32 rx_stat_ifhcinbadoctets_hi; - u32 rx_stat_ifhcinbadoctets_lo; - u32 tx_stat_ifhcoutbadoctets_hi; - u32 tx_stat_ifhcoutbadoctets_lo; - u32 rx_stat_dot3statsfcserrors_hi; - u32 rx_stat_dot3statsfcserrors_lo; - u32 rx_stat_dot3statsalignmenterrors_hi; - u32 rx_stat_dot3statsalignmenterrors_lo; - u32 rx_stat_dot3statscarriersenseerrors_hi; - u32 rx_stat_dot3statscarriersenseerrors_lo; - u32 rx_stat_falsecarriererrors_hi; - u32 rx_stat_falsecarriererrors_lo; - u32 rx_stat_etherstatsundersizepkts_hi; - u32 rx_stat_etherstatsundersizepkts_lo; - u32 rx_stat_dot3statsframestoolong_hi; - u32 rx_stat_dot3statsframestoolong_lo; - u32 rx_stat_etherstatsfragments_hi; - u32 rx_stat_etherstatsfragments_lo; - u32 rx_stat_etherstatsjabbers_hi; - u32 rx_stat_etherstatsjabbers_lo; - u32 rx_stat_maccontrolframesreceived_hi; - u32 rx_stat_maccontrolframesreceived_lo; - u32 rx_stat_bmac_xpf_hi; - u32 rx_stat_bmac_xpf_lo; - u32 rx_stat_bmac_xcf_hi; - u32 rx_stat_bmac_xcf_lo; - u32 rx_stat_xoffstateentered_hi; - u32 rx_stat_xoffstateentered_lo; - u32 rx_stat_xonpauseframesreceived_hi; - u32 rx_stat_xonpauseframesreceived_lo; - u32 rx_stat_xoffpauseframesreceived_hi; - u32 rx_stat_xoffpauseframesreceived_lo; - u32 tx_stat_outxonsent_hi; - u32 tx_stat_outxonsent_lo; - u32 tx_stat_outxoffsent_hi; - u32 tx_stat_outxoffsent_lo; - u32 tx_stat_flowcontroldone_hi; - u32 tx_stat_flowcontroldone_lo; - u32 tx_stat_etherstatscollisions_hi; - u32 
tx_stat_etherstatscollisions_lo; - u32 tx_stat_dot3statssinglecollisionframes_hi; - u32 tx_stat_dot3statssinglecollisionframes_lo; - u32 tx_stat_dot3statsmultiplecollisionframes_hi; - u32 tx_stat_dot3statsmultiplecollisionframes_lo; - u32 tx_stat_dot3statsdeferredtransmissions_hi; - u32 tx_stat_dot3statsdeferredtransmissions_lo; - u32 tx_stat_dot3statsexcessivecollisions_hi; - u32 tx_stat_dot3statsexcessivecollisions_lo; - u32 tx_stat_dot3statslatecollisions_hi; - u32 tx_stat_dot3statslatecollisions_lo; - u32 tx_stat_etherstatspkts64octets_hi; - u32 tx_stat_etherstatspkts64octets_lo; - u32 tx_stat_etherstatspkts65octetsto127octets_hi; - u32 tx_stat_etherstatspkts65octetsto127octets_lo; - u32 tx_stat_etherstatspkts128octetsto255octets_hi; - u32 tx_stat_etherstatspkts128octetsto255octets_lo; - u32 tx_stat_etherstatspkts256octetsto511octets_hi; - u32 tx_stat_etherstatspkts256octetsto511octets_lo; - u32 tx_stat_etherstatspkts512octetsto1023octets_hi; - u32 tx_stat_etherstatspkts512octetsto1023octets_lo; - u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; - u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; - u32 tx_stat_etherstatspktsover1522octets_hi; - u32 tx_stat_etherstatspktsover1522octets_lo; - u32 tx_stat_bmac_2047_hi; - u32 tx_stat_bmac_2047_lo; - u32 tx_stat_bmac_4095_hi; - u32 tx_stat_bmac_4095_lo; - u32 tx_stat_bmac_9216_hi; - u32 tx_stat_bmac_9216_lo; - u32 tx_stat_bmac_16383_hi; - u32 tx_stat_bmac_16383_lo; - u32 tx_stat_dot3statsinternalmactransmiterrors_hi; - u32 tx_stat_dot3statsinternalmactransmiterrors_lo; - u32 tx_stat_bmac_ufl_hi; - u32 tx_stat_bmac_ufl_lo; - - u32 pause_frames_received_hi; - u32 pause_frames_received_lo; - u32 pause_frames_sent_hi; - u32 pause_frames_sent_lo; - - u32 etherstatspkts1024octetsto1522octets_hi; - u32 etherstatspkts1024octetsto1522octets_lo; - u32 etherstatspktsover1522octets_hi; - u32 etherstatspktsover1522octets_lo; - - u32 brb_drop_hi; - u32 brb_drop_lo; - u32 brb_truncate_hi; - u32 brb_truncate_lo; - - u32 mac_filter_discard; - u32 xxoverflow_discard; - u32 brb_truncate_discard; - u32 mac_discard; - - u32 driver_xoff; - u32 rx_err_discard_pkt; - u32 rx_skb_alloc_failed; - u32 hw_csum_err; - - u32 nig_timer_max; -}; - -#define BNX2X_NUM_STATS 43 -#define STATS_OFFSET32(stat_name) \ - (offsetof(struct bnx2x_eth_stats, stat_name) / 4) - #ifdef BCM_CNIC #define MAX_CONTEXT 15 @@ -1006,6 +805,8 @@ struct bnx2x { int multi_mode; int num_queues; + int disable_tpa; + int int_mode; u32 rx_mode; #define BNX2X_RX_MODE_NONE 0 @@ -1134,6 +935,10 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command); void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, u32 addr, u32 len); +void bnx2x_calc_fc_adv(struct bnx2x *bp); +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int common); +void bnx2x_update_coalesce(struct bnx2x *bp); static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, int wait) @@ -1375,6 +1180,18 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define BNX2X_VPD_LEN 128 #define VENDOR_ID_LEN 4 +#ifdef BNX2X_MAIN +#define BNX2X_EXTERN +#else +#define BNX2X_EXTERN extern +#endif + +BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */ + /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ +extern void bnx2x_set_ethtool_ops(struct net_device *netdev); + +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); + #endif /* bnx2x.h 
*/ diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c new file mode 100644 index 0000000000000000000000000000000000000000..02bf710629a3eec2b4b05f17943c1565b3912144 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -0,0 +1,2252 @@ +/* bnx2x_cmn.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + + +#include +#include +#include +#include +#include "bnx2x_cmn.h" + +#ifdef BCM_VLAN +#include +#endif + +static int bnx2x_poll(struct napi_struct *napi, int budget); + +/* free skb in the packet ring at pos idx + * return idx of last bd freed + */ +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 idx) +{ + struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_bd *tx_data_bd; + struct sk_buff *skb = tx_buf->skb; + u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; + int nbd; + + /* prefetch skb end pointer to speedup dev_kfree_skb() */ + prefetch(&skb->end); + + DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", + idx, tx_buf, skb); + + /* unmap first bd */ + DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); + tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); + + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; +#ifdef BNX2X_STOP_ON_ERROR + if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { + BNX2X_ERR("BAD nbd!\n"); + bnx2x_panic(); + } +#endif + new_cons = nbd + tx_buf->first_bd; + + /* Get the next bd */ + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* Skip a parse bd... 
*/ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* ...and the TSO split header bd since they have no mapping */ + if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* now free frags */ + while (nbd > 0) { + + DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); + tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; + dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + if (--nbd) + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* release skb */ + WARN_ON(!skb); + dev_kfree_skb(skb); + tx_buf->first_bd = 0; + tx_buf->skb = NULL; + + return new_cons; +} + +int bnx2x_tx_int(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + struct netdev_queue *txq; + u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -1; +#endif + + txq = netdev_get_tx_queue(bp->dev, fp->index); + hw_cons = le16_to_cpu(*fp->tx_cons_sb); + sw_cons = fp->tx_pkt_cons; + + while (sw_cons != hw_cons) { + u16 pkt_cons; + + pkt_cons = TX_BD(sw_cons); + + /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ + + DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", + hw_cons, sw_cons, pkt_cons); + +/* if (NEXT_TX_IDX(sw_cons) != hw_cons) { + rmb(); + prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); + } +*/ + bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); + sw_cons++; + } + + fp->tx_pkt_cons = sw_cons; + fp->tx_bd_cons = bd_cons; + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. + */ + smp_mb(); + + /* TBD need a thresh? */ + if (unlikely(netif_tx_queue_stopped(txq))) { + /* Taking tx_lock() is needed to prevent reenabling the queue + * while it's empty. 
This could have happen if rx_action() gets + * suspended in bnx2x_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(txq)) && + (bp->state == BNX2X_STATE_OPEN) && + (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) + netif_tx_wake_queue(txq); + + __netif_tx_unlock(txq); + } + return 0; +} + +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, + u16 idx) +{ + u16 last_max = fp->last_max_sge; + + if (SUB_S16(idx, last_max) > 0) + fp->last_max_sge = idx; +} + +static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, + struct eth_fast_path_rx_cqe *fp_cqe) +{ + struct bnx2x *bp = fp->bp; + u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - + le16_to_cpu(fp_cqe->len_on_bd)) >> + SGE_PAGE_SHIFT; + u16 last_max, last_elem, first_elem; + u16 delta = 0; + u16 i; + + if (!sge_len) + return; + + /* First mark all used pages */ + for (i = 0; i < sge_len; i++) + SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); + + DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", + sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); + + /* Here we assume that the last SGE index is the biggest */ + prefetch((void *)(fp->sge_mask)); + bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); + + last_max = RX_SGE(fp->last_max_sge); + last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; + first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; + + /* If ring is not full */ + if (last_elem + 1 != first_elem) + last_elem++; + + /* Now update the prod */ + for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { + if (likely(fp->sge_mask[i])) + break; + + fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; + delta += RX_SGE_MASK_ELEM_SZ; + } + + if (delta > 0) { + fp->rx_sge_prod += delta; + /* clear page-end entries */ + bnx2x_clear_sge_mask_next_elems(fp); + } + + DP(NETIF_MSG_RX_STATUS, + "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", + fp->last_max_sge, fp->rx_sge_prod); +} + +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, + struct sk_buff *skb, u16 cons, u16 prod) +{ + struct bnx2x *bp = fp->bp; + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + dma_addr_t mapping; + + /* move empty skb from pool to prod and map it */ + prod_rx_buf->skb = fp->tpa_pool[queue].skb; + mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, + bp->rx_buf_size, DMA_FROM_DEVICE); + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + + /* move partial skb from cons to pool (don't unmap yet) */ + fp->tpa_pool[queue] = *cons_rx_buf; + + /* mark bin state as start - print error if current state != stop */ + if (fp->tpa_state[queue] != BNX2X_TPA_STOP) + BNX2X_ERR("start of bin not in stop [%d]\n", queue); + + fp->tpa_state[queue] = BNX2X_TPA_START; + + /* point prod_bd to new skb */ + prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + +#ifdef BNX2X_STOP_ON_ERROR + fp->tpa_queue_used |= (1 << queue); +#ifdef _ASM_GENERIC_INT_L64_H + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", +#else + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", +#endif + fp->tpa_queue_used); +#endif +} + +static int 
bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct sk_buff *skb, + struct eth_fast_path_rx_cqe *fp_cqe, + u16 cqe_idx) +{ + struct sw_rx_page *rx_pg, old_rx_pg; + u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); + u32 i, frag_len, frag_size, pages; + int err; + int j; + + frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; + pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; + + /* This is needed in order to enable forwarding support */ + if (frag_size) + skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, + max(frag_size, (u32)len_on_bd)); + +#ifdef BNX2X_STOP_ON_ERROR + if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { + BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", + pages, cqe_idx); + BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", + fp_cqe->pkt_len, len_on_bd); + bnx2x_panic(); + return -EINVAL; + } +#endif + + /* Run through the SGL and compose the fragmented skb */ + for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { + u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); + + /* FW gives the indices of the SGE as if the ring is an array + (meaning that "next" element will consume 2 indices) */ + frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); + rx_pg = &fp->rx_page_ring[sge_idx]; + old_rx_pg = *rx_pg; + + /* If we fail to allocate a substitute page, we simply stop + where we are and drop the whole packet */ + err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); + if (unlikely(err)) { + fp->eth_q_stats.rx_skb_alloc_failed++; + return err; + } + + /* Unmap the page as we r going to pass it to the stack */ + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + + skb->data_len += frag_len; + skb->truesize += frag_len; + skb->len += frag_len; + + frag_size -= frag_len; + } + + return 0; +} + +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 queue, int pad, int len, union eth_rx_cqe *cqe, + u16 cqe_idx) +{ + struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; + struct sk_buff *skb = rx_buf->skb; + /* alloc new skb */ + struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + + /* Unmap skb in the pool anyway, as we are going to change + pool entry status to BNX2X_TPA_STOP even if new skb allocation + fails. */ + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, DMA_FROM_DEVICE); + + if (likely(new_skb)) { + /* fix ip xsum and give it to the stack */ + /* (no need to map the new skb) */ +#ifdef BCM_VLAN + int is_vlan_cqe = + (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & + PARSING_FLAGS_VLAN); + int is_not_hwaccel_vlan_cqe = + (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG))); +#endif + + prefetch(skb); + prefetch(((char *)(skb)) + 128); + +#ifdef BNX2X_STOP_ON_ERROR + if (pad + len > bp->rx_buf_size) { + BNX2X_ERR("skb_put is about to fail... 
" + "pad %d len %d rx_buf_size %d\n", + pad, len, bp->rx_buf_size); + bnx2x_panic(); + return; + } +#endif + + skb_reserve(skb, pad); + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + { + struct iphdr *iph; + + iph = (struct iphdr *)skb->data; +#ifdef BCM_VLAN + /* If there is no Rx VLAN offloading - + take VLAN tag into an account */ + if (unlikely(is_not_hwaccel_vlan_cqe)) + iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN); +#endif + iph->check = 0; + iph->check = ip_fast_csum((u8 *)iph, iph->ihl); + } + + if (!bnx2x_fill_frag_skb(bp, fp, skb, + &cqe->fast_path_cqe, cqe_idx)) { +#ifdef BCM_VLAN + if ((bp->vlgrp != NULL) && is_vlan_cqe && + (!is_not_hwaccel_vlan_cqe)) + vlan_gro_receive(&fp->napi, bp->vlgrp, + le16_to_cpu(cqe->fast_path_cqe. + vlan_tag), skb); + else +#endif + napi_gro_receive(&fp->napi, skb); + } else { + DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" + " - dropping packet!\n"); + dev_kfree_skb(skb); + } + + + /* put new skb in bin */ + fp->tpa_pool[queue].skb = new_skb; + + } else { + /* else drop the packet and keep the buffer in the bin */ + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate new skb - dropping packet!\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; + } + + fp->tpa_state[queue] = BNX2X_TPA_STOP; +} + +/* Set Toeplitz hash value in the skb using the value from the + * CQE (calculated by HW). + */ +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, + struct sk_buff *skb) +{ + /* Set Toeplitz hash from CQE */ + if ((bp->dev->features & NETIF_F_RXHASH) && + (cqe->fast_path_cqe.status_flags & + ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) + skb->rxhash = + le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); +} + +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +{ + struct bnx2x *bp = fp->bp; + u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; + u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; + int rx_pkt = 0; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + + /* CQ "next element" is of the size of the regular element, + that's why it's ok here */ + hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); + if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + hw_comp_cons++; + + bd_cons = fp->rx_bd_cons; + bd_prod = fp->rx_bd_prod; + bd_prod_fw = bd_prod; + sw_comp_cons = fp->rx_comp_cons; + sw_comp_prod = fp->rx_comp_prod; + + /* Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", + fp->index, hw_comp_cons, sw_comp_cons); + + while (sw_comp_cons != hw_comp_cons) { + struct sw_rx_bd *rx_buf = NULL; + struct sk_buff *skb; + union eth_rx_cqe *cqe; + u8 cqe_fp_flags; + u16 len, pad; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + bd_prod = RX_BD(bd_prod); + bd_cons = RX_BD(bd_cons); + + /* Prefetch the page containing the BD descriptor + at producer's index. 
It will be needed when new skb is + allocated */ + prefetch((void *)(PAGE_ALIGN((unsigned long) + (&fp->rx_desc_ring[bd_prod])) - + PAGE_SIZE + 1)); + + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + + DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" + " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), + cqe_fp_flags, cqe->fast_path_cqe.status_flags, + le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), + le16_to_cpu(cqe->fast_path_cqe.vlan_tag), + le16_to_cpu(cqe->fast_path_cqe.pkt_len)); + + /* is this a slowpath msg? */ + if (unlikely(CQE_TYPE(cqe_fp_flags))) { + bnx2x_sp_event(fp, cqe); + goto next_cqe; + + /* this is an rx packet */ + } else { + rx_buf = &fp->rx_buf_ring[bd_cons]; + skb = rx_buf->skb; + prefetch(skb); + len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); + pad = cqe->fast_path_cqe.placement_offset; + + /* If CQE is marked both TPA_START and TPA_END + it is a non-TPA CQE */ + if ((!fp->disable_tpa) && + (TPA_TYPE(cqe_fp_flags) != + (TPA_TYPE_START | TPA_TYPE_END))) { + u16 queue = cqe->fast_path_cqe.queue_index; + + if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); + + bnx2x_tpa_start(fp, queue, skb, + bd_cons, bd_prod); + + /* Set Toeplitz hash for an LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + goto next_rx; + } + + if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) { + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + if (!BNX2X_RX_SUM_FIX(cqe)) + BNX2X_ERR("STOP on none TCP " + "data\n"); + + /* This is a size of the linear data + on this skb */ + len = le16_to_cpu(cqe->fast_path_cqe. + len_on_bd); + bnx2x_tpa_stop(bp, fp, queue, pad, + len, cqe, comp_ring_cons); +#ifdef BNX2X_STOP_ON_ERROR + if (bp->panic) + return 0; +#endif + + bnx2x_update_sge_prod(fp, + &cqe->fast_path_cqe); + goto next_cqe; + } + } + + dma_sync_single_for_device(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + prefetch(((char *)(skb)) + 128); + + /* is this an error packet? 
*/ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + DP(NETIF_MSG_RX_ERR, + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + fp->eth_q_stats.rx_err_discard_pkt++; + goto reuse_rx; + } + + /* Since we don't have a jumbo ring + * copy small packets if mtu > 1500 + */ + if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && + (len <= RX_COPY_THRESH)) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(bp->dev, + len + pad); + if (new_skb == NULL) { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped " + "because of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; + goto reuse_rx; + } + + /* aligned copy */ + skb_copy_from_linear_data_offset(skb, pad, + new_skb->data + pad, len); + skb_reserve(new_skb, pad); + skb_put(new_skb, len); + + bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); + + skb = new_skb; + + } else + if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, + DMA_FROM_DEVICE); + skb_reserve(skb, pad); + skb_put(skb, len); + + } else { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped because " + "of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; +reuse_rx: + bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); + goto next_rx; + } + + skb->protocol = eth_type_trans(skb, bp->dev); + + /* Set Toeplitz hash for a none-LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + skb->ip_summed = CHECKSUM_NONE; + if (bp->rx_csum) { + if (likely(BNX2X_RX_CSUM_OK(cqe))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + fp->eth_q_stats.hw_csum_err++; + } + } + + skb_record_rx_queue(skb, fp->index); + +#ifdef BCM_VLAN + if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && + (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & + PARSING_FLAGS_VLAN)) + vlan_gro_receive(&fp->napi, bp->vlgrp, + le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb); + else +#endif + napi_gro_receive(&fp->napi, skb); + + +next_rx: + rx_buf->skb = NULL; + + bd_cons = NEXT_RX_IDX(bd_cons); + bd_prod = NEXT_RX_IDX(bd_prod); + bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); + rx_pkt++; +next_cqe: + sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); + sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + + if (rx_pkt == budget) + break; + } /* while */ + + fp->rx_bd_cons = bd_cons; + fp->rx_bd_prod = bd_prod_fw; + fp->rx_comp_cons = sw_comp_cons; + fp->rx_comp_prod = sw_comp_prod; + + /* Update producers */ + bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, + fp->rx_sge_prod); + + fp->rx_pkt += rx_pkt; + fp->rx_calls++; + + return rx_pkt; +} + +static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) +{ + struct bnx2x_fastpath *fp = fp_cookie; + struct bnx2x *bp = fp->bp; + + /* Return here if interrupt is disabled */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) { + DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); + return IRQ_HANDLED; + } + + DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", + fp->index, fp->sb_id); + bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + + /* Handle Rx and Tx according to MSI-X vector */ + prefetch(fp->rx_cons_sb); + prefetch(fp->tx_cons_sb); + prefetch(&fp->status_blk->u_status_block.status_block_index); + prefetch(&fp->status_blk->c_status_block.status_block_index); + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + + return IRQ_HANDLED; +} + + +/* HW Lock for shared dual port PHYs */ +void bnx2x_acquire_phy_lock(struct bnx2x *bp) +{ + 
mutex_lock(&bp->port.phy_mutex); + + if (bp->port.need_hw_lock) + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); +} + +void bnx2x_release_phy_lock(struct bnx2x *bp) +{ + if (bp->port.need_hw_lock) + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); + + mutex_unlock(&bp->port.phy_mutex); +} + +void bnx2x_link_report(struct bnx2x *bp) +{ + if (bp->flags & MF_FUNC_DIS) { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + return; + } + + if (bp->link_vars.link_up) { + u16 line_speed; + + if (bp->state == BNX2X_STATE_OPEN) + netif_carrier_on(bp->dev); + netdev_info(bp->dev, "NIC Link is Up, "); + + line_speed = bp->link_vars.line_speed; + if (IS_E1HMF(bp)) { + u16 vn_max_rate; + + vn_max_rate = + ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT) * 100; + if (vn_max_rate < line_speed) + line_speed = vn_max_rate; + } + pr_cont("%d Mbps ", line_speed); + + if (bp->link_vars.duplex == DUPLEX_FULL) + pr_cont("full duplex"); + else + pr_cont("half duplex"); + + if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { + pr_cont(", receive "); + if (bp->link_vars.flow_ctrl & + BNX2X_FLOW_CTRL_TX) + pr_cont("& transmit "); + } else { + pr_cont(", transmit "); + } + pr_cont("flow control ON"); + } + pr_cont("\n"); + + } else { /* link_down */ + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + } +} + +void bnx2x_init_rx_rings(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H; + u16 ring_prod, cqe_ring_prod; + int i, j; + + bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; + DP(NETIF_MSG_IFUP, + "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); + + if (bp->flags & TPA_ENABLE_FLAG) { + + for_each_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + for (i = 0; i < max_agg_queues; i++) { + fp->tpa_pool[i].skb = + netdev_alloc_skb(bp->dev, bp->rx_buf_size); + if (!fp->tpa_pool[i].skb) { + BNX2X_ERR("Failed to allocate TPA " + "skb pool for queue[%d] - " + "disabling TPA on this " + "queue!\n", j); + bnx2x_free_tpa_pool(bp, fp, i); + fp->disable_tpa = 1; + break; + } + dma_unmap_addr_set((struct sw_rx_bd *) + &bp->fp->tpa_pool[i], + mapping, 0); + fp->tpa_state[i] = BNX2X_TPA_STOP; + } + } + } + + for_each_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + fp->rx_bd_cons = 0; + fp->rx_cons_sb = BNX2X_RX_SB_INDEX; + fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; + + /* "next page" elements initialization */ + /* SGE ring */ + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; + sge->addr_hi = + cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } + + bnx2x_init_sge_ring_bit_mask(fp); + + /* RX BD ring */ + for (i = 1; i <= NUM_RX_RINGS; i++) { + struct eth_rx_bd *rx_bd; + + rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; + rx_bd->addr_hi = + cpu_to_le32(U64_HI(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + rx_bd->addr_lo = + cpu_to_le32(U64_LO(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + } + + /* CQ ring */ + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + 
cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } + + /* Allocate SGEs and initialize the ring elements */ + for (i = 0, ring_prod = 0; + i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { + + if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { + BNX2X_ERR("was only able to allocate " + "%d rx sges\n", i); + BNX2X_ERR("disabling TPA for queue[%d]\n", j); + /* Cleanup already allocated elements */ + bnx2x_free_rx_sge_range(bp, fp, ring_prod); + bnx2x_free_tpa_pool(bp, fp, max_agg_queues); + fp->disable_tpa = 1; + ring_prod = 0; + break; + } + ring_prod = NEXT_SGE_IDX(ring_prod); + } + fp->rx_sge_prod = ring_prod; + + /* Allocate BDs and initialize BD ring */ + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { + BNX2X_ERR("was only able to allocate " + "%d rx skbs on queue[%d]\n", i, j); + fp->eth_q_stats.rx_skb_alloc_failed++; + break; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= i); + } + + fp->rx_bd_prod = ring_prod; + /* must not have more available CQEs than BDs */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + /* Warning! + * this will generate an interrupt (to the TSTORM) + * must only be done after chip is initialized + */ + bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod, + fp->rx_sge_prod); + if (j != 0) + continue; + + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), + U64_LO(fp->rx_comp_mapping)); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, + U64_HI(fp->rx_comp_mapping)); + } +} +static void bnx2x_free_tx_skbs(struct bnx2x *bp) +{ + int i; + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + u16 bd_cons = fp->tx_bd_cons; + u16 sw_prod = fp->tx_pkt_prod; + u16 sw_cons = fp->tx_pkt_cons; + + while (sw_cons != sw_prod) { + bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); + sw_cons++; + } + } +} + +static void bnx2x_free_rx_skbs(struct bnx2x *bp) +{ + int i, j; + + for_each_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + for (i = 0; i < NUM_RX_BD; i++) { + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; + struct sk_buff *skb = rx_buf->skb; + + if (skb == NULL) + continue; + + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, DMA_FROM_DEVICE); + + rx_buf->skb = NULL; + dev_kfree_skb(skb); + } + if (!fp->disable_tpa) + bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 
+ ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H); + } +} + +void bnx2x_free_skbs(struct bnx2x *bp) +{ + bnx2x_free_tx_skbs(bp); + bnx2x_free_rx_skbs(bp); +} + +static void bnx2x_free_msix_irqs(struct bnx2x *bp) +{ + int i, offset = 1; + + free_irq(bp->msix_table[0].vector, bp->dev); + DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", + bp->msix_table[0].vector); + +#ifdef BCM_CNIC + offset++; +#endif + for_each_queue(bp, i) { + DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " + "state %x\n", i, bp->msix_table[i + offset].vector, + bnx2x_fp(bp, i, state)); + + free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); + } +} + +void bnx2x_free_irq(struct bnx2x *bp, bool disable_only) +{ + if (bp->flags & USING_MSIX_FLAG) { + if (!disable_only) + bnx2x_free_msix_irqs(bp); + pci_disable_msix(bp->pdev); + bp->flags &= ~USING_MSIX_FLAG; + + } else if (bp->flags & USING_MSI_FLAG) { + if (!disable_only) + free_irq(bp->pdev->irq, bp->dev); + pci_disable_msi(bp->pdev); + bp->flags &= ~USING_MSI_FLAG; + + } else if (!disable_only) + free_irq(bp->pdev->irq, bp->dev); +} + +static int bnx2x_enable_msix(struct bnx2x *bp) +{ + int i, rc, offset = 1; + int igu_vec = 0; + + bp->msix_table[0].entry = igu_vec; + DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); + +#ifdef BCM_CNIC + igu_vec = BP_L_ID(bp) + offset; + bp->msix_table[1].entry = igu_vec; + DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); + offset++; +#endif + for_each_queue(bp, i) { + igu_vec = BP_L_ID(bp) + offset + i; + bp->msix_table[i + offset].entry = igu_vec; + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " + "(fastpath #%u)\n", i + offset, igu_vec, i); + } + + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], + BNX2X_NUM_QUEUES(bp) + offset); + + /* + * reconfigure number of tx/rx queues according to available + * MSI-X vectors + */ + if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { + /* vectors available for FP */ + int fp_vec = rc - BNX2X_MSIX_VEC_FP_START; + + DP(NETIF_MSG_IFUP, + "Trying to use less MSI-X vectors: %d\n", rc); + + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); + + if (rc) { + DP(NETIF_MSG_IFUP, + "MSI-X is not attainable rc %d\n", rc); + return rc; + } + + bp->num_queues = min(bp->num_queues, fp_vec); + + DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", + bp->num_queues); + } else if (rc) { + DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); + return rc; + } + + bp->flags |= USING_MSIX_FLAG; + + return 0; +} + +static int bnx2x_req_msix_irqs(struct bnx2x *bp) +{ + int i, rc, offset = 1; + + rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, + bp->dev->name, bp->dev); + if (rc) { + BNX2X_ERR("request sp irq failed\n"); + return -EBUSY; + } + +#ifdef BCM_CNIC + offset++; +#endif + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + bp->dev->name, i); + + rc = request_irq(bp->msix_table[i + offset].vector, + bnx2x_msix_fp_int, 0, fp->name, fp); + if (rc) { + BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); + bnx2x_free_msix_irqs(bp); + return -EBUSY; + } + + fp->state = BNX2X_FP_STATE_IRQ; + } + + i = BNX2X_NUM_QUEUES(bp); + netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" + " ... 
fp[%d] %d\n", + bp->msix_table[0].vector, + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + + return 0; +} + +static int bnx2x_enable_msi(struct bnx2x *bp) +{ + int rc; + + rc = pci_enable_msi(bp->pdev); + if (rc) { + DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); + return -1; + } + bp->flags |= USING_MSI_FLAG; + + return 0; +} + +static int bnx2x_req_irq(struct bnx2x *bp) +{ + unsigned long flags; + int rc; + + if (bp->flags & USING_MSI_FLAG) + flags = 0; + else + flags = IRQF_SHARED; + + rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, + bp->dev->name, bp->dev); + if (!rc) + bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; + + return rc; +} + +static void bnx2x_napi_enable(struct bnx2x *bp) +{ + int i; + + for_each_queue(bp, i) + napi_enable(&bnx2x_fp(bp, i, napi)); +} + +static void bnx2x_napi_disable(struct bnx2x *bp) +{ + int i; + + for_each_queue(bp, i) + napi_disable(&bnx2x_fp(bp, i, napi)); +} + +void bnx2x_netif_start(struct bnx2x *bp) +{ + int intr_sem; + + intr_sem = atomic_dec_and_test(&bp->intr_sem); + smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ + + if (intr_sem) { + if (netif_running(bp->dev)) { + bnx2x_napi_enable(bp); + bnx2x_int_enable(bp); + if (bp->state == BNX2X_STATE_OPEN) + netif_tx_wake_all_queues(bp->dev); + } + } +} + +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) +{ + bnx2x_int_disable_sync(bp, disable_hw); + bnx2x_napi_disable(bp); + netif_tx_disable(bp->dev); +} +static int bnx2x_set_num_queues(struct bnx2x *bp) +{ + int rc = 0; + + switch (bp->int_mode) { + case INT_MODE_INTx: + case INT_MODE_MSI: + bp->num_queues = 1; + DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); + break; + default: + /* Set number of queues according to bp->multi_mode value */ + bnx2x_set_num_queues_msix(bp); + + DP(NETIF_MSG_IFUP, "set number of queues to %d\n", + bp->num_queues); + + /* if we can't use MSI-X we only need one fp, + * so try to enable MSI-X with the requested number of fp's + * and fallback to MSI or legacy INTx with one fp + */ + rc = bnx2x_enable_msix(bp); + if (rc) + /* failed to enable MSI-X */ + bp->num_queues = 1; + break; + } + bp->dev->real_num_tx_queues = bp->num_queues; + return rc; +} + +/* must be called with rtnl_lock */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode) +{ + u32 load_code; + int i, rc; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EPERM; +#endif + + bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + + rc = bnx2x_set_num_queues(bp); + + if (bnx2x_alloc_mem(bp)) { + bnx2x_free_irq(bp, true); + return -ENOMEM; + } + + for_each_queue(bp, i) + bnx2x_fp(bp, i, disable_tpa) = + ((bp->flags & TPA_ENABLE_FLAG) == 0); + + for_each_queue(bp, i) + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, 128); + + bnx2x_napi_enable(bp); + + if (bp->flags & USING_MSIX_FLAG) { + rc = bnx2x_req_msix_irqs(bp); + if (rc) { + bnx2x_free_irq(bp, true); + goto load_error1; + } + } else { + /* Fall to INTx if failed to enable MSI-X due to lack of + memory (in bnx2x_set_num_queues()) */ + if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx)) + bnx2x_enable_msi(bp); + bnx2x_ack_int(bp); + rc = bnx2x_req_irq(bp); + if (rc) { + BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); + bnx2x_free_irq(bp, true); + goto load_error1; + } + if (bp->flags & USING_MSI_FLAG) { + bp->dev->irq = bp->pdev->irq; + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->pdev->irq); + } + } + + /* Send LOAD_REQUEST command to MCP + Returns the type of LOAD command: + if it is the first port to 
be initialized + common blocks should be initialized, otherwise - not + */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + goto load_error2; + } + if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { + rc = -EBUSY; /* other port in diagnostic mode */ + goto load_error2; + } + + } else { + int port = BP_PORT(bp); + + DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", + load_count[0], load_count[1], load_count[2]); + load_count[0]++; + load_count[1 + port]++; + DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", + load_count[0], load_count[1], load_count[2]); + if (load_count[0] == 1) + load_code = FW_MSG_CODE_DRV_LOAD_COMMON; + else if (load_count[1 + port] == 1) + load_code = FW_MSG_CODE_DRV_LOAD_PORT; + else + load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; + } + + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) + bp->port.pmf = 1; + else + bp->port.pmf = 0; + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + + /* Initialize HW */ + rc = bnx2x_init_hw(bp, load_code); + if (rc) { + BNX2X_ERR("HW init failed, aborting\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); + goto load_error2; + } + + /* Setup NIC internals and enable interrupts */ + bnx2x_nic_init(bp, load_code); + + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && + (bp->common.shmem2_base)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + + /* Send LOAD_DONE command to MCP */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + goto load_error3; + } + } + + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); +#ifndef BNX2X_STOP_ON_ERROR + goto load_error3; +#else + bp->panic = 1; + return -EBUSY; +#endif + } + + if (CHIP_IS_E1H(bp)) + if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + } + + if (bp->state == BNX2X_STATE_OPEN) { +#ifdef BCM_CNIC + /* Enable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); +#endif + for_each_nondefault_queue(bp, i) { + rc = bnx2x_setup_multi(bp, i); + if (rc) +#ifdef BCM_CNIC + goto load_error4; +#else + goto load_error3; +#endif + } + + if (CHIP_IS_E1(bp)) + bnx2x_set_eth_mac_addr_e1(bp, 1); + else + bnx2x_set_eth_mac_addr_e1h(bp, 1); +#ifdef BCM_CNIC + /* Set iSCSI L2 MAC */ + mutex_lock(&bp->cnic_mutex); + if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { + bnx2x_set_iscsi_eth_mac_addr(bp, 1); + bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; + bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, + CNIC_SB_ID(bp)); + } + mutex_unlock(&bp->cnic_mutex); +#endif + } + + if (bp->port.pmf) + bnx2x_initial_phy_init(bp, load_mode); + + /* Start fast path */ + switch (load_mode) { + case LOAD_NORMAL: + if (bp->state == BNX2X_STATE_OPEN) { + /* Tx queue should be only reenabled */ + netif_tx_wake_all_queues(bp->dev); + } + /* Initialize the receive filter. */ + bnx2x_set_rx_mode(bp->dev); + break; + + case LOAD_OPEN: + netif_tx_start_all_queues(bp->dev); + if (bp->state != BNX2X_STATE_OPEN) + netif_tx_disable(bp->dev); + /* Initialize the receive filter. 
*/ + bnx2x_set_rx_mode(bp->dev); + break; + + case LOAD_DIAG: + /* Initialize the receive filter. */ + bnx2x_set_rx_mode(bp->dev); + bp->state = BNX2X_STATE_DIAG; + break; + + default: + break; + } + + if (!bp->port.pmf) + bnx2x__link_status_update(bp); + + /* start the timer */ + mod_timer(&bp->timer, jiffies + bp->current_interval); + +#ifdef BCM_CNIC + bnx2x_setup_cnic_irq_info(bp); + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); +#endif + bnx2x_inc_load_cnt(bp); + + return 0; + +#ifdef BCM_CNIC +load_error4: + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); +#endif +load_error3: + bnx2x_int_disable_sync(bp, 1); + if (!BP_NOMCP(bp)) { + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); + } + bp->port.pmf = 0; + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + for_each_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); +load_error2: + /* Release IRQs */ + bnx2x_free_irq(bp, false); +load_error1: + bnx2x_napi_disable(bp); + for_each_queue(bp, i) + netif_napi_del(&bnx2x_fp(bp, i, napi)); + bnx2x_free_mem(bp); + + return rc; +} + +/* must be called with rtnl_lock */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +{ + int i; + + if (bp->state == BNX2X_STATE_CLOSED) { + /* Interface has been removed - nothing to recover */ + bp->recovery_state = BNX2X_RECOVERY_DONE; + bp->is_leader = 0; + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); + smp_wmb(); + + return -EINVAL; + } + +#ifdef BCM_CNIC + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); +#endif + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; + + /* Set "drop all" */ + bp->rx_mode = BNX2X_RX_MODE_NONE; + bnx2x_set_storm_rx_mode(bp); + + /* Disable HW interrupts, NAPI and Tx */ + bnx2x_netif_stop(bp, 1); + netif_carrier_off(bp->dev); + + del_timer_sync(&bp->timer); + SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, + (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + /* Release IRQs */ + bnx2x_free_irq(bp, false); + + /* Cleanup the chip if needed */ + if (unload_mode != UNLOAD_RECOVERY) + bnx2x_chip_cleanup(bp, unload_mode); + + bp->port.pmf = 0; + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + for_each_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + for_each_queue(bp, i) + netif_napi_del(&bnx2x_fp(bp, i, napi)); + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + + /* The last driver must disable a "close the gate" if there is no + * parity attention or "process kill" pending. 
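+	 * (In the check below this means: the call to bnx2x_dec_load_cnt()
+	 * just dropped the load count to zero, bnx2x_chk_parity_attn()
+	 * reports no parity attention and bnx2x_reset_is_done() reports
+	 * that no reset is still in progress.)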
+ */ + if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && + bnx2x_reset_is_done(bp)) + bnx2x_disable_close_the_gate(bp); + + /* Reset MCP mail box sequence if there is on going recovery */ + if (unload_mode == UNLOAD_RECOVERY) + bp->fw_seq = 0; + + return 0; +} +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) +{ + u16 pmcsr; + + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + + switch (state) { + case PCI_D0: + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | + PCI_PM_CTRL_PME_STATUS)); + + if (pmcsr & PCI_PM_CTRL_STATE_MASK) + /* delay required during transition out of D3hot */ + msleep(20); + break; + + case PCI_D3hot: + /* If there are other clients above don't + shut down the power */ + if (atomic_read(&bp->pdev->enable_cnt) != 1) + return 0; + /* Don't shut down the power for emulation and FPGA */ + if (CHIP_REV_IS_SLOW(bp)) + return 0; + + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pmcsr |= 3; + + if (bp->wol) + pmcsr |= PCI_PM_CTRL_PME_ENABLE; + + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + pmcsr); + + /* No more memory access after this point until + * device is brought back to D0. + */ + break; + + default: + return -EINVAL; + } + return 0; +} + + + +/* + * net_device service functions + */ + +static int bnx2x_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0; + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + + while (1) { +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + napi_complete(napi); + return 0; + } +#endif + + if (bnx2x_has_tx_work(fp)) + bnx2x_tx_int(fp); + + if (bnx2x_has_rx_work(fp)) { + work_done += bnx2x_rx_int(fp, budget - work_done); + + /* must not complete if we consumed full budget */ + if (work_done >= budget) + break; + } + + /* Fall out from the NAPI loop if needed */ + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + bnx2x_update_fpsb_idx(fp); + /* bnx2x_has_rx_work() reads the status block, thus we need + * to ensure that status block indices have been actually read + * (bnx2x_update_fpsb_idx) prior to this check + * (bnx2x_has_rx_work) so that we won't write the "newer" + * value of the status block to IGU (if there was a DMA right + * after bnx2x_has_rx_work and if there is no rmb, the memory + * reading (bnx2x_update_fpsb_idx) may be postponed to right + * before bnx2x_ack_sb). In this case there will never be + * another interrupt until there is another update of the + * status block, while there is still unhandled work. 
+ */ + rmb(); + + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + napi_complete(napi); + /* Re-enable interrupts */ + bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, + le16_to_cpu(fp->fp_c_idx), + IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, + le16_to_cpu(fp->fp_u_idx), + IGU_INT_ENABLE, 1); + break; + } + } + } + + return work_done; +} + + +/* we split the first BD into headers and data BDs + * to ease the pain of our fellow microcode engineers + * we use one mapping for both BDs + * So far this has only been observed to happen + * in Other Operating Systems(TM) + */ +static noinline u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod, int nbd) +{ + struct eth_tx_start_bd *h_tx_bd = *tx_bd; + struct eth_tx_bd *d_tx_bd; + dma_addr_t mapping; + int old_len = le16_to_cpu(h_tx_bd->nbytes); + + /* first fix first BD */ + h_tx_bd->nbd = cpu_to_le16(nbd); + h_tx_bd->nbytes = cpu_to_le16(hlen); + + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " + "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, + h_tx_bd->addr_lo, h_tx_bd->nbd); + + /* now get a new data BD + * (after the pbd) and fill it */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; + + mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), + le32_to_cpu(h_tx_bd->addr_lo)) + hlen; + + d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); + + /* this marks the BD as one that has no individual mapping */ + tx_buf->flags |= BNX2X_TSO_SPLIT_BD; + + DP(NETIF_MSG_TX_QUEUED, + "TSO split data size is %d (%x:%x)\n", + d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); + + /* update tx_bd */ + *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; + + return bd_prod; +} + +static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +{ + if (fix > 0) + csum = (u16) ~csum_fold(csum_sub(csum, + csum_partial(t_header - fix, fix, 0))); + + else if (fix < 0) + csum = (u16) ~csum_fold(csum_add(csum, + csum_partial(t_header, -fix, 0))); + + return swab16(csum); +} + +static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +{ + u32 rc; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + rc = XMIT_PLAIN; + + else { + if (skb->protocol == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + + } else { + rc = XMIT_CSUM_V4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + } + } + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); + + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); + + return rc; +} + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +/* check if packet requires linearization (packet is too fragmented) + no need to check fragmentation if page size > 8K (there will be no + violation to FW restrictions) */ +static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, + u32 xmit_type) +{ + int to_copy = 0; + int hlen = 0; + int first_bd_sz = 0; + + /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ + if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { + + if (xmit_type & XMIT_GSO) { + unsigned short lso_mss = skb_shinfo(skb)->gso_size; + /* Check if LSO packet needs to be copied: + 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ + int wnd_size = MAX_FETCH_BD - 3; + /* Number of 
windows to check */ + int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; + int wnd_idx = 0; + int frag_idx = 0; + u32 wnd_sum = 0; + + /* Headers length */ + hlen = (int)(skb_transport_header(skb) - skb->data) + + tcp_hdrlen(skb); + + /* Amount of data (w/o headers) on linear part of SKB*/ + first_bd_sz = skb_headlen(skb) - hlen; + + wnd_sum = first_bd_sz; + + /* Calculate the first sum - it's special */ + for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) + wnd_sum += + skb_shinfo(skb)->frags[frag_idx].size; + + /* If there was data on linear skb data - check it */ + if (first_bd_sz > 0) { + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + goto exit_lbl; + } + + wnd_sum -= first_bd_sz; + } + + /* Others are easier: run through the frag list and + check all windows */ + for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { + wnd_sum += + skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; + + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + break; + } + wnd_sum -= + skb_shinfo(skb)->frags[wnd_idx].size; + } + } else { + /* in non-LSO too fragmented packet should always + be linearized */ + to_copy = 1; + } + } + +exit_lbl: + if (unlikely(to_copy)) + DP(NETIF_MSG_TX_QUEUED, + "Linearization IS REQUIRED for %s packet. " + "num_frags %d hlen %d first_bd_sz %d\n", + (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", + skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); + + return to_copy; +} +#endif + +/* called with netif_tx_lock + * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue() + */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_fastpath *fp; + struct netdev_queue *txq; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; + struct eth_tx_parse_bd *pbd = NULL; + u16 pkt_prod, bd_prod; + int nbd, fp_index; + dma_addr_t mapping; + u32 xmit_type = bnx2x_xmit_type(bp, skb); + int i; + u8 hlen = 0; + __le16 pkt_size = 0; + struct ethhdr *eth; + u8 mac_type = UNICAST_ADDRESS; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return NETDEV_TX_BUSY; +#endif + + fp_index = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, fp_index); + + fp = &bp->fp[fp_index]; + + if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { + fp->eth_q_stats.driver_xoff++; + netif_tx_stop_queue(txq); + BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" + " gso type %x xmit_type %x\n", + skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, + ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + + eth = (struct ethhdr *)skb->data; + + /* set flag according to packet type (UNICAST_ADDRESS is default)*/ + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + if (is_broadcast_ether_addr(eth->h_dest)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) + /* First, check if we need to linearize the skb (due to FW + restrictions). 
No need to check fragmentation if page size > 8K + (there will be no violation to FW restrictions) */ + if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { + /* Statistics of linearization */ + bp->lin_cnt++; + if (skb_linearize(skb) != 0) { + DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " + "silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + + /* + Please read carefully. First we use one BD which we mark as start, + then we have a parsing info BD (used for TSO or xsum), + and only then we have the rest of the TSO BDs. + (don't forget to mark the last one as last, + and to unmap only AFTER you write to the BD ...) + And above all, all pdb sizes are in words - NOT DWORDS! + */ + + pkt_prod = fp->tx_pkt_prod++; + bd_prod = TX_BD(fp->tx_bd_prod); + + /* get a tx_buf and first BD */ + tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; + tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; + + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + tx_start_bd->general_data = (mac_type << + ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); + /* header nbd */ + tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); + + /* remember the first BD of the packet */ + tx_buf->first_bd = fp->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + DP(NETIF_MSG_TX_QUEUED, + "sending pkt %u @%p next_idx %u bd %u @%p\n", + pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); + +#ifdef BCM_VLAN + if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && + (bp->flags & HW_VLAN_TX_FLAG)) { + tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; + } else +#endif + tx_start_bd->vlan = cpu_to_le16(pkt_prod); + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + pbd = &fp->tx_desc_ring[bd_prod].parse_bd; + + memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); + + if (xmit_type & XMIT_CSUM) { + hlen = (skb_network_header(skb) - skb->data) / 2; + + /* for now NS flag is not used in Linux */ + pbd->global_data = + (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); + + pbd->ip_hlen = (skb_transport_header(skb) - + skb_network_header(skb)) / 2; + + hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; + + pbd->total_hlen = cpu_to_le16(hlen); + hlen = hlen*2; + + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + + if (xmit_type & XMIT_CSUM_V4) + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IP_CSUM; + else + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IPV6; + + if (xmit_type & XMIT_CSUM_TCP) { + pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + + } else { + s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ + + pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG; + + DP(NETIF_MSG_TX_QUEUED, + "hlen %d fix %d csum before fix %x\n", + le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); + + /* HW bug: fixup the CSUM */ + pbd->tcp_pseudo_csum = + bnx2x_csum_fix(skb_transport_header(skb), + SKB_CS(skb), fix); + + DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", + pbd->tcp_pseudo_csum); + } + } + + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ + tx_start_bd->nbd = cpu_to_le16(nbd); + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + pkt_size = tx_start_bd->nbytes; + + DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" + " nbytes %d flags %x vlan %x\n", + tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, + le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), + tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); + + if (xmit_type & XMIT_GSO) { + + DP(NETIF_MSG_TX_QUEUED, + "TSO packet len %d hlen %d total len %d tso size %d\n", + skb->len, hlen, skb_headlen(skb), + skb_shinfo(skb)->gso_size); + + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; + + if (unlikely(skb_headlen(skb) > hlen)) + bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, + hlen, bd_prod, ++nbd); + + pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); + pbd->tcp_flags = pbd_tcp_flags(skb); + + if (xmit_type & XMIT_GSO_V4) { + pbd->ip_id = swab16(ip_hdr(skb)->id); + pbd->tcp_pseudo_csum = + swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + } else + pbd->tcp_pseudo_csum = + swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; + } + tx_data_bd = (struct eth_tx_bd *)tx_start_bd; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; + if (total_pkt_bd == NULL) + total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; + + mapping = dma_map_page(&bp->pdev->dev, frag->page, + frag->page_offset, + frag->size, DMA_TO_DEVICE); + + tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_data_bd->nbytes = cpu_to_le16(frag->size); + le16_add_cpu(&pkt_size, frag->size); + + DP(NETIF_MSG_TX_QUEUED, + "frag %d bd @%p addr (%x:%x) nbytes %d\n", + i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, + le16_to_cpu(tx_data_bd->nbytes)); + } + + DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + /* now send a tx doorbell, counting the next BD + * if the packet contains or ends with it + */ + if (TX_BD_POFF(bd_prod) < nbd) + nbd++; + + if (total_pkt_bd != NULL) + total_pkt_bd->total_pkt_bytes = pkt_size; + + if (pbd) + DP(NETIF_MSG_TX_QUEUED, + "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" + " tcp_flags %x xsum %x seq %u hlen %u\n", + pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, + pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, + pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); + + DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + + /* + * Make sure that the BD data is updated before updating the producer + * 
since FW might read the BD right after the producer is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes packets must have BDs. + */ + wmb(); + + fp->tx_db.data.prod += nbd; + barrier(); + DOORBELL(bp, fp->index, fp->tx_db.raw); + + mmiowb(); + + fp->tx_bd_prod += nbd; + + if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { + netif_tx_stop_queue(txq); + + /* paired memory barrier is in bnx2x_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons */ + smp_mb(); + + fp->eth_q_stats.driver_xoff++; + if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) + netif_tx_wake_queue(txq); + } + fp->tx_pkt++; + + return NETDEV_TX_OK; +} +/* called with rtnl_lock */ +int bnx2x_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnx2x *bp = netdev_priv(dev); + + if (!is_valid_ether_addr((u8 *)(addr->sa_data))) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (netif_running(dev)) { + if (CHIP_IS_E1(bp)) + bnx2x_set_eth_mac_addr_e1(bp, 1); + else + bnx2x_set_eth_mac_addr_e1h(bp, 1); + } + + return 0; +} + +/* called with rtnl_lock */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) + return -EINVAL; + + /* This does not race with packet allocation + * because the actual alloc size is + * only updated as part of load + */ + dev->mtu = new_mtu; + + if (netif_running(dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } + + return rc; +} + +void bnx2x_tx_timeout(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + +#ifdef BNX2X_STOP_ON_ERROR + if (!bp->panic) + bnx2x_panic(); +#endif + /* This allows the netif to be shutdown gracefully before resetting */ + schedule_delayed_work(&bp->reset_task, 0); +} + +#ifdef BCM_VLAN +/* called with rtnl_lock */ +void bnx2x_vlan_rx_register(struct net_device *dev, + struct vlan_group *vlgrp) +{ + struct bnx2x *bp = netdev_priv(dev); + + bp->vlgrp = vlgrp; + + /* Set flags according to the required capabilities */ + bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); + + if (dev->features & NETIF_F_HW_VLAN_TX) + bp->flags |= HW_VLAN_TX_FLAG; + + if (dev->features & NETIF_F_HW_VLAN_RX) + bp->flags |= HW_VLAN_RX_FLAG; + + if (netif_running(dev)) + bnx2x_set_client_config(bp); +} + +#endif +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + rtnl_lock(); + + pci_save_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + netif_device_detach(dev); + + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + + bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); + + rtnl_unlock(); + + return 0; +} + +int bnx2x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + int rc; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + if (bp->recovery_state != 
BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	rtnl_lock();
+
+	pci_restore_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+	rtnl_unlock();
+
+	return rc;
+}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1979b1a7ed276f391d3a860aaf41a10449c139b
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,652 @@
+/* bnx2x_cmn.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include
+#include
+
+
+#include "bnx2x.h"
+
+
+/*********************** Interfaces ****************************
+ * Functions that need to be implemented by each driver version
+ */
+
+/**
+ * Initialize link parameters structure variables.
+ *
+ * @param bp
+ * @param load_mode
+ *
+ * @return u8
+ */
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * Configure hw according to link parameters structure.
+ *
+ * @param bp
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * Query link status
+ *
+ * @param bp
+ *
+ * @return 0 - link is UP
+ */
+u8 bnx2x_link_test(struct bnx2x *bp);
+
+/**
+ * Handles link status change
+ *
+ * @param bp
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * MSI-X slowpath interrupt handler
+ *
+ * @param irq
+ * @param dev_instance
+ *
+ * @return irqreturn_t
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * non MSI-X interrupt handler
+ *
+ * @param irq
+ * @param dev_instance
+ *
+ * @return irqreturn_t
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+#ifdef BCM_CNIC
+
+/**
+ * Send command to cnic driver
+ *
+ * @param bp
+ * @param cmd
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * Provides cnic information for proper interrupt handling
+ *
+ * @param bp
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+#endif
+
+/**
+ * Enable HW interrupts.
+ *
+ * @param bp
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * Disable interrupts. This function ensures that no ISRs or
+ * SP DPCs (sp_task) are running after it returns.
+ *
+ * @param bp
+ * @param disable_hw if true, disable HW interrupts.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * Init HW blocks according to current initialization stage:
+ * COMMON, PORT or FUNCTION.
+ *
+ * @param bp
+ * @param load_code: COMMON, PORT or FUNCTION
+ *
+ * @return int
+ */
+int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
+
+/**
+ * Init driver internals:
+ *  - rings
+ *  - status blocks
+ *  - etc.
+ *
+ * @param bp
+ * @param load_code COMMON, PORT or FUNCTION
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+
+/**
+ * Allocate driver's memory.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * Release driver's memory.
+ *
+ * @param bp
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * Bring up a leading (the first) eth Client.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * Setup non-leading eth Client.
+ *
+ * @param bp
+ * @param fp
+ *
+ * @return int
+ */
+int bnx2x_setup_multi(struct bnx2x *bp, int index);
+
+/**
+ * Set number of queues according to mode and number of available
+ * MSI-X vectors
+ *
+ * @param bp
+ *
+ */
+void bnx2x_set_num_queues_msix(struct bnx2x *bp);
+
+/**
+ * Cleanup chip internals:
+ * - Cleanup MAC configuration.
+ * - Close clients.
+ * - etc.
+ *
+ * @param bp
+ * @param unload_mode
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+
+/**
+ * Acquire HW lock.
+ *
+ * @param bp
+ * @param resource Resource bit which was locked
+ *
+ * @return int
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * Release HW lock.
+ *
+ * @param bp driver handle
+ * @param resource Resource bit which was locked
+ *
+ * @return int
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * Configure eth MAC address in the HW according to the value in
+ * netdev->dev_addr for 57711
+ *
+ * @param bp driver handle
+ * @param set
+ */
+void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
+
+/**
+ * Configure eth MAC address in the HW according to the value in
+ * netdev->dev_addr for 57710
+ *
+ * @param bp driver handle
+ * @param set
+ */
+void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
+
+#ifdef BCM_CNIC
+/**
+ * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). The function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if ramrod doesn't return.
+ */
+int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
+#endif
+
+/**
+ * Initialize status block in FW and HW
+ *
+ * @param bp driver handle
+ * @param sb host_status_block
+ * @param dma_addr_t mapping
+ * @param int sb_id
+ */
+void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+		   dma_addr_t mapping, int sb_id);
+
+/**
+ * Reconfigure FW/HW according to dev->flags rx mode
+ *
+ * @param dev net_device
+ *
+ */
+void bnx2x_set_rx_mode(struct net_device *dev);
+
+/**
+ * Configure MAC filtering rules in a FW.
+ *
+ * @param bp driver handle
+ */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/* Parity errors related */
+void bnx2x_inc_load_cnt(struct bnx2x *bp);
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp);
+bool bnx2x_reset_is_done(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+
+/**
+ * Perform statistics handling according to event
+ *
+ * @param bp driver handle
+ * @param event bnx2x_stats_event
+ */
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+
+/**
+ * Configures FW with client parameters (like HW VLAN removal)
+ * for each active client.
+ * + * @param bp + */ +void bnx2x_set_client_config(struct bnx2x *bp); + +/** + * Handle sp events + * + * @param fp fastpath handle for the event + * @param rr_cqe eth_rx_cqe + */ +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); + + +static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) +{ + struct host_status_block *fpsb = fp->status_blk; + + barrier(); /* status block is written to by the chip */ + fp->fp_c_idx = fpsb->c_status_block.status_block_index; + fp->fp_u_idx = fpsb->u_status_block.status_block_index; +} + +static inline void bnx2x_update_rx_prod(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, + u16 rx_sge_prod) +{ + struct ustorm_eth_rx_producers rx_prods = {0}; + int i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. + */ + wmb(); + + for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, + ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); +} + + + +static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, + u8 storm, u16 index, u8 op, u8 update) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", + (*(u32 *)&igu_ack), hc_addr); + REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} +static inline u16 bnx2x_ack_int(struct bnx2x *bp) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_SIMD_MASK); + u32 result = REG_RD(bp, hc_addr); + + DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", + result, hc_addr); + + return result; +} + +/* + * fast path service functions + */ + +static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) +{ + /* Tell compiler that consumer and producer can change */ + barrier(); + return (fp->tx_pkt_prod != fp->tx_pkt_cons); +} + +static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) +{ + s16 used; + u16 prod; + u16 cons; + + prod = fp->tx_bd_prod; + cons = fp->tx_bd_cons; + + /* NUM_TX_RINGS = number of "next-page" entries + It will be used as a threshold */ + used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + +#ifdef BNX2X_STOP_ON_ERROR + WARN_ON(used < 0); + WARN_ON(used > fp->bp->tx_ring_size); + WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); +#endif + + return (s16)(fp->bp->tx_ring_size) - used; +} + +static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) +{ + u16 hw_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + hw_cons = le16_to_cpu(*fp->tx_cons_sb); + return hw_cons != fp->tx_pkt_cons; 
+} + +static inline void bnx2x_free_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct page *page = sw_buf->page; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + + /* Skip "next page" elements */ + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); + __free_pages(page, PAGES_PER_SGE_SHIFT); + + sw_buf->page = NULL; + sge->addr_hi = 0; + sge->addr_lo = 0; +} + +static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) + bnx2x_free_rx_sge(bp, fp, i); +} + +static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) + return -ENOMEM; + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} +static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sk_buff *skb; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + if (unlikely(skb == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + rx_buf->skb = skb; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +/* note that we are not allocating a new skb, + * we are just moving one from cons to prod + * we are not creating a new mapping, + * so there is no need to check for dma_mapping_error(). 
+ */ +static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, + struct sk_buff *skb, u16 cons, u16 prod) +{ + struct bnx2x *bp = fp->bp; + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + + dma_sync_single_for_device(&bp->pdev->dev, + dma_unmap_addr(cons_rx_buf, mapping), + RX_COPY_THRESH, DMA_FROM_DEVICE); + + prod_rx_buf->skb = cons_rx_buf->skb; + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + *prod_bd = *cons_bd; +} + +static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) +{ + int i, j; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + int idx = RX_SGE_CNT * i - 1; + + for (j = 0; j < 2; j++) { + SGE_MASK_CLEAR_BIT(fp, idx); + idx--; + } + } +} + +static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) +{ + /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ + memset(fp->sge_mask, 0xff, + (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); + + /* Clear the two last indices in the page to 1: + these are the indices that correspond to the "next" element, + hence will never be indicated and should be removed from + the calculations. */ + bnx2x_clear_sge_mask_next_elems(fp); +} +static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) { + struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]); + struct sk_buff *skb = rx_buf->skb; + + if (skb == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + + if (fp->tpa_state[i] == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, DMA_FROM_DEVICE); + + dev_kfree_skb(skb); + rx_buf->skb = NULL; + } +} + + +static inline void bnx2x_init_tx_ring(struct bnx2x *bp) +{ + int i, j; + + for_each_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(fp->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(fp->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; + fp->tx_db.data.zero_fill1 = 0; + fp->tx_db.data.prod = 0; + + fp->tx_pkt_prod = 0; + fp->tx_pkt_cons = 0; + fp->tx_bd_prod = 0; + fp->tx_bd_cons = 0; + fp->tx_cons_sb = BNX2X_TX_SB_INDEX; + fp->tx_pkt = 0; + } +} +static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) +{ + u16 rx_cons_sb; + + /* Tell compiler that status block fields can change */ + barrier(); + rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + rx_cons_sb++; + return (fp->rx_comp_cons != rx_cons_sb); +} + +/* HW Lock for shared dual port PHYs */ +void bnx2x_acquire_phy_lock(struct bnx2x *bp); +void bnx2x_release_phy_lock(struct bnx2x *bp); + +void bnx2x_link_report(struct bnx2x *bp); +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); +int bnx2x_tx_int(struct bnx2x_fastpath *fp); +void bnx2x_init_rx_rings(struct bnx2x *bp); +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); + +int bnx2x_change_mac_addr(struct net_device *dev, void *p); +void bnx2x_tx_timeout(struct net_device *dev); +void bnx2x_vlan_rx_register(struct 
net_device *dev, struct vlan_group *vlgrp); +void bnx2x_netif_start(struct bnx2x *bp); +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); +void bnx2x_free_irq(struct bnx2x *bp, bool disable_only); +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); +int bnx2x_resume(struct pci_dev *pdev); +void bnx2x_free_skbs(struct bnx2x *bp); +int bnx2x_change_mtu(struct net_device *dev, int new_mtu); +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); +int bnx2x_nic_load(struct bnx2x *bp, int load_mode); +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); + +#endif /* BNX2X_CMN_H */ diff --git a/drivers/net/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h similarity index 100% rename from drivers/net/bnx2x_dump.h rename to drivers/net/bnx2x/bnx2x_dump.h diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..8b75b05e34c5a124d246a862ea6460ef80e6ec47 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -0,0 +1,1971 @@ +/* bnx2x_ethtool.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#include +#include +#include +#include +#include + + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dump.h" + + +static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + + cmd->supported = bp->port.supported; + cmd->advertising = bp->port.advertising; + + if ((bp->state == BNX2X_STATE_OPEN) && + !(bp->flags & MF_FUNC_DIS) && + (bp->link_vars.link_up)) { + cmd->speed = bp->link_vars.line_speed; + cmd->duplex = bp->link_vars.duplex; + if (IS_E1HMF(bp)) { + u16 vn_max_rate; + + vn_max_rate = + ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT) * 100; + if (vn_max_rate < cmd->speed) + cmd->speed = vn_max_rate; + } + } else { + cmd->speed = -1; + cmd->duplex = -1; + } + + if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { + u32 ext_phy_type = + XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); + + switch (ext_phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + cmd->port = PORT_FIBRE; + break; + + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: + cmd->port = PORT_TP; + break; + + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + BNX2X_ERR("XGXS PHY Failure detected 0x%x\n", + bp->link_params.ext_phy_config); + break; + + default: + DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", + bp->link_params.ext_phy_config); + break; + } + } else + cmd->port = PORT_TP; + + cmd->phy_address = bp->mdio.prtad; + cmd->transceiver = XCVR_INTERNAL; + + if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) + cmd->autoneg = AUTONEG_ENABLE; + else + cmd->autoneg = AUTONEG_DISABLE; + + 
cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" + DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" + DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" + DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + return 0; +} + +static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 advertising; + + if (IS_E1HMF(bp)) + return 0; + + DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" + DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" + DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" + DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + if (cmd->autoneg == AUTONEG_ENABLE) { + if (!(bp->port.supported & SUPPORTED_Autoneg)) { + DP(NETIF_MSG_LINK, "Autoneg not supported\n"); + return -EINVAL; + } + + /* advertise the requested speed and duplex if supported */ + cmd->advertising &= bp->port.supported; + + bp->link_params.req_line_speed = SPEED_AUTO_NEG; + bp->link_params.req_duplex = DUPLEX_FULL; + bp->port.advertising |= (ADVERTISED_Autoneg | + cmd->advertising); + + } else { /* forced speed */ + /* advertise the requested speed and duplex if supported */ + switch (cmd->speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_FULL) { + if (!(bp->port.supported & + SUPPORTED_10baseT_Full)) { + DP(NETIF_MSG_LINK, + "10M full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + if (!(bp->port.supported & + SUPPORTED_10baseT_Half)) { + DP(NETIF_MSG_LINK, + "10M half not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } + break; + + case SPEED_100: + if (cmd->duplex == DUPLEX_FULL) { + if (!(bp->port.supported & + SUPPORTED_100baseT_Full)) { + DP(NETIF_MSG_LINK, + "100M full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_100baseT_Full | + ADVERTISED_TP); + } else { + if (!(bp->port.supported & + SUPPORTED_100baseT_Half)) { + DP(NETIF_MSG_LINK, + "100M half not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } + break; + + case SPEED_1000: + if (cmd->duplex != DUPLEX_FULL) { + DP(NETIF_MSG_LINK, "1G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { + DP(NETIF_MSG_LINK, "1G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + break; + + case SPEED_2500: + if (cmd->duplex != DUPLEX_FULL) { + DP(NETIF_MSG_LINK, + "2.5G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { + DP(NETIF_MSG_LINK, + "2.5G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + break; + + case SPEED_10000: + if (cmd->duplex != DUPLEX_FULL) { + DP(NETIF_MSG_LINK, "10G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { + DP(NETIF_MSG_LINK, "10G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + break; + + default: + DP(NETIF_MSG_LINK, 
"Unsupported speed\n"); + return -EINVAL; + } + + bp->link_params.req_line_speed = cmd->speed; + bp->link_params.req_duplex = cmd->duplex; + bp->port.advertising = advertising; + } + + DP(NETIF_MSG_LINK, "req_line_speed %d\n" + DP_LEVEL " req_duplex %d advertising 0x%x\n", + bp->link_params.req_line_speed, bp->link_params.req_duplex, + bp->port.advertising); + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) +#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) + +static int bnx2x_get_regs_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + int i; + + if (CHIP_IS_E1(bp)) { + for (i = 0; i < REGS_COUNT; i++) + if (IS_E1_ONLINE(reg_addrs[i].info)) + regdump_len += reg_addrs[i].size; + + for (i = 0; i < WREGS_COUNT_E1; i++) + if (IS_E1_ONLINE(wreg_addrs_e1[i].info)) + regdump_len += wreg_addrs_e1[i].size * + (1 + wreg_addrs_e1[i].read_regs_count); + + } else { /* E1H */ + for (i = 0; i < REGS_COUNT; i++) + if (IS_E1H_ONLINE(reg_addrs[i].info)) + regdump_len += reg_addrs[i].size; + + for (i = 0; i < WREGS_COUNT_E1H; i++) + if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) + regdump_len += wreg_addrs_e1h[i].size * + (1 + wreg_addrs_e1h[i].read_regs_count); + } + regdump_len *= 4; + regdump_len += sizeof(struct dump_hdr); + + return regdump_len; +} + +static void bnx2x_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + u32 *p = _p, i, j; + struct bnx2x *bp = netdev_priv(dev); + struct dump_hdr dump_hdr = {0}; + + regs->version = 0; + memset(p, 0, regs->len); + + if (!netif_running(bp->dev)) + return; + + dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; + dump_hdr.dump_sign = dump_sign_all; + dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); + dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); + dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); + dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); + dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE; + + memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); + p += dump_hdr.hdr_size + 1; + + if (CHIP_IS_E1(bp)) { + for (i = 0; i < REGS_COUNT; i++) + if (IS_E1_ONLINE(reg_addrs[i].info)) + for (j = 0; j < reg_addrs[i].size; j++) + *p++ = REG_RD(bp, + reg_addrs[i].addr + j*4); + + } else { /* E1H */ + for (i = 0; i < REGS_COUNT; i++) + if (IS_E1H_ONLINE(reg_addrs[i].info)) + for (j = 0; j < reg_addrs[i].size; j++) + *p++ = REG_RD(bp, + reg_addrs[i].addr + j*4); + } +} + +#define PHY_FW_VER_LEN 10 + +static void bnx2x_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnx2x *bp = netdev_priv(dev); + u8 phy_fw_ver[PHY_FW_VER_LEN]; + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + + phy_fw_ver[0] = '\0'; + if (bp->port.pmf) { + bnx2x_acquire_phy_lock(bp); + bnx2x_get_ext_phy_fw_version(&bp->link_params, + (bp->state != BNX2X_STATE_CLOSED), + phy_fw_ver, PHY_FW_VER_LEN); + bnx2x_release_phy_lock(bp); + } + + strncpy(info->fw_version, bp->fw_ver, 32); + snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), + "bc %d.%d.%d%s%s", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff), + ((phy_fw_ver[0] != '\0') ? 
" phy " : ""), phy_fw_ver); + strcpy(info->bus_info, pci_name(bp->pdev)); + info->n_stats = BNX2X_NUM_STATS; + info->testinfo_len = BNX2X_NUM_TESTS; + info->eedump_len = bp->common.flash_size; + info->regdump_len = bnx2x_get_regs_len(dev); +} + +static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & NO_WOL_FLAG) { + wol->supported = 0; + wol->wolopts = 0; + } else { + wol->supported = WAKE_MAGIC; + if (bp->wol) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + } + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGIC) { + if (bp->flags & NO_WOL_FLAG) + return -EINVAL; + + bp->wol = 1; + } else + bp->wol = 0; + + return 0; +} + +static u32 bnx2x_get_msglevel(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->msg_enable; +} + +static void bnx2x_set_msglevel(struct net_device *dev, u32 level) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (capable(CAP_NET_ADMIN)) + bp->msg_enable = level; +} + +static int bnx2x_nway_reset(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!bp->port.pmf) + return 0; + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +static u32 bnx2x_get_link(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & MF_FUNC_DIS) + return 0; + + return bp->link_vars.link_up; +} + +static int bnx2x_get_eeprom_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->common.flash_size; +} + +static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int count, i; + u32 val = 0; + + /* adjust timeout for emulation/FPGA */ + count = NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* request access to nvram interface */ + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); + + for (i = 0; i < count*10; i++) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); + if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) + break; + + udelay(5); + } + + if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { + DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_release_nvram_lock(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int count, i; + u32 val = 0; + + /* adjust timeout for emulation/FPGA */ + count = NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* relinquish nvram interface */ + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); + + for (i = 0; i < count*10; i++) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); + if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) + break; + + udelay(5); + } + + if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { + DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); + return -EBUSY; + } + + return 0; +} + +static void bnx2x_enable_nvram_access(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + + /* enable both bits, even on read */ + REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, + (val | MCPR_NVM_ACCESS_ENABLE_EN | + MCPR_NVM_ACCESS_ENABLE_WR_EN)); +} + +static void bnx2x_disable_nvram_access(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, 
MCP_REG_MCPR_NVM_ACCESS_ENABLE); + + /* disable both bits, even after read */ + REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, + (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | + MCPR_NVM_ACCESS_ENABLE_WR_EN))); +} + +static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, + u32 cmd_flags) +{ + int count, i, rc; + u32 val; + + /* build the command word */ + cmd_flags |= MCPR_NVM_COMMAND_DOIT; + + /* need to clear DONE bit separately */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + + /* address of the NVRAM to read from */ + REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, + (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + + /* issue a read command */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + + /* adjust timeout for emulation/FPGA */ + count = NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* wait for completion */ + *ret_val = 0; + rc = -EBUSY; + for (i = 0; i < count; i++) { + udelay(5); + val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); + + if (val & MCPR_NVM_COMMAND_DONE) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); + /* we read nvram data in cpu order + * but ethtool sees it as an array of bytes + * converting to big-endian will do the work */ + *ret_val = cpu_to_be32(val); + rc = 0; + break; + } + } + + return rc; +} + +static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + __be32 val; + + if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { + DP(BNX2X_MSG_NVM, + "Invalid parameter: offset 0x%x buf_size 0x%x\n", + offset, buf_size); + return -EINVAL; + } + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + /* read the first word(s) */ + cmd_flags = MCPR_NVM_COMMAND_FIRST; + while ((buf_size > sizeof(u32)) && (rc == 0)) { + rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); + memcpy(ret_buf, &val, 4); + + /* advance to the next dword */ + offset += sizeof(u32); + ret_buf += sizeof(u32); + buf_size -= sizeof(u32); + cmd_flags = 0; + } + + if (rc == 0) { + cmd_flags |= MCPR_NVM_COMMAND_LAST; + rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); + memcpy(ret_buf, &val, 4); + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *eebuf) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EAGAIN; + + DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", + eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, + eeprom->len, eeprom->len); + + /* parameters already validated in ethtool_get_eeprom */ + + rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, + u32 cmd_flags) +{ + int count, i, rc; + + /* build the command word */ + cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; + + /* need to clear DONE bit separately */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + + /* write the data */ + REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); + + /* 
address of the NVRAM to write to */ + REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, + (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + + /* issue the write command */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + + /* adjust timeout for emulation/FPGA */ + count = NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* wait for completion */ + rc = -EBUSY; + for (i = 0; i < count; i++) { + udelay(5); + val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); + if (val & MCPR_NVM_COMMAND_DONE) { + rc = 0; + break; + } + } + + return rc; +} + +#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) + +static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + u32 align_offset; + __be32 val; + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); + align_offset = (offset & ~0x03); + rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); + + if (rc == 0) { + val &= ~(0xff << BYTE_OFFSET(offset)); + val |= (*data_buf << BYTE_OFFSET(offset)); + + /* nvram data is returned as an array of bytes + * convert it back to cpu order */ + val = be32_to_cpu(val); + + rc = bnx2x_nvram_write_dword(bp, align_offset, val, + cmd_flags); + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + u32 val; + u32 written_so_far; + + if (buf_size == 1) /* ethtool */ + return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); + + if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { + DP(BNX2X_MSG_NVM, + "Invalid parameter: offset 0x%x buf_size 0x%x\n", + offset, buf_size); + return -EINVAL; + } + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + written_so_far = 0; + cmd_flags = MCPR_NVM_COMMAND_FIRST; + while ((written_so_far < buf_size) && (rc == 0)) { + if (written_so_far == (buf_size - sizeof(u32))) + cmd_flags |= MCPR_NVM_COMMAND_LAST; + else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) + cmd_flags |= MCPR_NVM_COMMAND_LAST; + else if ((offset % NVRAM_PAGE_SIZE) == 0) + cmd_flags |= MCPR_NVM_COMMAND_FIRST; + + memcpy(&val, data_buf, 4); + + rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); + + /* advance to the next dword */ + offset += sizeof(u32); + data_buf += sizeof(u32); + written_so_far += sizeof(u32); + cmd_flags = 0; + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *eebuf) +{ + struct bnx2x *bp = netdev_priv(dev); + int port = BP_PORT(bp); + int rc = 0; + + if (!netif_running(dev)) + return -EAGAIN; + + 
DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", + eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, + eeprom->len, eeprom->len); + + /* parameters already validated in ethtool_set_eeprom */ + + /* PHY eeprom can be accessed only by the PMF */ + if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && + !bp->port.pmf) + return -EINVAL; + + if (eeprom->magic == 0x50485950) { + /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + bnx2x_acquire_phy_lock(bp); + rc |= bnx2x_link_reset(&bp->link_params, + &bp->link_vars, 0); + if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, port); + bnx2x_release_phy_lock(bp); + bnx2x_link_report(bp); + + } else if (eeprom->magic == 0x50485952) { + /* 'PHYR' (0x50485952): re-init link after FW upgrade */ + if (bp->state == BNX2X_STATE_OPEN) { + bnx2x_acquire_phy_lock(bp); + rc |= bnx2x_link_reset(&bp->link_params, + &bp->link_vars, 1); + + rc |= bnx2x_phy_init(&bp->link_params, + &bp->link_vars); + bnx2x_release_phy_lock(bp); + bnx2x_calc_fc_adv(bp); + } + } else if (eeprom->magic == 0x53985943) { + /* 'PHYC' (0x53985943): PHY FW upgrade completed */ + if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { + u8 ext_phy_addr = + XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); + + /* DSP Remove Download Mode */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_LOW, port); + + bnx2x_acquire_phy_lock(bp); + + bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); + + /* wait 0.5 sec to allow it to run */ + msleep(500); + bnx2x_ext_phy_hw_reset(bp, port); + msleep(500); + bnx2x_release_phy_lock(bp); + } + } else + rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} +static int bnx2x_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnx2x *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + + coal->rx_coalesce_usecs = bp->rx_ticks; + coal->tx_coalesce_usecs = bp->tx_ticks; + + return 0; +} + +static int bnx2x_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnx2x *bp = netdev_priv(dev); + + bp->rx_ticks = (u16)coal->rx_coalesce_usecs; + if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; + + bp->tx_ticks = (u16)coal->tx_coalesce_usecs; + if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; + + if (netif_running(dev)) + bnx2x_update_coalesce(bp); + + return 0; +} + +static void bnx2x_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnx2x *bp = netdev_priv(dev); + + ering->rx_max_pending = MAX_RX_AVAIL; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + + ering->rx_pending = bp->rx_ring_size; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + + ering->tx_max_pending = MAX_TX_AVAIL; + ering->tx_pending = bp->tx_ring_size; +} + +static int bnx2x_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); + return -EAGAIN; + } + + if ((ering->rx_pending > MAX_RX_AVAIL) || + (ering->tx_pending > MAX_TX_AVAIL) || + (ering->tx_pending <= MAX_SKB_FRAGS + 4)) + return -EINVAL; + + bp->rx_ring_size = ering->rx_pending; + bp->tx_ring_size = ering->tx_pending; + + if (netif_running(dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } + + return rc; +} + +static void bnx2x_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnx2x *bp = netdev_priv(dev); + + epause->autoneg = (bp->link_params.req_flow_ctrl == + BNX2X_FLOW_CTRL_AUTO) && + (bp->link_params.req_line_speed == SPEED_AUTO_NEG); + + epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == + BNX2X_FLOW_CTRL_RX); + epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == + BNX2X_FLOW_CTRL_TX); + + DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" + DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); +} + +static int bnx2x_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (IS_E1HMF(bp)) + return 0; + + DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" + DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); + + bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; + + if (epause->rx_pause) + bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; + + if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) + bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + if (epause->autoneg) { + if (!(bp->port.supported & SUPPORTED_Autoneg)) { + DP(NETIF_MSG_LINK, "autoneg not supported\n"); + return -EINVAL; + } + + if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) + bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; + } + + DP(NETIF_MSG_LINK, + "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +static int bnx2x_set_flags(struct net_device *dev, u32 data) +{ + struct bnx2x *bp = netdev_priv(dev); + int changed = 0; + int rc = 0; + + if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH)) + return -EINVAL; + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + /* TPA requires Rx CSUM offloading */ + if ((data & ETH_FLAG_LRO) && bp->rx_csum) { + if (!bp->disable_tpa) { + if (!(dev->features & NETIF_F_LRO)) { + dev->features |= NETIF_F_LRO; + bp->flags |= TPA_ENABLE_FLAG; + changed = 1; + } + } else + rc = -EINVAL; + } else if (dev->features & NETIF_F_LRO) { + dev->features &= ~NETIF_F_LRO; + bp->flags &= ~TPA_ENABLE_FLAG; + changed = 1; + } + + if (data & ETH_FLAG_RXHASH) + dev->features |= NETIF_F_RXHASH; + else + dev->features &= ~NETIF_F_RXHASH; + + if (changed && netif_running(dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } + + return rc; +} + +static u32 bnx2x_get_rx_csum(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->rx_csum; +} + +static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); + return -EAGAIN; + } + + bp->rx_csum = data; + + /* Disable TPA, when Rx CSUM is disabled. Otherwise all + TPA'ed packets will be discarded due to wrong TCP CSUM */ + if (!data) { + u32 flags = ethtool_op_get_flags(dev); + + rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO)); + } + + return rc; +} + +static int bnx2x_set_tso(struct net_device *dev, u32 data) +{ + if (data) { + dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); + dev->features |= NETIF_F_TSO6; + } else { + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN); + dev->features &= ~NETIF_F_TSO6; + } + + return 0; +} + +static const struct { + char string[ETH_GSTRING_LEN]; +} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { + { "register_test (offline)" }, + { "memory_test (offline)" }, + { "loopback_test (offline)" }, + { "nvram_test (online)" }, + { "interrupt_test (online)" }, + { "link_test (online)" }, + { "idle check (online)" } +}; + +static int bnx2x_test_registers(struct bnx2x *bp) +{ + int idx, i, rc = -ENODEV; + u32 wr_val = 0; + int port = BP_PORT(bp); + static const struct { + u32 offset0; + u32 offset1; + u32 mask; + } reg_tbl[] = { +/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, + { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, + { HC_REG_AGG_INT_0, 4, 0x000003ff }, + { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, + { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, + { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, + { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, + { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, + { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, +/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, + { QM_REG_CONNNUM_0, 4, 0x000fffff }, + { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, + { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, + { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, + { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, + { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, + { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, + { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, + { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, +/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, + { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, + { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, + { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, + { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, + { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, + { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, + { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, + { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, + { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, +/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, + { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, + { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, + { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, + { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, + { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, + { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, + + { 0xffffffff, 0, 0x00000000 } + }; + + if (!netif_running(bp->dev)) + return rc; + + /* Repeat the test twice: + First by writing 0x00000000, second by writing 0xffffffff */ + for (idx = 0; idx < 2; idx++) { + + switch (idx) { + case 0: + wr_val = 0; + break; + case 1: + wr_val = 0xffffffff; + break; + } + + for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { + u32 offset, mask, save_val, val; + + offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; + mask = reg_tbl[i].mask; + + save_val = REG_RD(bp, offset); + + REG_WR(bp, offset, (wr_val & mask)); + val = REG_RD(bp, offset); + + /* Restore the original 
register's value */ + REG_WR(bp, offset, save_val); + + /* verify value is as expected */ + if ((val & mask) != (wr_val & mask)) { + DP(NETIF_MSG_PROBE, + "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", + offset, val, wr_val, mask); + goto test_reg_exit; + } + } + } + + rc = 0; + +test_reg_exit: + return rc; +} + +static int bnx2x_test_memory(struct bnx2x *bp) +{ + int i, j, rc = -ENODEV; + u32 val; + static const struct { + u32 offset; + int size; + } mem_tbl[] = { + { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, + { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, + { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, + { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, + { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, + { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, + { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, + + { 0xffffffff, 0 } + }; + static const struct { + char *name; + u32 offset; + u32 e1_mask; + u32 e1h_mask; + } prty_tbl[] = { + { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, + { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, + { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, + { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, + { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, + { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, + + { NULL, 0xffffffff, 0, 0 } + }; + + if (!netif_running(bp->dev)) + return rc; + + /* Go through all the memories */ + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) + for (j = 0; j < mem_tbl[i].size; j++) + REG_RD(bp, mem_tbl[i].offset + j*4); + + /* Check the parity status */ + for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { + val = REG_RD(bp, prty_tbl[i].offset); + if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || + (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { + DP(NETIF_MSG_HW, + "%s is 0x%x\n", prty_tbl[i].name, val); + goto test_mem_exit; + } + } + + rc = 0; + +test_mem_exit: + return rc; +} + +static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) +{ + int cnt = 1000; + + if (link_up) + while (bnx2x_link_test(bp) && cnt--) + msleep(10); +} + +static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) +{ + unsigned int pkt_size, num_pkts, i; + struct sk_buff *skb; + unsigned char *packet; + struct bnx2x_fastpath *fp_rx = &bp->fp[0]; + struct bnx2x_fastpath *fp_tx = &bp->fp[0]; + u16 tx_start_idx, tx_idx; + u16 rx_start_idx, rx_idx; + u16 pkt_prod, bd_prod; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_parse_bd *pbd = NULL; + dma_addr_t mapping; + union eth_rx_cqe *cqe; + u8 cqe_fp_flags; + struct sw_rx_bd *rx_buf; + u16 len; + int rc = -ENODEV; + + /* check the loopback mode */ + switch (loopback_mode) { + case BNX2X_PHY_LOOPBACK: + if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) + return -EINVAL; + break; + case BNX2X_MAC_LOOPBACK: + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + break; + default: + return -EINVAL; + } + + /* prepare the loopback packet */ + pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? 
+ bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); + skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + if (!skb) { + rc = -ENOMEM; + goto test_loopback_exit; + } + packet = skb_put(skb, pkt_size); + memcpy(packet, bp->dev->dev_addr, ETH_ALEN); + memset(packet + ETH_ALEN, 0, ETH_ALEN); + memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); + for (i = ETH_HLEN; i < pkt_size; i++) + packet[i] = (unsigned char) (i & 0xff); + + /* send the loopback packet */ + num_pkts = 0; + tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); + rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + + pkt_prod = fp_tx->tx_pkt_prod++; + tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; + tx_buf->first_bd = fp_tx->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + bd_prod = TX_BD(fp_tx->tx_bd_prod); + tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + tx_start_bd->vlan = cpu_to_le16(pkt_prod); + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + tx_start_bd->general_data = ((UNICAST_ADDRESS << + ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; + + memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); + + wmb(); + + fp_tx->tx_db.data.prod += 2; + barrier(); + DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); + + mmiowb(); + + num_pkts++; + fp_tx->tx_bd_prod += 2; /* start + pbd */ + + udelay(100); + + tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); + if (tx_idx != tx_start_idx + num_pkts) + goto test_loopback_exit; + + rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + if (rx_idx != rx_start_idx + num_pkts) + goto test_loopback_exit; + + cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; + cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) + goto test_loopback_rx_exit; + + len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); + if (len != pkt_size) + goto test_loopback_rx_exit; + + rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; + skb = rx_buf->skb; + skb_reserve(skb, cqe->fast_path_cqe.placement_offset); + for (i = ETH_HLEN; i < pkt_size; i++) + if (*(skb->data + i) != (unsigned char) (i & 0xff)) + goto test_loopback_rx_exit; + + rc = 0; + +test_loopback_rx_exit: + + fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); + fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); + fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); + fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); + + /* Update producers */ + bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, + fp_rx->rx_sge_prod); + +test_loopback_exit: + bp->link_params.loopback_mode = LOOPBACK_NONE; + + return rc; +} + +static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) +{ + int rc = 0, res; + + if (BP_NOMCP(bp)) + return rc; + + if (!netif_running(bp->dev)) + return BNX2X_LOOPBACK_FAILED; + + bnx2x_netif_stop(bp, 1); + bnx2x_acquire_phy_lock(bp); + + res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); + if (res) { + DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); + rc |= BNX2X_PHY_LOOPBACK_FAILED; + } + + res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); + if (res) { + DP(NETIF_MSG_PROBE, " MAC 
loopback failed (res %d)\n", res); + rc |= BNX2X_MAC_LOOPBACK_FAILED; + } + + bnx2x_release_phy_lock(bp); + bnx2x_netif_start(bp); + + return rc; +} + +#define CRC32_RESIDUAL 0xdebb20e3 + +static int bnx2x_test_nvram(struct bnx2x *bp) +{ + static const struct { + int offset; + int size; + } nvram_tbl[] = { + { 0, 0x14 }, /* bootstrap */ + { 0x14, 0xec }, /* dir */ + { 0x100, 0x350 }, /* manuf_info */ + { 0x450, 0xf0 }, /* feature_info */ + { 0x640, 0x64 }, /* upgrade_key_info */ + { 0x6a4, 0x64 }, + { 0x708, 0x70 }, /* manuf_key_info */ + { 0x778, 0x70 }, + { 0, 0 } + }; + __be32 buf[0x350 / 4]; + u8 *data = (u8 *)buf; + int i, rc; + u32 magic, crc; + + if (BP_NOMCP(bp)) + return 0; + + rc = bnx2x_nvram_read(bp, 0, data, 4); + if (rc) { + DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); + goto test_nvram_exit; + } + + magic = be32_to_cpu(buf[0]); + if (magic != 0x669955aa) { + DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); + rc = -ENODEV; + goto test_nvram_exit; + } + + for (i = 0; nvram_tbl[i].size; i++) { + + rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, + nvram_tbl[i].size); + if (rc) { + DP(NETIF_MSG_PROBE, + "nvram_tbl[%d] read data (rc %d)\n", i, rc); + goto test_nvram_exit; + } + + crc = ether_crc_le(nvram_tbl[i].size, data); + if (crc != CRC32_RESIDUAL) { + DP(NETIF_MSG_PROBE, + "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); + rc = -ENODEV; + goto test_nvram_exit; + } + } + +test_nvram_exit: + return rc; +} + +static int bnx2x_test_intr(struct bnx2x *bp) +{ + struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); + int i, rc; + + if (!netif_running(bp->dev)) + return -ENODEV; + + config->hdr.length = 0; + if (CHIP_IS_E1(bp)) + /* use last unicast entries */ + config->hdr.offset = (BP_PORT(bp) ? 63 : 31); + else + config->hdr.offset = BP_FUNC(bp); + config->hdr.client_id = bp->fp->cl_id; + config->hdr.reserved1 = 0; + + bp->set_mac_pending++; + smp_wmb(); + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, + U64_HI(bnx2x_sp_mapping(bp, mac_config)), + U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); + if (rc == 0) { + for (i = 0; i < 10; i++) { + if (!bp->set_mac_pending) + break; + smp_rmb(); + msleep_interruptible(10); + } + if (i == 10) + rc = -ENODEV; + } + + return rc; +} + +static void bnx2x_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + + memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); + + if (!netif_running(dev)) + return; + + /* offline tests are not supported in MF mode */ + if (IS_E1HMF(bp)) + etest->flags &= ~ETH_TEST_FL_OFFLINE; + + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int port = BP_PORT(bp); + u32 val; + u8 link_up; + + /* save current value of input enable for TX port IF */ + val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); + /* disable input for TX port IF */ + REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); + + link_up = (bnx2x_link_test(bp) == 0); + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_load(bp, LOAD_DIAG); + /* wait until link state is restored */ + bnx2x_wait_for_link(bp, link_up); + + if (bnx2x_test_registers(bp) != 0) { + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2x_test_memory(bp) != 0) { + buf[1] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + buf[2] = bnx2x_test_loopback(bp, link_up); + if (buf[2] != 0) + etest->flags |= ETH_TEST_FL_FAILED; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + + /* restore input for TX port IF */ + REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); + + bnx2x_nic_load(bp, LOAD_NORMAL); + /* wait until link state is restored */ + bnx2x_wait_for_link(bp, link_up); + } + if (bnx2x_test_nvram(bp) != 0) { + buf[3] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2x_test_intr(bp) != 0) { + buf[4] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bp->port.pmf) + if (bnx2x_link_test(bp) != 0) { + buf[5] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + +#ifdef BNX2X_EXTRA_DEBUG + bnx2x_panic_dump(bp); +#endif +} + +static const struct { + long offset; + int size; + u8 string[ETH_GSTRING_LEN]; +} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = { +/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" }, + { Q_STATS_OFFSET32(error_bytes_received_hi), + 8, "[%d]: rx_error_bytes" }, + { Q_STATS_OFFSET32(total_unicast_packets_received_hi), + 8, "[%d]: rx_ucast_packets" }, + { Q_STATS_OFFSET32(total_multicast_packets_received_hi), + 8, "[%d]: rx_mcast_packets" }, + { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), + 8, "[%d]: rx_bcast_packets" }, + { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" }, + { Q_STATS_OFFSET32(rx_err_discard_pkt), + 4, "[%d]: rx_phy_ip_err_discards"}, + { Q_STATS_OFFSET32(rx_skb_alloc_failed), + 4, "[%d]: rx_skb_alloc_discard" }, + { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" }, + +/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, + { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), + 8, "[%d]: tx_ucast_packets" }, + { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), + 8, "[%d]: tx_mcast_packets" }, + { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), + 8, "[%d]: tx_bcast_packets" } +}; + +static const struct { + long offset; + int size; + u32 flags; +#define STATS_FLAGS_PORT 1 +#define STATS_FLAGS_FUNC 2 +#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) + u8 string[ETH_GSTRING_LEN]; +} bnx2x_stats_arr[BNX2X_NUM_STATS] = { +/* 1 */ { STATS_OFFSET32(total_bytes_received_hi), + 8, STATS_FLAGS_BOTH, "rx_bytes" }, + { STATS_OFFSET32(error_bytes_received_hi), + 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, + { STATS_OFFSET32(total_unicast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, + { STATS_OFFSET32(total_multicast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, + { 
STATS_OFFSET32(total_broadcast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, + { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), + 8, STATS_FLAGS_PORT, "rx_crc_errors" }, + { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), + 8, STATS_FLAGS_PORT, "rx_align_errors" }, + { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), + 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, + { STATS_OFFSET32(etherstatsoverrsizepkts_hi), + 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, +/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), + 8, STATS_FLAGS_PORT, "rx_fragments" }, + { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), + 8, STATS_FLAGS_PORT, "rx_jabbers" }, + { STATS_OFFSET32(no_buff_discard_hi), + 8, STATS_FLAGS_BOTH, "rx_discards" }, + { STATS_OFFSET32(mac_filter_discard), + 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, + { STATS_OFFSET32(xxoverflow_discard), + 4, STATS_FLAGS_PORT, "rx_fw_discards" }, + { STATS_OFFSET32(brb_drop_hi), + 8, STATS_FLAGS_PORT, "rx_brb_discard" }, + { STATS_OFFSET32(brb_truncate_hi), + 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, + { STATS_OFFSET32(pause_frames_received_hi), + 8, STATS_FLAGS_PORT, "rx_pause_frames" }, + { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), + 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, + { STATS_OFFSET32(nig_timer_max), + 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, +/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), + 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, + { STATS_OFFSET32(rx_skb_alloc_failed), + 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, + { STATS_OFFSET32(hw_csum_err), + 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, + + { STATS_OFFSET32(total_bytes_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_bytes" }, + { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), + 8, STATS_FLAGS_PORT, "tx_error_bytes" }, + { STATS_OFFSET32(total_unicast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, + { STATS_OFFSET32(total_multicast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, + { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, + { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), + 8, STATS_FLAGS_PORT, "tx_mac_errors" }, + { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), + 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, +/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), + 8, STATS_FLAGS_PORT, "tx_single_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), + 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), + 8, STATS_FLAGS_PORT, "tx_deferred" }, + { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), + 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), + 8, STATS_FLAGS_PORT, "tx_late_collisions" }, + { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), + 8, STATS_FLAGS_PORT, "tx_total_collisions" }, + { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), + 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), + 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), + 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), + 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, +/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), + 8, 
STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, + { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), + 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, + { STATS_OFFSET32(etherstatspktsover1522octets_hi), + 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, + { STATS_OFFSET32(pause_frames_sent_hi), + 8, STATS_FLAGS_PORT, "tx_pause_frames" } +}; + +#define IS_PORT_STAT(i) \ + ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) +#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) +#define IS_E1HMF_MODE_STAT(bp) \ + (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) + +static int bnx2x_get_sset_count(struct net_device *dev, int stringset) +{ + struct bnx2x *bp = netdev_priv(dev); + int i, num_stats; + + switch (stringset) { + case ETH_SS_STATS: + if (is_multi(bp)) { + num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; + if (!IS_E1HMF_MODE_STAT(bp)) + num_stats += BNX2X_NUM_STATS; + } else { + if (IS_E1HMF_MODE_STAT(bp)) { + num_stats = 0; + for (i = 0; i < BNX2X_NUM_STATS; i++) + if (IS_FUNC_STAT(i)) + num_stats++; + } else + num_stats = BNX2X_NUM_STATS; + } + return num_stats; + + case ETH_SS_TEST: + return BNX2X_NUM_TESTS; + + default: + return -EINVAL; + } +} + +static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + int i, j, k; + + switch (stringset) { + case ETH_SS_STATS: + if (is_multi(bp)) { + k = 0; + for_each_queue(bp, i) { + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) + sprintf(buf + (k + j)*ETH_GSTRING_LEN, + bnx2x_q_stats_arr[j].string, i); + k += BNX2X_NUM_Q_STATS; + } + if (IS_E1HMF_MODE_STAT(bp)) + break; + for (j = 0; j < BNX2X_NUM_STATS; j++) + strcpy(buf + (k + j)*ETH_GSTRING_LEN, + bnx2x_stats_arr[j].string); + } else { + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) + continue; + strcpy(buf + j*ETH_GSTRING_LEN, + bnx2x_stats_arr[i].string); + j++; + } + } + break; + + case ETH_SS_TEST: + memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); + break; + } +} + +static void bnx2x_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 *hw_stats, *offset; + int i, j, k; + + if (is_multi(bp)) { + k = 0; + for_each_queue(bp, i) { + hw_stats = (u32 *)&bp->fp[i].eth_q_stats; + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { + if (bnx2x_q_stats_arr[j].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + continue; + } + offset = (hw_stats + + bnx2x_q_stats_arr[j].offset); + if (bnx2x_q_stats_arr[j].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; + continue; + } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + } + k += BNX2X_NUM_Q_STATS; + } + if (IS_E1HMF_MODE_STAT(bp)) + return; + hw_stats = (u32 *)&bp->eth_stats; + for (j = 0; j < BNX2X_NUM_STATS; j++) { + if (bnx2x_stats_arr[j].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + continue; + } + offset = (hw_stats + bnx2x_stats_arr[j].offset); + if (bnx2x_stats_arr[j].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; + continue; + } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + } + } else { + hw_stats = (u32 *)&bp->eth_stats; + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) + continue; + if (bnx2x_stats_arr[i].size == 0) { + /* skip this counter */ + buf[j] = 0; + j++; + continue; + } + offset = (hw_stats + bnx2x_stats_arr[i].offset); + if 
(bnx2x_stats_arr[i].size == 4) { + /* 4-byte counter */ + buf[j] = (u64) *offset; + j++; + continue; + } + /* 8-byte counter */ + buf[j] = HILO_U64(*offset, *(offset + 1)); + j++; + } + } +} + +static int bnx2x_phys_id(struct net_device *dev, u32 data) +{ + struct bnx2x *bp = netdev_priv(dev); + int i; + + if (!netif_running(dev)) + return 0; + + if (!bp->port.pmf) + return 0; + + if (data == 0) + data = 2; + + for (i = 0; i < (data * 2); i++) { + if ((i % 2) == 0) + bnx2x_set_led(&bp->link_params, LED_MODE_OPER, + SPEED_1000); + else + bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); + + msleep_interruptible(500); + if (signal_pending(current)) + break; + } + + if (bp->link_vars.link_up) + bnx2x_set_led(&bp->link_params, LED_MODE_OPER, + bp->link_vars.line_speed); + + return 0; +} + +static const struct ethtool_ops bnx2x_ethtool_ops = { + .get_settings = bnx2x_get_settings, + .set_settings = bnx2x_set_settings, + .get_drvinfo = bnx2x_get_drvinfo, + .get_regs_len = bnx2x_get_regs_len, + .get_regs = bnx2x_get_regs, + .get_wol = bnx2x_get_wol, + .set_wol = bnx2x_set_wol, + .get_msglevel = bnx2x_get_msglevel, + .set_msglevel = bnx2x_set_msglevel, + .nway_reset = bnx2x_nway_reset, + .get_link = bnx2x_get_link, + .get_eeprom_len = bnx2x_get_eeprom_len, + .get_eeprom = bnx2x_get_eeprom, + .set_eeprom = bnx2x_set_eeprom, + .get_coalesce = bnx2x_get_coalesce, + .set_coalesce = bnx2x_set_coalesce, + .get_ringparam = bnx2x_get_ringparam, + .set_ringparam = bnx2x_set_ringparam, + .get_pauseparam = bnx2x_get_pauseparam, + .set_pauseparam = bnx2x_set_pauseparam, + .get_rx_csum = bnx2x_get_rx_csum, + .set_rx_csum = bnx2x_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .set_flags = bnx2x_set_flags, + .get_flags = ethtool_op_get_flags, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = bnx2x_set_tso, + .self_test = bnx2x_self_test, + .get_sset_count = bnx2x_get_sset_count, + .get_strings = bnx2x_get_strings, + .phys_id = bnx2x_phys_id, + .get_ethtool_stats = bnx2x_get_ethtool_stats, +}; + +void bnx2x_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); +} diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h similarity index 100% rename from drivers/net/bnx2x_fw_defs.h rename to drivers/net/bnx2x/bnx2x_fw_defs.h diff --git a/drivers/net/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h similarity index 100% rename from drivers/net/bnx2x_fw_file_hdr.h rename to drivers/net/bnx2x/bnx2x_fw_file_hdr.h diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h similarity index 100% rename from drivers/net/bnx2x_hsi.h rename to drivers/net/bnx2x/bnx2x_hsi.h diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h similarity index 100% rename from drivers/net/bnx2x_init.h rename to drivers/net/bnx2x/bnx2x_init.h diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h similarity index 100% rename from drivers/net/bnx2x_init_ops.h rename to drivers/net/bnx2x/bnx2x_init_ops.h diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c similarity index 99% rename from drivers/net/bnx2x_link.c rename to drivers/net/bnx2x/bnx2x_link.c index ff70be898765d79b5ca45e83332fb4bd810de948..0383e30663135355d9de8ea79bc7b926fe162a7d 100644 --- a/drivers/net/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -4266,14 +4266,16 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct 
link_vars *vars) MDIO_PMA_REG_10G_CTRL2, 0x0008); } - /* Set 2-wire transfer rate to 400Khz since 100Khz - is not operational */ + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100Khz since some DACs(direct attached cables) do + * not work at 400Khz. + */ bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, - 0xa101); + 0xa001); /* Set TX PreEmphasis if needed */ if ((params->feature_config_flags & diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h similarity index 100% rename from drivers/net/bnx2x_link.h rename to drivers/net/bnx2x/bnx2x_link.h diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c similarity index 56% rename from drivers/net/bnx2x_main.c rename to drivers/net/bnx2x/bnx2x_main.c index 46167c0817274b793348039ea50db4754003b98e..b4ec2b02a465cf7d822f23700c119d6e93a2121b 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -51,15 +51,12 @@ #include #include - +#define BNX2X_MAIN #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" -#include "bnx2x_dump.h" +#include "bnx2x_cmn.h" -#define DRV_MODULE_VERSION "1.52.53-2" -#define DRV_MODULE_RELDATE "2010/21/07" -#define BNX2X_BC_VER 0x040200 #include #include "bnx2x_fw_file_hdr.h" @@ -121,8 +118,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); -static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ - static struct workqueue_struct *bnx2x_wq; enum bnx2x_board_type { @@ -177,7 +172,7 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) return val; } -static const u32 dmae_reg_go_c[] = { +const u32 dmae_reg_go_c[] = { DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, @@ -185,8 +180,7 @@ static const u32 dmae_reg_go_c[] = { }; /* copy command into DMAE command memory and set DMAE command go */ -static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, - int idx) +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) { u32 cmd_offset; int i; @@ -541,7 +535,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp) pr_err("end of fw dump\n"); } -static void bnx2x_panic_dump(struct bnx2x *bp) +void bnx2x_panic_dump(struct bnx2x *bp) { int i; u16 j, start, end; @@ -654,7 +648,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) BNX2X_ERR("end crash dump -----------------\n"); } -static void bnx2x_int_enable(struct bnx2x *bp) +void bnx2x_int_enable(struct bnx2x *bp) { int port = BP_PORT(bp); u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; @@ -736,7 +730,7 @@ static void bnx2x_int_disable(struct bnx2x *bp) BNX2X_ERR("BUG! proper val not read from IGU!\n"); } -static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) { int msix = (bp->flags & USING_MSIX_FLAG) ? 
1 : 0; int i, offset; @@ -806,235 +800,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) return false; } -static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, - u8 storm, u16 index, u8 op, u8 update) -{ - u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + - COMMAND_REG_INT_ACK); - struct igu_ack_register igu_ack; - - igu_ack.status_block_index = index; - igu_ack.sb_id_and_flags = - ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | - (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | - (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | - (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); - - DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", - (*(u32 *)&igu_ack), hc_addr); - REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); - - /* Make sure that ACK is written */ - mmiowb(); - barrier(); -} - -static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) -{ - struct host_status_block *fpsb = fp->status_blk; - - barrier(); /* status block is written to by the chip */ - fp->fp_c_idx = fpsb->c_status_block.status_block_index; - fp->fp_u_idx = fpsb->u_status_block.status_block_index; -} - -static u16 bnx2x_ack_int(struct bnx2x *bp) -{ - u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + - COMMAND_REG_SIMD_MASK); - u32 result = REG_RD(bp, hc_addr); - - DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", - result, hc_addr); - - return result; -} - - -/* - * fast path service functions - */ - -static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) -{ - /* Tell compiler that consumer and producer can change */ - barrier(); - return (fp->tx_pkt_prod != fp->tx_pkt_cons); -} - -/* free skb in the packet ring at pos idx - * return idx of last bd freed - */ -static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 idx) -{ - struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; - struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_bd *tx_data_bd; - struct sk_buff *skb = tx_buf->skb; - u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; - int nbd; - - /* prefetch skb end pointer to speedup dev_kfree_skb() */ - prefetch(&skb->end); - - DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", - idx, tx_buf, skb); - - /* unmap first bd */ - DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); - tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); - - nbd = le16_to_cpu(tx_start_bd->nbd) - 1; -#ifdef BNX2X_STOP_ON_ERROR - if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { - BNX2X_ERR("BAD nbd!\n"); - bnx2x_panic(); - } -#endif - new_cons = nbd + tx_buf->first_bd; - - /* Get the next bd */ - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - - /* Skip a parse bd... 
*/ - --nbd; - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - - /* ...and the TSO split header bd since they have no mapping */ - if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { - --nbd; - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - } - - /* now free frags */ - while (nbd > 0) { - - DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); - tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; - dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - if (--nbd) - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - } - - /* release skb */ - WARN_ON(!skb); - dev_kfree_skb(skb); - tx_buf->first_bd = 0; - tx_buf->skb = NULL; - - return new_cons; -} - -static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) -{ - s16 used; - u16 prod; - u16 cons; - - prod = fp->tx_bd_prod; - cons = fp->tx_bd_cons; - - /* NUM_TX_RINGS = number of "next-page" entries - It will be used as a threshold */ - used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; - -#ifdef BNX2X_STOP_ON_ERROR - WARN_ON(used < 0); - WARN_ON(used > fp->bp->tx_ring_size); - WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); -#endif - - return (s16)(fp->bp->tx_ring_size) - used; -} - -static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) -{ - u16 hw_cons; - - /* Tell compiler that status block fields can change */ - barrier(); - hw_cons = le16_to_cpu(*fp->tx_cons_sb); - return hw_cons != fp->tx_pkt_cons; -} - -static int bnx2x_tx_int(struct bnx2x_fastpath *fp) -{ - struct bnx2x *bp = fp->bp; - struct netdev_queue *txq; - u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -1; -#endif - - txq = netdev_get_tx_queue(bp->dev, fp->index); - hw_cons = le16_to_cpu(*fp->tx_cons_sb); - sw_cons = fp->tx_pkt_cons; - - while (sw_cons != hw_cons) { - u16 pkt_cons; - - pkt_cons = TX_BD(sw_cons); - - /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ - - DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", - hw_cons, sw_cons, pkt_cons); - -/* if (NEXT_TX_IDX(sw_cons) != hw_cons) { - rmb(); - prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb); - } -*/ - bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); - sw_cons++; - } - - fp->tx_pkt_cons = sw_cons; - fp->tx_bd_cons = bd_cons; - - /* Need to make the tx_bd_cons update visible to start_xmit() - * before checking for netif_tx_queue_stopped(). Without the - * memory barrier, there is a small possibility that - * start_xmit() will miss it and cause the queue to be stopped - * forever. - */ - smp_mb(); - - /* TBD need a thresh? */ - if (unlikely(netif_tx_queue_stopped(txq))) { - /* Taking tx_lock() is needed to prevent reenabling the queue - * while it's empty. 
This could have happen if rx_action() gets - * suspended in bnx2x_tx_int() after the condition before - * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): - * - * stops the queue->sees fresh tx_bd_cons->releases the queue-> - * sends some packets consuming the whole queue again-> - * stops the queue - */ - - __netif_tx_lock(txq, smp_processor_id()); - - if ((netif_tx_queue_stopped(txq)) && - (bp->state == BNX2X_STATE_OPEN) && - (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) - netif_tx_wake_queue(txq); - - __netif_tx_unlock(txq); - } - return 0; -} #ifdef BCM_CNIC static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); #endif -static void bnx2x_sp_event(struct bnx2x_fastpath *fp, +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) { struct bnx2x *bp = fp->bp; @@ -1118,11414 +889,5906 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, mb(); /* force bnx2x_wait_ramrod() to see the change */ } -static inline void bnx2x_free_rx_sge(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; - struct page *page = sw_buf->page; - struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; - - /* Skip "next page" elements */ - if (!page) - return; - - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); - __free_pages(page, PAGES_PER_SGE_SHIFT); - - sw_buf->page = NULL; - sge->addr_hi = 0; - sge->addr_lo = 0; -} - -static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, - struct bnx2x_fastpath *fp, int last) +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) { + struct bnx2x *bp = netdev_priv(dev_instance); + u16 status = bnx2x_ack_int(bp); + u16 mask; int i; - for (i = 0; i < last; i++) - bnx2x_free_rx_sge(bp, fp, i); -} - -static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); - struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; - struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; - dma_addr_t mapping; - - if (unlikely(page == NULL)) - return -ENOMEM; + /* Return here if interrupt is shared and it's not for us */ + if (unlikely(status == 0)) { + DP(NETIF_MSG_INTR, "not our interrupt!\n"); + return IRQ_NONE; + } + DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); - mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - __free_pages(page, PAGES_PER_SGE_SHIFT); - return -ENOMEM; + /* Return here if interrupt is disabled */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) { + DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); + return IRQ_HANDLED; } - sw_buf->page = page; - dma_unmap_addr_set(sw_buf, mapping, mapping); +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif - sge->addr_hi = cpu_to_le32(U64_HI(mapping)); - sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { + struct bnx2x_fastpath *fp = &bp->fp[i]; - return 0; -} + mask = 0x2 << fp->sb_id; + if (status & mask) { + /* Handle Rx and Tx according to SB id */ + prefetch(fp->rx_cons_sb); + prefetch(&fp->status_blk->u_status_block. + status_block_index); + prefetch(fp->tx_cons_sb); + prefetch(&fp->status_blk->c_status_block. 
+ status_block_index); + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + status &= ~mask; + } + } -static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct sk_buff *skb; - struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; - struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; - dma_addr_t mapping; +#ifdef BCM_CNIC + mask = 0x2 << CNIC_SB_ID(bp); + if (status & (mask | 0x1)) { + struct cnic_ops *c_ops = NULL; - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); - if (unlikely(skb == NULL)) - return -ENOMEM; + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); - mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - dev_kfree_skb(skb); - return -ENOMEM; + status &= ~mask; } +#endif - rx_buf->skb = skb; - dma_unmap_addr_set(rx_buf, mapping, mapping); + if (unlikely(status & 0x1)) { + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + status &= ~0x1; + if (!status) + return IRQ_HANDLED; + } - return 0; -} + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", + status); -/* note that we are not allocating a new skb, - * we are just moving one from cons to prod - * we are not creating a new mapping, - * so there is no need to check for dma_mapping_error(). - */ -static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, - struct sk_buff *skb, u16 cons, u16 prod) -{ - struct bnx2x *bp = fp->bp; - struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; - struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; - struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; - struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + return IRQ_HANDLED; +} - dma_sync_single_for_device(&bp->pdev->dev, - dma_unmap_addr(cons_rx_buf, mapping), - RX_COPY_THRESH, DMA_FROM_DEVICE); +/* end of fast path */ - prod_rx_buf->skb = cons_rx_buf->skb; - dma_unmap_addr_set(prod_rx_buf, mapping, - dma_unmap_addr(cons_rx_buf, mapping)); - *prod_bd = *cons_bd; -} -static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, - u16 idx) -{ - u16 last_max = fp->last_max_sge; +/* Link */ - if (SUB_S16(idx, last_max) > 0) - fp->last_max_sge = idx; -} +/* + * General service functions + */ -static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) { - int i, j; - - for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { - int idx = RX_SGE_CNT * i - 1; + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + int cnt; - for (j = 0; j < 2; j++) { - SGE_MASK_CLEAR_BIT(fp, idx); - idx--; - } + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; } -} - -static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, - struct eth_fast_path_rx_cqe *fp_cqe) -{ - struct bnx2x *bp = fp->bp; - u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - - le16_to_cpu(fp_cqe->len_on_bd)) >> - SGE_PAGE_SHIFT; - u16 last_max, last_elem, first_elem; - u16 delta = 0; - u16 i; - if (!sge_len) - return; + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + 
hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } - /* First mark all used pages */ - for (i = 0; i < sge_len; i++) - SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); + /* Validating that the resource is not already taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) { + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EEXIST; + } - DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", - sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); + /* Try for 5 second every 5ms */ + for (cnt = 0; cnt < 1000; cnt++) { + /* Try to acquire the lock */ + REG_WR(bp, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) + return 0; - /* Here we assume that the last SGE index is the biggest */ - prefetch((void *)(fp->sge_mask)); - bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); + msleep(5); + } + DP(NETIF_MSG_HW, "Timeout\n"); + return -EAGAIN; +} - last_max = RX_SGE(fp->last_max_sge); - last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; - first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; - /* If ring is not full */ - if (last_elem + 1 != first_elem) - last_elem++; + DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); - /* Now update the prod */ - for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { - if (likely(fp->sge_mask[i])) - break; + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } - fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; - delta += RX_SGE_MASK_ELEM_SZ; + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); } - if (delta > 0) { - fp->rx_sge_prod += delta; - /* clear page-end entries */ - bnx2x_clear_sge_mask_next_elems(fp); + /* Validating that the resource is currently taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (!(lock_status & resource_bit)) { + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EFAULT; } - DP(NETIF_MSG_RX_STATUS, - "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", - fp->last_max_sge, fp->rx_sge_prod); + REG_WR(bp, hw_lock_control_reg, resource_bit); + return 0; } -static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) -{ - /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ - memset(fp->sge_mask, 0xff, - (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); - - /* Clear the two last indices in the page to 1: - these are the indices that correspond to the "next" element, - hence will never be indicated and should be removed from - the calculations. 
*/ - bnx2x_clear_sge_mask_next_elems(fp); -} -static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, - struct sk_buff *skb, u16 cons, u16 prod) +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) { - struct bnx2x *bp = fp->bp; - struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; - struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; - struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; - dma_addr_t mapping; - - /* move empty skb from pool to prod and map it */ - prod_rx_buf->skb = fp->tpa_pool[queue].skb; - mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, - bp->rx_buf_size, DMA_FROM_DEVICE); - dma_unmap_addr_set(prod_rx_buf, mapping, mapping); - - /* move partial skb from cons to pool (don't unmap yet) */ - fp->tpa_pool[queue] = *cons_rx_buf; - - /* mark bin state as start - print error if current state != stop */ - if (fp->tpa_state[queue] != BNX2X_TPA_STOP) - BNX2X_ERR("start of bin not in stop [%d]\n", queue); - - fp->tpa_state[queue] = BNX2X_TPA_START; - - /* point prod_bd to new skb */ - prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - -#ifdef BNX2X_STOP_ON_ERROR - fp->tpa_queue_used |= (1 << queue); -#ifdef _ASM_GENERIC_INT_L64_H - DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", -#else - DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", -#endif - fp->tpa_queue_used); -#endif -} - -static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, - struct sk_buff *skb, - struct eth_fast_path_rx_cqe *fp_cqe, - u16 cqe_idx) -{ - struct sw_rx_page *rx_pg, old_rx_pg; - u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); - u32 i, frag_len, frag_size, pages; - int err; - int j; - - frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; - pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; - - /* This is needed in order to enable forwarding support */ - if (frag_size) - skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, - max(frag_size, (u32)len_on_bd)); + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + int value; -#ifdef BNX2X_STOP_ON_ERROR - if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { - BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", - pages, cqe_idx); - BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", - fp_cqe->pkt_len, len_on_bd); - bnx2x_panic(); + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); return -EINVAL; } -#endif - - /* Run through the SGL and compose the fragmented skb */ - for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { - u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); - - /* FW gives the indices of the SGE as if the ring is an array - (meaning that "next" element will consume 2 indices) */ - frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); - rx_pg = &fp->rx_page_ring[sge_idx]; - old_rx_pg = *rx_pg; - - /* If we fail to allocate a substitute page, we simply stop - where we are and drop the whole packet */ - err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); - if (unlikely(err)) { - fp->eth_q_stats.rx_skb_alloc_failed++; - return err; - } - /* Unmap the page as we r going to pass it to the stack */ - dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - - /* Add one frag and update the appropriate fields in the skb */ - skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + /* read GPIO value */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); - skb->data_len += frag_len; - skb->truesize += frag_len; - skb->len += frag_len; + /* get the requested pin value */ + if ((gpio_reg & gpio_mask) == gpio_mask) + value = 1; + else + value = 0; - frag_size -= frag_len; - } + DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); - return 0; + return value; } -static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 queue, int pad, int len, union eth_rx_cqe *cqe, - u16 cqe_idx) +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) { - struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; - struct sk_buff *skb = rx_buf->skb; - /* alloc new skb */ - struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); - - /* Unmap skb in the pool anyway, as we are going to change - pool entry status to BNX2X_TPA_STOP even if new skb allocation - fails. */ - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, DMA_FROM_DEVICE); - - if (likely(new_skb)) { - /* fix ip xsum and give it to the stack */ - /* (no need to map the new skb) */ -#ifdef BCM_VLAN - int is_vlan_cqe = - (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & - PARSING_FLAGS_VLAN); - int is_not_hwaccel_vlan_cqe = - (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG))); -#endif + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; - prefetch(skb); - prefetch(((char *)(skb)) + 128); + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } -#ifdef BNX2X_STOP_ON_ERROR - if (pad + len > bp->rx_buf_size) { - BNX2X_ERR("skb_put is about to fail... 
" - "pad %d len %d rx_buf_size %d\n", - pad, len, bp->rx_buf_size); - bnx2x_panic(); - return; - } -#endif + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); - skb_reserve(skb, pad); - skb_put(skb, len); + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", + gpio_num, gpio_shift); + /* clear FLOAT and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); + break; - skb->protocol = eth_type_trans(skb, bp->dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", + gpio_num, gpio_shift); + /* clear FLOAT and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); + break; - { - struct iphdr *iph; + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", + gpio_num, gpio_shift); + /* set FLOAT */ + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + break; - iph = (struct iphdr *)skb->data; -#ifdef BCM_VLAN - /* If there is no Rx VLAN offloading - - take VLAN tag into an account */ - if (unlikely(is_not_hwaccel_vlan_cqe)) - iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN); -#endif - iph->check = 0; - iph->check = ip_fast_csum((u8 *)iph, iph->ihl); - } + default: + break; + } - if (!bnx2x_fill_frag_skb(bp, fp, skb, - &cqe->fast_path_cqe, cqe_idx)) { -#ifdef BCM_VLAN - if ((bp->vlgrp != NULL) && is_vlan_cqe && - (!is_not_hwaccel_vlan_cqe)) - vlan_gro_receive(&fp->napi, bp->vlgrp, - le16_to_cpu(cqe->fast_path_cqe. - vlan_tag), skb); - else -#endif - napi_gro_receive(&fp->napi, skb); - } else { - DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" - " - dropping packet!\n"); - dev_kfree_skb(skb); - } + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + return 0; +} - /* put new skb in bin */ - fp->tpa_pool[queue].skb = new_skb; +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; - } else { - /* else drop the packet and keep the buffer in the bin */ - DP(NETIF_MSG_RX_STATUS, - "Failed to allocate new skb - dropping packet!\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; } - fp->tpa_state[queue] = BNX2X_TPA_STOP; -} - -static inline void bnx2x_update_rx_prod(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, - u16 rx_sge_prod) -{ - struct ustorm_eth_rx_producers rx_prods = {0}; - int i; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO int */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); - /* Update producers */ - rx_prods.bd_prod = bd_prod; - rx_prods.cqe_prod = rx_comp_prod; - rx_prods.sge_prod = rx_sge_prod; + switch (mode) { + case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: + DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " + "output low\n", gpio_num, gpio_shift); + /* clear SET and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + break; - /* - * Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since FW will - * assumes BDs must have buffers. - */ - wmb(); + case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: + DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " + "output high\n", gpio_num, gpio_shift); + /* clear CLR and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + break; - for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, - ((u32 *)&rx_prods)[i]); + default: + break; + } - mmiowb(); /* keep prod updates ordered */ + REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - DP(NETIF_MSG_RX_STATUS, - "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", - fp->index, bd_prod, rx_comp_prod, rx_sge_prod); + return 0; } -static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) { - struct bnx2x *bp = fp->bp; - u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; - u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; - int rx_pkt = 0; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return 0; -#endif - - /* CQ "next element" is of the size of the regular element, - that's why it's ok here */ - hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); - if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - hw_comp_cons++; - - bd_cons = fp->rx_bd_cons; - bd_prod = fp->rx_bd_prod; - bd_prod_fw = bd_prod; - sw_comp_cons = fp->rx_comp_cons; - sw_comp_prod = fp->rx_comp_prod; - - /* Memory barrier necessary as speculative reads of the rx - * buffer can be ahead of the index in the status block - */ - rmb(); + u32 spio_mask = (1 << spio_num); + u32 spio_reg; - DP(NETIF_MSG_RX_STATUS, - "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", - fp->index, hw_comp_cons, sw_comp_cons); - - while (sw_comp_cons != hw_comp_cons) { - struct sw_rx_bd *rx_buf = NULL; - struct sk_buff *skb; - union eth_rx_cqe *cqe; - u8 cqe_fp_flags, cqe_fp_status_flags; - u16 len, 
pad; - - comp_ring_cons = RCQ_BD(sw_comp_cons); - bd_prod = RX_BD(bd_prod); - bd_cons = RX_BD(bd_cons); - - /* Prefetch the page containing the BD descriptor - at producer's index. It will be needed when new skb is - allocated */ - prefetch((void *)(PAGE_ALIGN((unsigned long) - (&fp->rx_desc_ring[bd_prod])) - - PAGE_SIZE + 1)); - - cqe = &fp->rx_comp_ring[comp_ring_cons]; - cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; - cqe_fp_status_flags = cqe->fast_path_cqe.status_flags; - - DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" - " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), - cqe_fp_flags, cqe->fast_path_cqe.status_flags, - le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), - le16_to_cpu(cqe->fast_path_cqe.vlan_tag), - le16_to_cpu(cqe->fast_path_cqe.pkt_len)); - - /* is this a slowpath msg? */ - if (unlikely(CQE_TYPE(cqe_fp_flags))) { - bnx2x_sp_event(fp, cqe); - goto next_cqe; - - /* this is an rx packet */ - } else { - rx_buf = &fp->rx_buf_ring[bd_cons]; - skb = rx_buf->skb; - prefetch(skb); - len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); - pad = cqe->fast_path_cqe.placement_offset; - - /* If CQE is marked both TPA_START and TPA_END - it is a non-TPA CQE */ - if ((!fp->disable_tpa) && - (TPA_TYPE(cqe_fp_flags) != - (TPA_TYPE_START | TPA_TYPE_END))) { - u16 queue = cqe->fast_path_cqe.queue_index; - - if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { - DP(NETIF_MSG_RX_STATUS, - "calling tpa_start on queue %d\n", - queue); - - bnx2x_tpa_start(fp, queue, skb, - bd_cons, bd_prod); - goto next_rx; - } + if ((spio_num < MISC_REGISTERS_SPIO_4) || + (spio_num > MISC_REGISTERS_SPIO_7)) { + BNX2X_ERR("Invalid SPIO %d\n", spio_num); + return -EINVAL; + } - if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) { - DP(NETIF_MSG_RX_STATUS, - "calling tpa_stop on queue %d\n", - queue); - - if (!BNX2X_RX_SUM_FIX(cqe)) - BNX2X_ERR("STOP on none TCP " - "data\n"); - - /* This is a size of the linear data - on this skb */ - len = le16_to_cpu(cqe->fast_path_cqe. - len_on_bd); - bnx2x_tpa_stop(bp, fp, queue, pad, - len, cqe, comp_ring_cons); -#ifdef BNX2X_STOP_ON_ERROR - if (bp->panic) - return 0; -#endif + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + /* read SPIO and mask except the float bits */ + spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); - bnx2x_update_sge_prod(fp, - &cqe->fast_path_cqe); - goto next_cqe; - } - } + switch (mode) { + case MISC_REGISTERS_SPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); + /* clear FLOAT and set CLR */ + spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); + break; - dma_sync_single_for_device(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - DMA_FROM_DEVICE); - prefetch(((char *)(skb)) + 128); - - /* is this an error packet? 
*/ - if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { - DP(NETIF_MSG_RX_ERR, - "ERROR flags %x rx packet %u\n", - cqe_fp_flags, sw_comp_cons); - fp->eth_q_stats.rx_err_discard_pkt++; - goto reuse_rx; - } + case MISC_REGISTERS_SPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); + /* clear FLOAT and set SET */ + spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); + break; - /* Since we don't have a jumbo ring - * copy small packets if mtu > 1500 - */ - if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && - (len <= RX_COPY_THRESH)) { - struct sk_buff *new_skb; - - new_skb = netdev_alloc_skb(bp->dev, - len + pad); - if (new_skb == NULL) { - DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped " - "because of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; - goto reuse_rx; - } + case MISC_REGISTERS_SPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); + /* set FLOAT */ + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + break; - /* aligned copy */ - skb_copy_from_linear_data_offset(skb, pad, - new_skb->data + pad, len); - skb_reserve(new_skb, pad); - skb_put(new_skb, len); + default: + break; + } - bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); + REG_WR(bp, MISC_REG_SPIO, spio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); - skb = new_skb; + return 0; +} - } else - if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, - DMA_FROM_DEVICE); - skb_reserve(skb, pad); - skb_put(skb, len); +void bnx2x_calc_fc_adv(struct bnx2x *bp) +{ + switch (bp->link_vars.ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: + bp->port.advertising &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; - } else { - DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped because " - "of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; -reuse_rx: - bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); - goto next_rx; - } + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: + bp->port.advertising |= (ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; - skb->protocol = eth_type_trans(skb, bp->dev); + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: + bp->port.advertising |= ADVERTISED_Asym_Pause; + break; - if ((bp->dev->features & NETIF_F_RXHASH) && - (cqe_fp_status_flags & - ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) - skb->rxhash = le32_to_cpu( - cqe->fast_path_cqe.rss_hash_result); + default: + bp->port.advertising &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + } +} - skb->ip_summed = CHECKSUM_NONE; - if (bp->rx_csum) { - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - fp->eth_q_stats.hw_csum_err++; - } - } - skb_record_rx_queue(skb, fp->index); +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +{ + if (!BP_NOMCP(bp)) { + u8 rc; -#ifdef BCM_VLAN - if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && - (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & - PARSING_FLAGS_VLAN)) - vlan_gro_receive(&fp->napi, bp->vlgrp, - le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb); + /* Initialize link parameters structure variables */ + /* It is recommended to turn off RX FC for jumbo frames + for better performance */ + if (bp->dev->mtu > 5000) + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; else -#endif - napi_gro_receive(&fp->napi, skb); - + bp->link_params.req_fc_auto_adv = 
BNX2X_FLOW_CTRL_BOTH; -next_rx: - rx_buf->skb = NULL; + bnx2x_acquire_phy_lock(bp); - bd_cons = NEXT_RX_IDX(bd_cons); - bd_prod = NEXT_RX_IDX(bd_prod); - bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); - rx_pkt++; -next_cqe: - sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); - sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + if (load_mode == LOAD_DIAG) + bp->link_params.loopback_mode = LOOPBACK_XGXS_10; - if (rx_pkt == budget) - break; - } /* while */ + rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); - fp->rx_bd_cons = bd_cons; - fp->rx_bd_prod = bd_prod_fw; - fp->rx_comp_cons = sw_comp_cons; - fp->rx_comp_prod = sw_comp_prod; + bnx2x_release_phy_lock(bp); - /* Update producers */ - bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, - fp->rx_sge_prod); + bnx2x_calc_fc_adv(bp); - fp->rx_pkt += rx_pkt; - fp->rx_calls++; + if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + bnx2x_link_report(bp); + } - return rx_pkt; + return rc; + } + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + return -EINVAL; } -static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) +void bnx2x_link_set(struct bnx2x *bp) { - struct bnx2x_fastpath *fp = fp_cookie; - struct bnx2x *bp = fp->bp; + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + bnx2x_release_phy_lock(bp); - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return IRQ_HANDLED; - } + bnx2x_calc_fc_adv(bp); + } else + BNX2X_ERR("Bootcode is missing - can not set link\n"); +} - DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", - fp->index, fp->sb_id); - bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); +static void bnx2x__link_reset(struct bnx2x *bp) +{ + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not reset link\n"); +} -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; -#endif +u8 bnx2x_link_test(struct bnx2x *bp) +{ + u8 rc = 0; - /* Handle Rx and Tx according to MSI-X vector */ - prefetch(fp->rx_cons_sb); - prefetch(fp->tx_cons_sb); - prefetch(&fp->status_blk->u_status_block.status_block_index); - prefetch(&fp->status_blk->c_status_block.status_block_index); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not test link\n"); - return IRQ_HANDLED; + return rc; } -static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) +static void bnx2x_init_port_minmax(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev_instance); - u16 status = bnx2x_ack_int(bp); - u16 mask; - int i; + u32 r_param = bp->link_vars.line_speed / 8; + u32 fair_periodic_timeout_usec; + u32 t_fair; - /* Return here if interrupt is shared and it's not for us */ - if (unlikely(status == 0)) { - DP(NETIF_MSG_INTR, "not our interrupt!\n"); - return IRQ_NONE; - } - DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); + memset(&(bp->cmng.rs_vars), 0, + sizeof(struct rate_shaping_vars_per_port)); + memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - 
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return IRQ_HANDLED; - } + /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ + bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; -#endif + /* this is the threshold below which no timer arming will occur + 1.25 coefficient is for the threshold to be a little bigger + than the real time, to compensate for timer in-accuracy */ + bp->cmng.rs_vars.rs_threshold = + (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; - for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { - struct bnx2x_fastpath *fp = &bp->fp[i]; + /* resolution of fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + /* for 10G it is 1000usec. for 1G it is 10000usec. */ + t_fair = T_FAIR_COEF / bp->link_vars.line_speed; - mask = 0x2 << fp->sb_id; - if (status & mask) { - /* Handle Rx and Tx according to SB id */ - prefetch(fp->rx_cons_sb); - prefetch(&fp->status_blk->u_status_block. - status_block_index); - prefetch(fp->tx_cons_sb); - prefetch(&fp->status_blk->c_status_block. - status_block_index); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); - status &= ~mask; - } - } + /* this is the threshold below which we won't arm the timer anymore */ + bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; -#ifdef BCM_CNIC - mask = 0x2 << CNIC_SB_ID(bp); - if (status & (mask | 0x1)) { - struct cnic_ops *c_ops = NULL; + /* we multiply by 1e3/8 to get bytes/msec. + We don't want the credits to pass a credit + of the t_fair*FAIR_MEM (algorithm resolution) */ + bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; + /* since each tick is 4 usec */ + bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; +} - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - c_ops->cnic_handler(bp->cnic_data, NULL); - rcu_read_unlock(); +/* Calculates the sum of vn_min_rates. + It's needed for further normalizing of the min_rates. + Returns: + sum of vn_min_rates. + or + 0 - if all the min_rates are 0. + In the later case fainess algorithm should be deactivated. + If not all min_rates are zero then those that are zeroes will be set to 1. + */ +static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) +{ + int all_zero = 1; + int port = BP_PORT(bp); + int vn; - status &= ~mask; - } -#endif + bp->vn_weight_sum = 0; + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + int func = 2*vn + port; + u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); + u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - if (unlikely(status & 0x1)) { - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + /* Skip hidden vns */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) + continue; - status &= ~0x1; - if (!status) - return IRQ_HANDLED; - } + /* If min rate is zero - set it to 1 */ + if (!vn_min_rate) + vn_min_rate = DEF_MIN_RATE; + else + all_zero = 0; - if (unlikely(status)) - DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", - status); + bp->vn_weight_sum += vn_min_rate; + } - return IRQ_HANDLED; + /* ... 
only if all min rates are zeros - disable fairness */ + if (all_zero) { + bp->cmng.flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "All MIN values are zeroes" + " fairness will be disabled\n"); + } else + bp->cmng.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } -/* end of fast path */ - -static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); - -/* Link */ - -/* - * General service functions - */ - -static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) +static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) { - u32 lock_status; - u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; - int cnt; + struct rate_shaping_vars_per_vn m_rs_vn; + struct fairness_vars_per_vn m_fair_vn; + u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); + u16 vn_min_rate, vn_max_rate; + int i; - /* Validating that the resource is within range */ - if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", - resource, HW_LOCK_MAX_RESOURCE_VALUE); - return -EINVAL; - } + /* If function is hidden - set min and max to zeroes */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + vn_min_rate = 0; + vn_max_rate = 0; - if (func <= 5) { - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); } else { - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; + /* If min rate is zero - set it to 1 */ + if (!vn_min_rate) + vn_min_rate = DEF_MIN_RATE; + vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT) * 100; } + DP(NETIF_MSG_IFUP, + "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", + func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); - /* Validating that the resource is not already taken */ - lock_status = REG_RD(bp, hw_lock_control_reg); - if (lock_status & resource_bit) { - DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", - lock_status, resource_bit); - return -EEXIST; - } + memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); + memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); - /* Try for 5 second every 5ms */ - for (cnt = 0; cnt < 1000; cnt++) { - /* Try to acquire the lock */ - REG_WR(bp, hw_lock_control_reg + 4, resource_bit); - lock_status = REG_RD(bp, hw_lock_control_reg); - if (lock_status & resource_bit) - return 0; + /* global vn counter - maximal Mbps for this vn */ + m_rs_vn.vn_counter.rate = vn_max_rate; - msleep(5); + /* quota - number of bytes transmitted in this period */ + m_rs_vn.vn_counter.quota = + (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; + + if (bp->vn_weight_sum) { + /* credit for each period of the fairness algorithm: + number of bytes in T_FAIR (the vn share the port rate). 
+ vn_weight_sum should not be larger than 10000, thus + T_FAIR_COEF / (8 * vn_weight_sum) will always be greater + than zero */ + m_fair_vn.vn_credit_delta = + max_t(u32, (vn_min_rate * (T_FAIR_COEF / + (8 * bp->vn_weight_sum))), + (bp->cmng.fair_vars.fair_threshold * 2)); + DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", + m_fair_vn.vn_credit_delta); } - DP(NETIF_MSG_HW, "Timeout\n"); - return -EAGAIN; + + /* Store it to internal memory */ + for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, + ((u32 *)(&m_rs_vn))[i]); + + for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, + ((u32 *)(&m_fair_vn))[i]); } -static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) + +/* This function is called upon link interrupt */ +static void bnx2x_link_attn(struct bnx2x *bp) { - u32 lock_status; - u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; + u32 prev_link_status = bp->link_vars.link_status; + /* Make sure that we are synced with the current statistics */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); - DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); + bnx2x_link_update(&bp->link_params, &bp->link_vars); - /* Validating that the resource is within range */ - if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", - resource, HW_LOCK_MAX_RESOURCE_VALUE); - return -EINVAL; - } + if (bp->link_vars.link_up) { - if (func <= 5) { - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); - } else { - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); - } + /* dropless flow control */ + if (CHIP_IS_E1H(bp) && bp->dropless_fc) { + int port = BP_PORT(bp); + u32 pause_enabled = 0; - /* Validating that the resource is currently taken */ - lock_status = REG_RD(bp, hw_lock_control_reg); - if (!(lock_status & resource_bit)) { - DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", - lock_status, resource_bit); - return -EFAULT; - } + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_enabled = 1; - REG_WR(bp, hw_lock_control_reg, resource_bit); - return 0; -} + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_ETH_PAUSE_ENABLED_OFFSET(port), + pause_enabled); + } -/* HW Lock for shared dual port PHYs */ -static void bnx2x_acquire_phy_lock(struct bnx2x *bp) -{ - mutex_lock(&bp->port.phy_mutex); + if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { + struct host_port_stats *pstats; - if (bp->port.need_hw_lock) - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); -} + pstats = bnx2x_sp(bp, port_stats); + /* reset old bmac stats */ + memset(&(pstats->mac_stx[0]), 0, + sizeof(struct mac_stx)); + } + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + } -static void bnx2x_release_phy_lock(struct bnx2x *bp) -{ - if (bp->port.need_hw_lock) - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); + /* indicate link status only if link status actually changed */ + if (prev_link_status != bp->link_vars.link_status) + bnx2x_link_report(bp); - mutex_unlock(&bp->port.phy_mutex); -} + if (IS_E1HMF(bp)) { + int port = BP_PORT(bp); + int func; + int vn; -int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) -{ - /* The GPIO should be swapped if swap register is set and active */ - int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, 
NIG_REG_STRAP_OVERRIDE)) ^ port; - int gpio_shift = gpio_num + - (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); - u32 gpio_mask = (1 << gpio_shift); - u32 gpio_reg; - int value; + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + if (vn == BP_E1HVN(bp)) + continue; - if (gpio_num > MISC_REGISTERS_GPIO_3) { - BNX2X_ERR("Invalid GPIO %d\n", gpio_num); - return -EINVAL; - } + func = ((vn << 1) | port); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + } - /* read GPIO value */ - gpio_reg = REG_RD(bp, MISC_REG_GPIO); + if (bp->link_vars.link_up) { + int i; - /* get the requested pin value */ - if ((gpio_reg & gpio_mask) == gpio_mask) - value = 1; - else - value = 0; + /* Init rate shaping and fairness contexts */ + bnx2x_init_port_minmax(bp); - DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, 2*vn + port); - return value; + /* Store it to internal memory */ + for (i = 0; + i < sizeof(struct cmng_struct_per_port) / 4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4, + ((u32 *)(&bp->cmng))[i]); + } + } } -int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +void bnx2x__link_status_update(struct bnx2x *bp) { - /* The GPIO should be swapped if swap register is set and active */ - int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; - int gpio_shift = gpio_num + - (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); - u32 gpio_mask = (1 << gpio_shift); - u32 gpio_reg; + if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) + return; - if (gpio_num > MISC_REGISTERS_GPIO_3) { - BNX2X_ERR("Invalid GPIO %d\n", gpio_num); - return -EINVAL; - } + bnx2x_link_status_update(&bp->link_params, &bp->link_vars); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - /* read GPIO and mask except the float bits */ - gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); + if (bp->link_vars.link_up) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + else + bnx2x_stats_handle(bp, STATS_EVENT_STOP); - switch (mode) { - case MISC_REGISTERS_GPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", - gpio_num, gpio_shift); - /* clear FLOAT and set CLR */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); - break; + bnx2x_calc_vn_weight_sum(bp); - case MISC_REGISTERS_GPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", - gpio_num, gpio_shift); - /* clear FLOAT and set SET */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); - break; + /* indicate link status */ + bnx2x_link_report(bp); +} - case MISC_REGISTERS_GPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", - gpio_num, gpio_shift); - /* set FLOAT */ - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); - break; +static void bnx2x_pmf_update(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val; - default: - break; - } + bp->port.pmf = 1; + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); - REG_WR(bp, MISC_REG_GPIO, gpio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* enable nig attention */ + val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); - 
return 0; + bnx2x_stats_handle(bp, STATS_EVENT_PMF); } -int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +/* end of Link */ + +/* slow path */ + +/* + * General service functions + */ + +/* send the MCP a request, block until there is a reply */ +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) { - /* The GPIO should be swapped if swap register is set and active */ - int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; - int gpio_shift = gpio_num + - (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); - u32 gpio_mask = (1 << gpio_shift); - u32 gpio_reg; + int func = BP_FUNC(bp); + u32 seq = ++bp->fw_seq; + u32 rc = 0; + u32 cnt = 1; + u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; - if (gpio_num > MISC_REGISTERS_GPIO_3) { - BNX2X_ERR("Invalid GPIO %d\n", gpio_num); - return -EINVAL; - } + mutex_lock(&bp->fw_mb_mutex); + SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); + DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - /* read GPIO int */ - gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); + do { + /* let the FW do it's magic ... */ + msleep(delay); - switch (mode) { - case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: - DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " - "output low\n", gpio_num, gpio_shift); - /* clear SET and set CLR */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); - break; + rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); - case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: - DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " - "output high\n", gpio_num, gpio_shift); - /* clear CLR and set SET */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); - break; + /* Give the FW up to 5 second (500*10ms) */ + } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); - default: - break; - } + DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", + cnt*delay, rc, seq); - REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* is this a reply to our command? */ + if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) + rc &= FW_MSG_CODE_MASK; + else { + /* FW BUG! 
*/ + BNX2X_ERR("FW failed to respond!\n"); + bnx2x_fw_dump(bp); + rc = 0; + } + mutex_unlock(&bp->fw_mb_mutex); - return 0; + return rc; } -static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) +static void bnx2x_e1h_disable(struct bnx2x *bp) { - u32 spio_mask = (1 << spio_num); - u32 spio_reg; + int port = BP_PORT(bp); - if ((spio_num < MISC_REGISTERS_SPIO_4) || - (spio_num > MISC_REGISTERS_SPIO_7)) { - BNX2X_ERR("Invalid SPIO %d\n", spio_num); - return -EINVAL; - } + netif_tx_disable(bp->dev); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); - /* read SPIO and mask except the float bits */ - spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); - switch (mode) { - case MISC_REGISTERS_SPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); - /* clear FLOAT and set CLR */ - spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); - break; - - case MISC_REGISTERS_SPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); - /* clear FLOAT and set SET */ - spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); - break; - - case MISC_REGISTERS_SPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); - /* set FLOAT */ - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - break; - - default: - break; - } - - REG_WR(bp, MISC_REG_SPIO, spio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); - - return 0; + netif_carrier_off(bp->dev); } -static void bnx2x_calc_fc_adv(struct bnx2x *bp) +static void bnx2x_e1h_enable(struct bnx2x *bp) { - switch (bp->link_vars.ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: - bp->port.advertising &= ~(ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; + int port = BP_PORT(bp); - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: - bp->port.advertising |= (ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: - bp->port.advertising |= ADVERTISED_Asym_Pause; - break; + /* Tx queue should be only reenabled */ + netif_tx_wake_all_queues(bp->dev); - default: - bp->port.advertising &= ~(ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; - } + /* + * Should not call netif_carrier_on since it will be called if the link + * is up when checking for link state + */ } -static void bnx2x_link_report(struct bnx2x *bp) +static void bnx2x_update_min_max(struct bnx2x *bp) { - if (bp->flags & MF_FUNC_DIS) { - netif_carrier_off(bp->dev); - netdev_err(bp->dev, "NIC Link is Down\n"); - return; - } + int port = BP_PORT(bp); + int vn, i; - if (bp->link_vars.link_up) { - u16 line_speed; + /* Init rate shaping and fairness contexts */ + bnx2x_init_port_minmax(bp); - if (bp->state == BNX2X_STATE_OPEN) - netif_carrier_on(bp->dev); - netdev_info(bp->dev, "NIC Link is Up, "); + bnx2x_calc_vn_weight_sum(bp); - line_speed = bp->link_vars.line_speed; - if (IS_E1HMF(bp)) { - u16 vn_max_rate; + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, 2*vn + port); - vn_max_rate = - ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; - if (vn_max_rate < line_speed) - line_speed = vn_max_rate; - } - pr_cont("%d Mbps ", line_speed); + if (bp->port.pmf) { + int func; - if (bp->link_vars.duplex == DUPLEX_FULL) - pr_cont("full duplex"); 
- else - pr_cont("half duplex"); - - if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { - pr_cont(", receive "); - if (bp->link_vars.flow_ctrl & - BNX2X_FLOW_CTRL_TX) - pr_cont("& transmit "); - } else { - pr_cont(", transmit "); - } - pr_cont("flow control ON"); + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + if (vn == BP_E1HVN(bp)) + continue; + + func = ((vn << 1) | port); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); } - pr_cont("\n"); - } else { /* link_down */ - netif_carrier_off(bp->dev); - netdev_err(bp->dev, "NIC Link is Down\n"); + /* Store it to internal memory */ + for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4, + ((u32 *)(&bp->cmng))[i]); } } -static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) { - if (!BP_NOMCP(bp)) { - u8 rc; - - /* Initialize link parameters structure variables */ - /* It is recommended to turn off RX FC for jumbo frames - for better performance */ - if (bp->dev->mtu > 5000) - bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; - else - bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; - - bnx2x_acquire_phy_lock(bp); - - if (load_mode == LOAD_DIAG) - bp->link_params.loopback_mode = LOOPBACK_XGXS_10; + DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); - rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); + if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { - bnx2x_release_phy_lock(bp); + /* + * This is the only place besides the function initialization + * where the bp->flags can change so it is done without any + * locks + */ + if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; - bnx2x_calc_fc_adv(bp); + bnx2x_e1h_disable(bp); + } else { + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; - if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - bnx2x_link_report(bp); + bnx2x_e1h_enable(bp); } + dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + } + if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { - return rc; + bnx2x_update_min_max(bp); + dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; } - BNX2X_ERR("Bootcode is missing - can not initialize link\n"); - return -EINVAL; + + /* Report results to MCP */ + if (dcc_event) + bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE); + else + bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); } -static void bnx2x_link_set(struct bnx2x *bp) +/* must be called under the spq lock */ +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) { - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_phy_init(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); + struct eth_spe *next_spe = bp->spq_prod_bd; - bnx2x_calc_fc_adv(bp); - } else - BNX2X_ERR("Bootcode is missing - can not set link\n"); + if (bp->spq_prod_bd == bp->spq_last_bd) { + bp->spq_prod_bd = bp->spq; + bp->spq_prod_idx = 0; + DP(NETIF_MSG_TIMER, "end of spq\n"); + } else { + bp->spq_prod_bd++; + bp->spq_prod_idx++; + } + return next_spe; } -static void bnx2x__link_reset(struct bnx2x *bp) +/* must be called under the spq lock */ +static inline void bnx2x_sp_prod_update(struct bnx2x *bp) { - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - 
bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing - can not reset link\n"); + int func = BP_FUNC(bp); + + /* Make sure that BD data is updated before writing the producer */ + wmb(); + + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), + bp->spq_prod_idx); + mmiowb(); } -static u8 bnx2x_link_test(struct bnx2x *bp) +/* the slow path queue is odd since completions arrive on the fastpath ring */ +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int common) { - u8 rc = 0; + struct eth_spe *spe; - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing - can not test link\n"); +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EIO; +#endif - return rc; -} + spin_lock_bh(&bp->spq_lock); -static void bnx2x_init_port_minmax(struct bnx2x *bp) -{ - u32 r_param = bp->link_vars.line_speed / 8; - u32 fair_periodic_timeout_usec; - u32 t_fair; + if (!bp->spq_left) { + BNX2X_ERR("BUG! SPQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; + } - memset(&(bp->cmng.rs_vars), 0, - sizeof(struct rate_shaping_vars_per_port)); - memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); + spe = bnx2x_sp_get_next(bp); - /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ - bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; + /* CID needs port number to be encoded int it */ + spe->hdr.conn_and_cmd_data = + cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | + HW_CID(bp, cid)); + spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); + if (common) + spe->hdr.type |= + cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); - /* this is the threshold below which no timer arming will occur - 1.25 coefficient is for the threshold to be a little bigger - than the real time, to compensate for timer in-accuracy */ - bp->cmng.rs_vars.rs_threshold = - (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; + spe->data.mac_config_addr.hi = cpu_to_le32(data_hi); + spe->data.mac_config_addr.lo = cpu_to_le32(data_lo); - /* resolution of fairness timer */ - fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; - /* for 10G it is 1000usec. for 1G it is 10000usec. */ - t_fair = T_FAIR_COEF / bp->link_vars.line_speed; + bp->spq_left--; - /* this is the threshold below which we won't arm the timer anymore */ - bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; + DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, + "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", + bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), + (u32)(U64_LO(bp->spq_mapping) + + (void *)bp->spq_prod_bd - (void *)bp->spq), command, + HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); - /* we multiply by 1e3/8 to get bytes/msec. - We don't want the credits to pass a credit - of the t_fair*FAIR_MEM (algorithm resolution) */ - bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; - /* since each tick is 4 usec */ - bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); + return 0; } -/* Calculates the sum of vn_min_rates. - It's needed for further normalizing of the min_rates. - Returns: - sum of vn_min_rates. - or - 0 - if all the min_rates are 0. - In the later case fainess algorithm should be deactivated. - If not all min_rates are zero then those that are zeroes will be set to 1. 
- */ -static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) +/* acquire split MCP access lock register */ +static int bnx2x_acquire_alr(struct bnx2x *bp) { - int all_zero = 1; - int port = BP_PORT(bp); - int vn; - - bp->vn_weight_sum = 0; - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - int func = 2*vn + port; - u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); - u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - - /* Skip hidden vns */ - if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) - continue; + u32 j, val; + int rc = 0; - /* If min rate is zero - set it to 1 */ - if (!vn_min_rate) - vn_min_rate = DEF_MIN_RATE; - else - all_zero = 0; + might_sleep(); + for (j = 0; j < 1000; j++) { + val = (1UL << 31); + REG_WR(bp, GRCBASE_MCP + 0x9c, val); + val = REG_RD(bp, GRCBASE_MCP + 0x9c); + if (val & (1L << 31)) + break; - bp->vn_weight_sum += vn_min_rate; + msleep(5); + } + if (!(val & (1L << 31))) { + BNX2X_ERR("Cannot acquire MCP access lock register\n"); + rc = -EBUSY; } - /* ... only if all min rates are zeros - disable fairness */ - if (all_zero) { - bp->cmng.flags.cmng_enables &= - ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); - } else - bp->cmng.flags.cmng_enables |= - CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + return rc; } -static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) +/* release split MCP access lock register */ +static void bnx2x_release_alr(struct bnx2x *bp) { - struct rate_shaping_vars_per_vn m_rs_vn; - struct fairness_vars_per_vn m_fair_vn; - u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); - u16 vn_min_rate, vn_max_rate; - int i; + REG_WR(bp, GRCBASE_MCP + 0x9c, 0); +} - /* If function is hidden - set min and max to zeroes */ - if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { - vn_min_rate = 0; - vn_max_rate = 0; +static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) +{ + struct host_def_status_block *def_sb = bp->def_status_blk; + u16 rc = 0; - } else { - vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - /* If min rate is zero - set it to 1 */ - if (!vn_min_rate) - vn_min_rate = DEF_MIN_RATE; - vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; + barrier(); /* status block is written to by the chip */ + if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { + bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; + rc |= 1; } - DP(NETIF_MSG_IFUP, - "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", - func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); - - memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); - memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); - - /* global vn counter - maximal Mbps for this vn */ - m_rs_vn.vn_counter.rate = vn_max_rate; - - /* quota - number of bytes transmitted in this period */ - m_rs_vn.vn_counter.quota = - (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; - - if (bp->vn_weight_sum) { - /* credit for each period of the fairness algorithm: - number of bytes in T_FAIR (the vn share the port rate). 
- vn_weight_sum should not be larger than 10000, thus - T_FAIR_COEF / (8 * vn_weight_sum) will always be greater - than zero */ - m_fair_vn.vn_credit_delta = - max_t(u32, (vn_min_rate * (T_FAIR_COEF / - (8 * bp->vn_weight_sum))), - (bp->cmng.fair_vars.fair_threshold * 2)); - DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", - m_fair_vn.vn_credit_delta); + if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) { + bp->def_c_idx = def_sb->c_def_status_block.status_block_index; + rc |= 2; } - - /* Store it to internal memory */ - for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_rs_vn))[i]); - - for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_fair_vn))[i]); + if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) { + bp->def_u_idx = def_sb->u_def_status_block.status_block_index; + rc |= 4; + } + if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) { + bp->def_x_idx = def_sb->x_def_status_block.status_block_index; + rc |= 8; + } + if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) { + bp->def_t_idx = def_sb->t_def_status_block.status_block_index; + rc |= 16; + } + return rc; } +/* + * slow path service functions + */ -/* This function is called upon link interrupt */ -static void bnx2x_link_attn(struct bnx2x *bp) +static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) { - u32 prev_link_status = bp->link_vars.link_status; - /* Make sure that we are synced with the current statistics */ - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - bnx2x_link_update(&bp->link_params, &bp->link_vars); + int port = BP_PORT(bp); + u32 hc_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_SET); + u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + u32 nig_int_mask_addr = port ? 
NIG_REG_MASK_INTERRUPT_PORT1 : + NIG_REG_MASK_INTERRUPT_PORT0; + u32 aeu_mask; + u32 nig_mask = 0; - if (bp->link_vars.link_up) { + if (bp->attn_state & asserted) + BNX2X_ERR("IGU ERROR\n"); - /* dropless flow control */ - if (CHIP_IS_E1H(bp) && bp->dropless_fc) { - int port = BP_PORT(bp); - u32 pause_enabled = 0; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, aeu_addr); - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) - pause_enabled = 1; + DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", + aeu_mask, asserted); + aeu_mask &= ~(asserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_ETH_PAUSE_ENABLED_OFFSET(port), - pause_enabled); - } + REG_WR(bp, aeu_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { - struct host_port_stats *pstats; + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state |= asserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); - pstats = bnx2x_sp(bp, port_stats); - /* reset old bmac stats */ - memset(&(pstats->mac_stx[0]), 0, - sizeof(struct mac_stx)); - } - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - } + if (asserted & ATTN_HARD_WIRED_MASK) { + if (asserted & ATTN_NIG_FOR_FUNC) { - /* indicate link status only if link status actually changed */ - if (prev_link_status != bp->link_vars.link_status) - bnx2x_link_report(bp); + bnx2x_acquire_phy_lock(bp); - if (IS_E1HMF(bp)) { - int port = BP_PORT(bp); - int func; - int vn; + /* save nig interrupt mask */ + nig_mask = REG_RD(bp, nig_int_mask_addr); + REG_WR(bp, nig_int_mask_addr, 0); - /* Set the attention towards other drivers on the same port */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - if (vn == BP_E1HVN(bp)) - continue; + bnx2x_link_attn(bp); - func = ((vn << 1) | port); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + - (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + /* handle unicore attn? 
*/ } + if (asserted & ATTN_SW_TIMER_4_FUNC) + DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); - if (bp->link_vars.link_up) { - int i; + if (asserted & GPIO_2_FUNC) + DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); - /* Init rate shaping and fairness contexts */ - bnx2x_init_port_minmax(bp); + if (asserted & GPIO_3_FUNC) + DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); - for (vn = VN_0; vn < E1HVN_MAX; vn++) - bnx2x_init_vn_minmax(bp, 2*vn + port); + if (asserted & GPIO_4_FUNC) + DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); - /* Store it to internal memory */ - for (i = 0; - i < sizeof(struct cmng_struct_per_port) / 4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4, - ((u32 *)(&bp->cmng))[i]); + if (port == 0) { + if (asserted & ATTN_GENERAL_ATTN_1) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_2) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_3) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); + } + } else { + if (asserted & ATTN_GENERAL_ATTN_4) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_5) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_6) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); + } } - } -} -static void bnx2x__link_status_update(struct bnx2x *bp) -{ - if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) - return; + } /* if hardwired */ - bnx2x_link_status_update(&bp->link_params, &bp->link_vars); - - if (bp->link_vars.link_up) - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - else - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - bnx2x_calc_vn_weight_sum(bp); + DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", + asserted, hc_addr); + REG_WR(bp, hc_addr, asserted); - /* indicate link status */ - bnx2x_link_report(bp); + /* now set back the mask */ + if (asserted & ATTN_NIG_FOR_FUNC) { + REG_WR(bp, nig_int_mask_addr, nig_mask); + bnx2x_release_phy_lock(bp); + } } -static void bnx2x_pmf_update(struct bnx2x *bp) +static inline void bnx2x_fan_failure(struct bnx2x *bp) { int port = BP_PORT(bp); - u32 val; - bp->port.pmf = 1; - DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); - - /* enable nig attention */ - val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); + /* mark the failure */ + bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; + SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, + bp->link_params.ext_phy_config); - bnx2x_stats_handle(bp, STATS_EVENT_PMF); + /* log the failure */ + netdev_err(bp->dev, "Fan Failure on Network Controller has caused" + " the driver to shutdown the card to prevent permanent" + " damage. Please contact OEM Support for assistance\n"); } -/* end of Link */ - -/* slow path */ +static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +{ + int port = BP_PORT(bp); + int reg_offset; + u32 val, swap_val, swap_override; -/* - * General service functions - */ + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); -/* send the MCP a request, block until there is a reply */ -u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) -{ - int func = BP_FUNC(bp); - u32 seq = ++bp->fw_seq; - u32 rc = 0; - u32 cnt = 1; - u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; + if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { - mutex_lock(&bp->fw_mb_mutex); - SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); - DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); + val = REG_RD(bp, reg_offset); + val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_offset, val); - do { - /* let the FW do it's magic ... */ - msleep(delay); + BNX2X_ERR("SPIO5 hw attention\n"); - rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); + /* Fan failure attention */ + switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + /* Low power mode is controlled by GPIO 2 */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + /* The PHY reset is controlled by GPIO 1 */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + break; - /* Give the FW up to 5 second (500*10ms) */ - } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + /* The PHY reset is controlled by GPIO 1 */ + /* fake the port number to cancel the swap done in + set_gpio() */ + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + port = (swap_val && swap_override) ^ 1; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + break; - DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", - cnt*delay, rc, seq); + default: + break; + } + bnx2x_fan_failure(bp); + } - /* is this a reply to our command? */ - if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) - rc &= FW_MSG_CODE_MASK; - else { - /* FW BUG! */ - BNX2X_ERR("FW failed to respond!\n"); - bnx2x_fw_dump(bp); - rc = 0; + if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 | + AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_handle_module_detect_int(&bp->link_params); + bnx2x_release_phy_lock(bp); } - mutex_unlock(&bp->fw_mb_mutex); - return rc; -} + if (attn & HW_INTERRUT_ASSERT_SET_0) { -static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); -static void bnx2x_set_rx_mode(struct net_device *dev); + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + REG_WR(bp, reg_offset, val); -static void bnx2x_e1h_disable(struct bnx2x *bp) + BNX2X_ERR("FATAL HW block attention set0 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) { - int port = BP_PORT(bp); + u32 val; - netif_tx_disable(bp->dev); + if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); + val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); + BNX2X_ERR("DB hw attention 0x%x\n", val); + /* DORQ discard attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from DORQ\n"); + } - netif_carrier_off(bp->dev); -} + if (attn & HW_INTERRUT_ASSERT_SET_1) { -static void bnx2x_e1h_enable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); + int port = BP_PORT(bp); + int reg_offset; - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); - /* Tx queue should be only reenabled */ - netif_tx_wake_all_queues(bp->dev); + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + REG_WR(bp, reg_offset, val); - /* - * Should not call netif_carrier_on since it will be called if the link - * is up when checking for link state - */ + BNX2X_ERR("FATAL HW block attention set1 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); + bnx2x_panic(); + } } -static void bnx2x_update_min_max(struct bnx2x *bp) +static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) { - int port = BP_PORT(bp); - int vn, i; + u32 val; - /* Init rate shaping and fairness contexts */ - bnx2x_init_port_minmax(bp); + if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { - bnx2x_calc_vn_weight_sum(bp); + val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); + BNX2X_ERR("CFC hw attention 0x%x\n", val); + /* CFC error attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from CFC\n"); + } - for (vn = VN_0; vn < E1HVN_MAX; vn++) - bnx2x_init_vn_minmax(bp, 2*vn + port); + if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { - if (bp->port.pmf) { - int func; + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); + BNX2X_ERR("PXP hw attention 0x%x\n", val); + /* RQ_USDMDP_FIFO_OVERFLOW */ + if (val & 0x18000) + BNX2X_ERR("FATAL error from PXP\n"); + } - /* Set the attention towards other drivers on the same port */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - if (vn == BP_E1HVN(bp)) - continue; + if (attn & HW_INTERRUT_ASSERT_SET_2) { - func = ((vn << 1) | port); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + - (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); - } + int port = BP_PORT(bp); + int reg_offset; - /* Store it to internal memory */ - for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4, - ((u32 *)(&bp->cmng))[i]); + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set2 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); + bnx2x_panic(); } } -static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) +static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) { - DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); - - if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { + u32 val; - /* - * This is the only place besides the function initialization - * where the bp->flags can change so it is done without any - * locks - */ - if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { - DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); - bp->flags |= MF_FUNC_DIS; + if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { - bnx2x_e1h_disable(bp); - } else { - DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); - bp->flags &= ~MF_FUNC_DIS; + if (attn & BNX2X_PMF_LINK_ASSERT) { + int func = BP_FUNC(bp); - bnx2x_e1h_enable(bp); - } - dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; - } - if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + bp->mf_config = SHMEM_RD(bp, + mf_cfg.func_mf_config[func].config); + val = SHMEM_RD(bp, func_mb[func].drv_status); + if (val & DRV_STATUS_DCC_EVENT_MASK) + bnx2x_dcc_event(bp, + (val & DRV_STATUS_DCC_EVENT_MASK)); + bnx2x__link_status_update(bp); + if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) + bnx2x_pmf_update(bp); - bnx2x_update_min_max(bp); - dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; - } + } else if (attn & BNX2X_MC_ASSERT_BITS) { - /* Report results to MCP */ - if (dcc_event) - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE); - else - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); -} + BNX2X_ERR("MC assert!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); + bnx2x_panic(); -/* must be called under the spq lock */ -static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) -{ - struct eth_spe *next_spe = bp->spq_prod_bd; + } else if (attn & BNX2X_MCP_ASSERT) { - if (bp->spq_prod_bd == bp->spq_last_bd) { - bp->spq_prod_bd = bp->spq; - bp->spq_prod_idx = 0; - DP(NETIF_MSG_TIMER, "end of spq\n"); - } else { - bp->spq_prod_bd++; - bp->spq_prod_idx++; + BNX2X_ERR("MCP assert!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); + bnx2x_fw_dump(bp); + + } else + BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); + } + + if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { + BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); + if (attn & BNX2X_GRC_TIMEOUT) { + val = CHIP_IS_E1H(bp) ? + REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; + BNX2X_ERR("GRC time-out 0x%08x\n", val); + } + if (attn & BNX2X_GRC_RSV) { + val = CHIP_IS_E1H(bp) ? 
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; + BNX2X_ERR("GRC reserved 0x%08x\n", val); + } + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); } - return next_spe; } -/* must be called under the spq lock */ -static inline void bnx2x_sp_prod_update(struct bnx2x *bp) +#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 +#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ +#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) +#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) +#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS +#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) +/* + * should be run under rtnl lock + */ +static inline void bnx2x_set_reset_done(struct bnx2x *bp) { - int func = BP_FUNC(bp); - - /* Make sure that BD data is updated before writing the producer */ - wmb(); - - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), - bp->spq_prod_idx); + u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); + val &= ~(1 << RESET_DONE_FLAG_SHIFT); + REG_WR(bp, BNX2X_MISC_GEN_REG, val); + barrier(); mmiowb(); } -/* the slow path queue is odd since completions arrive on the fastpath ring */ -static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, - u32 data_hi, u32 data_lo, int common) +/* + * should be run under rtnl lock + */ +static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp) { - struct eth_spe *spe; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -EIO; -#endif - - spin_lock_bh(&bp->spq_lock); - - if (!bp->spq_left) { - BNX2X_ERR("BUG! SPQ ring full!\n"); - spin_unlock_bh(&bp->spq_lock); - bnx2x_panic(); - return -EBUSY; - } - - spe = bnx2x_sp_get_next(bp); - - /* CID needs port number to be encoded int it */ - spe->hdr.conn_and_cmd_data = - cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | - HW_CID(bp, cid)); - spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); - if (common) - spe->hdr.type |= - cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); + u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); + val |= (1 << 16); + REG_WR(bp, BNX2X_MISC_GEN_REG, val); + barrier(); + mmiowb(); +} - spe->data.mac_config_addr.hi = cpu_to_le32(data_hi); - spe->data.mac_config_addr.lo = cpu_to_le32(data_lo); +/* + * should be run under rtnl lock + */ +bool bnx2x_reset_is_done(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); + DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); + return (val & RESET_DONE_FLAG_MASK) ? 
false : true; +} - bp->spq_left--; +/* + * should be run under rtnl lock + */ +inline void bnx2x_inc_load_cnt(struct bnx2x *bp) +{ + u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); - DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, - "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", - bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), - (u32)(U64_LO(bp->spq_mapping) + - (void *)bp->spq_prod_bd - (void *)bp->spq), command, - HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); - bnx2x_sp_prod_update(bp); - spin_unlock_bh(&bp->spq_lock); - return 0; + val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK; + REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); + barrier(); + mmiowb(); } -/* acquire split MCP access lock register */ -static int bnx2x_acquire_alr(struct bnx2x *bp) +/* + * should be run under rtnl lock + */ +u32 bnx2x_dec_load_cnt(struct bnx2x *bp) { - u32 j, val; - int rc = 0; + u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); - might_sleep(); - for (j = 0; j < 1000; j++) { - val = (1UL << 31); - REG_WR(bp, GRCBASE_MCP + 0x9c, val); - val = REG_RD(bp, GRCBASE_MCP + 0x9c); - if (val & (1L << 31)) - break; + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); - msleep(5); - } - if (!(val & (1L << 31))) { - BNX2X_ERR("Cannot acquire MCP access lock register\n"); - rc = -EBUSY; - } + val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK; + REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); + barrier(); + mmiowb(); - return rc; + return val1; } -/* release split MCP access lock register */ -static void bnx2x_release_alr(struct bnx2x *bp) +/* + * should be run under rtnl lock + */ +static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp) { - REG_WR(bp, GRCBASE_MCP + 0x9c, 0); + return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK; } -static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) +static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) { - struct host_def_status_block *def_sb = bp->def_status_blk; - u16 rc = 0; - - barrier(); /* status block is written to by the chip */ - if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { - bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; - rc |= 1; - } - if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) { - bp->def_c_idx = def_sb->c_def_status_block.status_block_index; - rc |= 2; - } - if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) { - bp->def_u_idx = def_sb->u_def_status_block.status_block_index; - rc |= 4; - } - if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) { - bp->def_x_idx = def_sb->x_def_status_block.status_block_index; - rc |= 8; - } - if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) { - bp->def_t_idx = def_sb->t_def_status_block.status_block_index; - rc |= 16; - } - return rc; + u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); + REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK)); } -/* - * slow path service functions - */ +static inline void _print_next_block(int idx, const char *blk) +{ + if (idx) + pr_cont(", "); + pr_cont("%s", blk); +} -static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) +static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) { - int port = BP_PORT(bp); - u32 hc_addr = (HC_REG_COMMAND_REG + port*32 + - COMMAND_REG_ATTN_BITS_SET); - u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0; - u32 nig_int_mask_addr = port ? 
NIG_REG_MASK_INTERRUPT_PORT1 : - NIG_REG_MASK_INTERRUPT_PORT0; - u32 aeu_mask; - u32 nig_mask = 0; + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + _print_next_block(par_num++, "BRB"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + _print_next_block(par_num++, "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + _print_next_block(par_num++, "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + _print_next_block(par_num++, "SEARCHER"); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + _print_next_block(par_num++, "TSEMI"); + break; + } - if (bp->attn_state & asserted) - BNX2X_ERR("IGU ERROR\n"); + /* Clear the bit */ + sig &= ~cur_bit; + } + } - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - aeu_mask = REG_RD(bp, aeu_addr); + return par_num; +} - DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", - aeu_mask, asserted); - aeu_mask &= ~(asserted & 0x3ff); - DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); - - REG_WR(bp, aeu_addr, aeu_mask); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - - DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); - bp->attn_state |= asserted; - DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); - - if (asserted & ATTN_HARD_WIRED_MASK) { - if (asserted & ATTN_NIG_FOR_FUNC) { +static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + _print_next_block(par_num++, "PBCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: + _print_next_block(par_num++, "QM"); + break; + case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: + _print_next_block(par_num++, "XSDM"); + break; + case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: + _print_next_block(par_num++, "XSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: + _print_next_block(par_num++, "DOORBELLQ"); + break; + case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: + _print_next_block(par_num++, "VAUX PCI CORE"); + break; + case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: + _print_next_block(par_num++, "DEBUG"); + break; + case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: + _print_next_block(par_num++, "USDM"); + break; + case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: + _print_next_block(par_num++, "USEMI"); + break; + case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: + _print_next_block(par_num++, "UPB"); + break; + case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: + _print_next_block(par_num++, "CSDM"); + break; + } - bnx2x_acquire_phy_lock(bp); + /* Clear the bit */ + sig &= ~cur_bit; + } + } - /* save nig interrupt mask */ - nig_mask = REG_RD(bp, nig_int_mask_addr); - REG_WR(bp, nig_int_mask_addr, 0); + return par_num; +} - bnx2x_link_attn(bp); +static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + _print_next_block(par_num++, "CSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + _print_next_block(par_num++, "PXP"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + _print_next_block(par_num++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + 
_print_next_block(par_num++, "CFC"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + _print_next_block(par_num++, "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + _print_next_block(par_num++, "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + _print_next_block(par_num++, "MISC"); + break; + } - /* handle unicore attn? */ + /* Clear the bit */ + sig &= ~cur_bit; } - if (asserted & ATTN_SW_TIMER_4_FUNC) - DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); - - if (asserted & GPIO_2_FUNC) - DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); - - if (asserted & GPIO_3_FUNC) - DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); + } - if (asserted & GPIO_4_FUNC) - DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); + return par_num; +} - if (port == 0) { - if (asserted & ATTN_GENERAL_ATTN_1) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_2) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_3) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); - } - } else { - if (asserted & ATTN_GENERAL_ATTN_4) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_5) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_6) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); +static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: + _print_next_block(par_num++, "MCP ROM"); + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: + _print_next_block(par_num++, "MCP UMP RX"); + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: + _print_next_block(par_num++, "MCP UMP TX"); + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: + _print_next_block(par_num++, "MCP SCPAD"); + break; } - } - } /* if hardwired */ + /* Clear the bit */ + sig &= ~cur_bit; + } + } - DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", - asserted, hc_addr); - REG_WR(bp, hc_addr, asserted); + return par_num; +} - /* now set back the mask */ - if (asserted & ATTN_NIG_FOR_FUNC) { - REG_WR(bp, nig_int_mask_addr, nig_mask); - bnx2x_release_phy_lock(bp); - } +static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, + u32 sig2, u32 sig3) +{ + if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) || + (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) { + int par_num = 0; + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " + "[0]:0x%08x [1]:0x%08x " + "[2]:0x%08x [3]:0x%08x\n", + sig0 & HW_PRTY_ASSERT_SET_0, + sig1 & HW_PRTY_ASSERT_SET_1, + sig2 & HW_PRTY_ASSERT_SET_2, + sig3 & HW_PRTY_ASSERT_SET_3); + printk(KERN_ERR"%s: Parity errors detected in blocks: ", + bp->dev->name); + par_num = bnx2x_print_blocks_with_parity0( + sig0 & HW_PRTY_ASSERT_SET_0, par_num); + par_num = bnx2x_print_blocks_with_parity1( + sig1 & HW_PRTY_ASSERT_SET_1, par_num); + par_num = bnx2x_print_blocks_with_parity2( + sig2 & HW_PRTY_ASSERT_SET_2, par_num); + par_num = bnx2x_print_blocks_with_parity3( + sig3 & HW_PRTY_ASSERT_SET_3, par_num); + printk("\n"); + return true; + } else + 
return false; } -static inline void bnx2x_fan_failure(struct bnx2x *bp) +bool bnx2x_chk_parity_attn(struct bnx2x *bp) { + struct attn_route attn; int port = BP_PORT(bp); - /* mark the failure */ - bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; - bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; - SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, - bp->link_params.ext_phy_config); + attn.sig[0] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + + port*4); + attn.sig[1] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + + port*4); + attn.sig[2] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + + port*4); + attn.sig[3] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + + port*4); - /* log the failure */ - netdev_err(bp->dev, "Fan Failure on Network Controller has caused" - " the driver to shutdown the card to prevent permanent" - " damage. Please contact OEM Support for assistance\n"); + return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2], + attn.sig[3]); } -static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) { + struct attn_route attn, *group_mask; int port = BP_PORT(bp); - int reg_offset; - u32 val, swap_val, swap_override; - - reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); - - if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { + int index; + u32 reg_addr; + u32 val; + u32 aeu_mask; - val = REG_RD(bp, reg_offset); - val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; - REG_WR(bp, reg_offset, val); + /* need to take HW lock because MCP or other port might also + try to handle this event */ + bnx2x_acquire_alr(bp); - BNX2X_ERR("SPIO5 hw attention\n"); + if (bnx2x_chk_parity_attn(bp)) { + bp->recovery_state = BNX2X_RECOVERY_INIT; + bnx2x_set_reset_in_progress(bp); + schedule_delayed_work(&bp->reset_task, 0); + /* Disable HW interrupts */ + bnx2x_int_disable(bp); + bnx2x_release_alr(bp); + /* In case of parity errors don't handle attentions so that + * other function would "see" parity errors. 
+ */ + return; + } - /* Fan failure attention */ - switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: - /* Low power mode is controlled by GPIO 2 */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - /* The PHY reset is controlled by GPIO 1 */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - break; + attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); + attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); + attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); + attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", + attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - /* The PHY reset is controlled by GPIO 1 */ - /* fake the port number to cancel the swap done in - set_gpio() */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - port = (swap_val && swap_override) ^ 1; - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - break; + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + if (deasserted & (1 << index)) { + group_mask = &bp->attn_group[index]; - default: - break; + DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", + index, group_mask->sig[0], group_mask->sig[1], + group_mask->sig[2], group_mask->sig[3]); + + bnx2x_attn_int_deasserted3(bp, + attn.sig[3] & group_mask->sig[3]); + bnx2x_attn_int_deasserted1(bp, + attn.sig[1] & group_mask->sig[1]); + bnx2x_attn_int_deasserted2(bp, + attn.sig[2] & group_mask->sig[2]); + bnx2x_attn_int_deasserted0(bp, + attn.sig[0] & group_mask->sig[0]); } - bnx2x_fan_failure(bp); } - if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 | - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_handle_module_detect_int(&bp->link_params); - bnx2x_release_phy_lock(bp); - } + bnx2x_release_alr(bp); - if (attn & HW_INTERRUT_ASSERT_SET_0) { + reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); - val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); - REG_WR(bp, reg_offset, val); + val = ~deasserted; + DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", + val, reg_addr); + REG_WR(bp, reg_addr, val); - BNX2X_ERR("FATAL HW block attention set0 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); - bnx2x_panic(); - } -} + if (~bp->attn_state & deasserted) + BNX2X_ERR("IGU ERROR\n"); -static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) -{ - u32 val; + reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; - if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, reg_addr); - val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); - BNX2X_ERR("DB hw attention 0x%x\n", val); - /* DORQ discard attention */ - if (val & 0x2) - BNX2X_ERR("FATAL error from DORQ\n"); - } + DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", + aeu_mask, deasserted); + aeu_mask |= (deasserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); - if (attn & HW_INTERRUT_ASSERT_SET_1) { + REG_WR(bp, reg_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - int port = BP_PORT(bp); - int reg_offset; + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state &= ~deasserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); +} - reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); +static void bnx2x_attn_int(struct bnx2x *bp) +{ + /* read local copy of bits */ + u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits); + u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits_ack); + u32 attn_state = bp->attn_state; - val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); - REG_WR(bp, reg_offset, val); + /* look for changed bits */ + u32 asserted = attn_bits & ~attn_ack & ~attn_state; + u32 deasserted = ~attn_bits & attn_ack & attn_state; - BNX2X_ERR("FATAL HW block attention set1 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); - bnx2x_panic(); - } + DP(NETIF_MSG_HW, + "attn_bits %x attn_ack %x asserted %x deasserted %x\n", + attn_bits, attn_ack, asserted, deasserted); + + if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) + BNX2X_ERR("BAD attention state\n"); + + /* handle bits that were raised */ + if (asserted) + bnx2x_attn_int_asserted(bp, asserted); + + if (deasserted) + bnx2x_attn_int_deasserted(bp, deasserted); } -static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) +static void bnx2x_sp_task(struct work_struct *work) { - u32 val; - - if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { + struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); + u16 status; - val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); - BNX2X_ERR("CFC hw attention 0x%x\n", val); - /* CFC error attention */ - if (val & 0x2) - BNX2X_ERR("FATAL error from CFC\n"); + /* Return here if interrupt is disabled */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) { + DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); + return; } - if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { + status = bnx2x_update_dsb_idx(bp); +/* if (status == 0) */ +/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ - val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); - BNX2X_ERR("PXP hw attention 0x%x\n", val); - /* RQ_USDMDP_FIFO_OVERFLOW */ - if (val & 0x18000) - BNX2X_ERR("FATAL error from PXP\n"); + DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); + + /* HW attentions */ + if (status & 0x1) { + bnx2x_attn_int(bp); + status &= ~0x1; } - if (attn & HW_INTERRUT_ASSERT_SET_2) { + /* CStorm events: STAT_QUERY */ + if (status & 0x2) { + DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n"); + status &= ~0x2; + } - int port = BP_PORT(bp); - int reg_offset; + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", + status); - reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), + IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), + IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx), + IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx), + IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), + IGU_INT_ENABLE, 1); +} - val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); - REG_WR(bp, reg_offset, val); +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct bnx2x *bp = netdev_priv(dev); - BNX2X_ERR("FATAL HW block attention set2 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); - bnx2x_panic(); + /* Return here if interrupt is disabled */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) { + DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); + return IRQ_HANDLED; } -} -static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) -{ - u32 val; + bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0); - if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif - if (attn & BNX2X_PMF_LINK_ASSERT) { - int func = BP_FUNC(bp); +#ifdef BCM_CNIC + { + struct cnic_ops *c_ops; - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); - bp->mf_config = SHMEM_RD(bp, - mf_cfg.func_mf_config[func].config); - val = SHMEM_RD(bp, func_mb[func].drv_status); - if (val & DRV_STATUS_DCC_EVENT_MASK) - bnx2x_dcc_event(bp, - (val & DRV_STATUS_DCC_EVENT_MASK)); - bnx2x__link_status_update(bp); - if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) - bnx2x_pmf_update(bp); + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } +#endif + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - } else if (attn & BNX2X_MC_ASSERT_BITS) { + return IRQ_HANDLED; +} - BNX2X_ERR("MC assert!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); - bnx2x_panic(); +/* end of slow path */ - } else if (attn & BNX2X_MCP_ASSERT) { +static void bnx2x_timer(unsigned long data) +{ + struct bnx2x *bp = (struct bnx2x *) data; - BNX2X_ERR("MCP assert!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); - bnx2x_fw_dump(bp); + if (!netif_running(bp->dev)) + return; - } else - BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); - } + if (atomic_read(&bp->intr_sem) != 0) + goto timer_restart; - if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { - BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); - if (attn & BNX2X_GRC_TIMEOUT) { - val = CHIP_IS_E1H(bp) ? - REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; - BNX2X_ERR("GRC time-out 0x%08x\n", val); - } - if (attn & BNX2X_GRC_RSV) { - val = CHIP_IS_E1H(bp) ? 
- REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; - BNX2X_ERR("GRC reserved 0x%08x\n", val); + if (poll) { + struct bnx2x_fastpath *fp = &bp->fp[0]; + int rc; + + bnx2x_tx_int(fp); + rc = bnx2x_rx_int(fp, 1000); + } + + if (!BP_NOMCP(bp)) { + int func = BP_FUNC(bp); + u32 drv_pulse; + u32 mcp_pulse; + + ++bp->fw_drv_pulse_wr_seq; + bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; + /* TBD - add SYSTEM_TIME */ + drv_pulse = bp->fw_drv_pulse_wr_seq; + SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); + + mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & + MCP_PULSE_SEQ_MASK); + /* The delta between driver pulse and mcp response + * should be 1 (before mcp response) or 0 (after mcp response) + */ + if ((drv_pulse != mcp_pulse) && + (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { + /* someone lost a heartbeat... */ + BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", + drv_pulse, mcp_pulse); } - REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); } + + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); + +timer_restart: + mod_timer(&bp->timer, jiffies + bp->current_interval); } -static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); -static int bnx2x_nic_load(struct bnx2x *bp, int load_mode); +/* end of Statistics */ +/* nic init */ -#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 -#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ -#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) -#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) -#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS -#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) /* - * should be run under rtnl lock + * nic init service functions */ -static inline void bnx2x_set_reset_done(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - val &= ~(1 << RESET_DONE_FLAG_SHIFT); - REG_WR(bp, BNX2X_MISC_GEN_REG, val); - barrier(); - mmiowb(); -} -/* - * should be run under rtnl lock - */ -static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp) +static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) { - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - val |= (1 << 16); - REG_WR(bp, BNX2X_MISC_GEN_REG, val); - barrier(); - mmiowb(); -} + int port = BP_PORT(bp); -/* - * should be run under rtnl lock - */ -static inline bool bnx2x_reset_is_done(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); - return (val & RESET_DONE_FLAG_MASK) ? 
false : true; + /* "CSTORM" */ + bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + + CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0, + CSTORM_SB_STATUS_BLOCK_U_SIZE / 4); + bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + + CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0, + CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); } -/* - * should be run under rtnl lock - */ -static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) +void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, + dma_addr_t mapping, int sb_id) { - u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int index; + u64 section; - DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + /* USTORM */ + section = ((u64)mapping) + offsetof(struct host_status_block, + u_status_block); + sb->u_status_block.status_block_id = sb_id; - val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK; - REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); - barrier(); - mmiowb(); -} + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section)); + REG_WR(bp, BAR_CSTRORM_INTMEM + + ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4), + U64_HI(section)); + REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF + + CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func); -/* - * should be run under rtnl lock - */ -static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp) -{ - u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); + for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) + REG_WR16(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1); - DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + /* CSTORM */ + section = ((u64)mapping) + offsetof(struct host_status_block, + c_status_block); + sb->c_status_block.status_block_id = sb_id; - val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK; - REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); - barrier(); - mmiowb(); + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section)); + REG_WR(bp, BAR_CSTRORM_INTMEM + + ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4), + U64_HI(section)); + REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF + + CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func); - return val1; -} + for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) + REG_WR16(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); -/* - * should be run under rtnl lock - */ -static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp) -{ - return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK; + bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); } -static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) +static void bnx2x_zero_def_sb(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK)); -} + int func = BP_FUNC(bp); -static inline void _print_next_block(int idx, const char *blk) -{ - if (idx) - pr_cont(", "); - pr_cont("%s", blk); + bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + + TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, + sizeof(struct tstorm_def_status_block)/4); + bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + + CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0, + sizeof(struct cstorm_def_status_block_u)/4); + bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + + CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0, + sizeof(struct cstorm_def_status_block_c)/4); + bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY + + 
XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, + sizeof(struct xstorm_def_status_block)/4); } -static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) +static void bnx2x_init_def_sb(struct bnx2x *bp, + struct host_def_status_block *def_sb, + dma_addr_t mapping, int sb_id) { - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - _print_next_block(par_num++, "BRB"); - break; - case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - _print_next_block(par_num++, "PARSER"); - break; - case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - _print_next_block(par_num++, "TSDM"); - break; - case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - _print_next_block(par_num++, "SEARCHER"); - break; - case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - _print_next_block(par_num++, "TSEMI"); - break; - } + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int index, val, reg_offset; + u64 section; - /* Clear the bit */ - sig &= ~cur_bit; - } + /* ATTN */ + section = ((u64)mapping) + offsetof(struct host_def_status_block, + atten_status_block); + def_sb->atten_status_block.status_block_id = sb_id; + + bp->attn_state = 0; + + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + bp->attn_group[index].sig[0] = REG_RD(bp, + reg_offset + 0x10*index); + bp->attn_group[index].sig[1] = REG_RD(bp, + reg_offset + 0x4 + 0x10*index); + bp->attn_group[index].sig[2] = REG_RD(bp, + reg_offset + 0x8 + 0x10*index); + bp->attn_group[index].sig[3] = REG_RD(bp, + reg_offset + 0xc + 0x10*index); } - return par_num; -} + reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : + HC_REG_ATTN_MSG0_ADDR_L); -static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - _print_next_block(par_num++, "PBCLIENT"); - break; - case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - _print_next_block(par_num++, "QM"); - break; - case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - _print_next_block(par_num++, "XSDM"); - break; - case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - _print_next_block(par_num++, "XSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - _print_next_block(par_num++, "DOORBELLQ"); - break; - case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: - _print_next_block(par_num++, "VAUX PCI CORE"); - break; - case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - _print_next_block(par_num++, "DEBUG"); - break; - case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - _print_next_block(par_num++, "USDM"); - break; - case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - _print_next_block(par_num++, "USEMI"); - break; - case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - _print_next_block(par_num++, "UPB"); - break; - case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - _print_next_block(par_num++, "CSDM"); - break; - } + REG_WR(bp, reg_offset, U64_LO(section)); + REG_WR(bp, reg_offset + 4, U64_HI(section)); - /* Clear the bit */ - sig &= ~cur_bit; - } - } + reg_offset = (port ? 
HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); - return par_num; -} + val = REG_RD(bp, reg_offset); + val |= sb_id; + REG_WR(bp, reg_offset, val); -static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - _print_next_block(par_num++, "CSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - _print_next_block(par_num++, "PXP"); - break; - case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: - _print_next_block(par_num++, - "PXPPCICLOCKCLIENT"); - break; - case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - _print_next_block(par_num++, "CFC"); - break; - case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - _print_next_block(par_num++, "CDU"); - break; - case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - _print_next_block(par_num++, "IGU"); - break; - case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - _print_next_block(par_num++, "MISC"); - break; - } + /* USTORM */ + section = ((u64)mapping) + offsetof(struct host_def_status_block, + u_def_status_block); + def_sb->u_def_status_block.status_block_id = sb_id; - /* Clear the bit */ - sig &= ~cur_bit; - } - } + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section)); + REG_WR(bp, BAR_CSTRORM_INTMEM + + ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4), + U64_HI(section)); + REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF + + CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func); - return par_num; -} + for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) + REG_WR16(bp, BAR_CSTRORM_INTMEM + + CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1); -static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: - _print_next_block(par_num++, "MCP ROM"); - break; - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: - _print_next_block(par_num++, "MCP UMP RX"); - break; - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: - _print_next_block(par_num++, "MCP UMP TX"); - break; - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: - _print_next_block(par_num++, "MCP SCPAD"); - break; - } + /* CSTORM */ + section = ((u64)mapping) + offsetof(struct host_def_status_block, + c_def_status_block); + def_sb->c_def_status_block.status_block_id = sb_id; - /* Clear the bit */ - sig &= ~cur_bit; - } - } + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section)); + REG_WR(bp, BAR_CSTRORM_INTMEM + + ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4), + U64_HI(section)); + REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + + CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func); - return par_num; -} + for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) + REG_WR16(bp, BAR_CSTRORM_INTMEM + + CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1); -static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, - u32 sig2, u32 sig3) -{ - if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) || - (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) { - int par_num = 0; - DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " - "[0]:0x%08x [1]:0x%08x " - "[2]:0x%08x [3]:0x%08x\n", - sig0 & HW_PRTY_ASSERT_SET_0, - sig1 & HW_PRTY_ASSERT_SET_1, - sig2 & 
HW_PRTY_ASSERT_SET_2, - sig3 & HW_PRTY_ASSERT_SET_3); - printk(KERN_ERR"%s: Parity errors detected in blocks: ", - bp->dev->name); - par_num = bnx2x_print_blocks_with_parity0( - sig0 & HW_PRTY_ASSERT_SET_0, par_num); - par_num = bnx2x_print_blocks_with_parity1( - sig1 & HW_PRTY_ASSERT_SET_1, par_num); - par_num = bnx2x_print_blocks_with_parity2( - sig2 & HW_PRTY_ASSERT_SET_2, par_num); - par_num = bnx2x_print_blocks_with_parity3( - sig3 & HW_PRTY_ASSERT_SET_3, par_num); - printk("\n"); - return true; - } else - return false; -} + /* TSTORM */ + section = ((u64)mapping) + offsetof(struct host_def_status_block, + t_def_status_block); + def_sb->t_def_status_block.status_block_id = sb_id; -static bool bnx2x_chk_parity_attn(struct bnx2x *bp) -{ - struct attn_route attn; - int port = BP_PORT(bp); + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); + REG_WR(bp, BAR_TSTRORM_INTMEM + + ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), + U64_HI(section)); + REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + + TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); - attn.sig[0] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + - port*4); - attn.sig[1] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + - port*4); - attn.sig[2] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + - port*4); - attn.sig[3] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + - port*4); + for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) + REG_WR16(bp, BAR_TSTRORM_INTMEM + + TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); - return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2], - attn.sig[3]); + /* XSTORM */ + section = ((u64)mapping) + offsetof(struct host_def_status_block, + x_def_status_block); + def_sb->x_def_status_block.status_block_id = sb_id; + + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); + REG_WR(bp, BAR_XSTRORM_INTMEM + + ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), + U64_HI(section)); + REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + + XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); + + for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) + REG_WR16(bp, BAR_XSTRORM_INTMEM + + XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); + + bp->stats_pending = 0; + bp->set_mac_pending = 0; + + bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); } -static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) +void bnx2x_update_coalesce(struct bnx2x *bp) { - struct attn_route attn, *group_mask; int port = BP_PORT(bp); - int index; - u32 reg_addr; - u32 val; - u32 aeu_mask; + int i; - /* need to take HW lock because MCP or other port might also - try to handle this event */ - bnx2x_acquire_alr(bp); + for_each_queue(bp, i) { + int sb_id = bp->fp[i].sb_id; - if (bnx2x_chk_parity_attn(bp)) { - bp->recovery_state = BNX2X_RECOVERY_INIT; - bnx2x_set_reset_in_progress(bp); - schedule_delayed_work(&bp->reset_task, 0); - /* Disable HW interrupts */ - bnx2x_int_disable(bp); - bnx2x_release_alr(bp); - /* In case of parity errors don't handle attentions so that - * other function would "see" parity errors. - */ - return; + /* HC_INDEX_U_ETH_RX_CQ_CONS */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, + U_SB_ETH_RX_CQ_INDEX), + bp->rx_ticks/(4 * BNX2X_BTR)); + REG_WR16(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, + U_SB_ETH_RX_CQ_INDEX), + (bp->rx_ticks/(4 * BNX2X_BTR)) ? 
0 : 1); + + /* HC_INDEX_C_ETH_TX_CQ_CONS */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, + C_SB_ETH_TX_CQ_INDEX), + bp->tx_ticks/(4 * BNX2X_BTR)); + REG_WR16(bp, BAR_CSTRORM_INTMEM + + CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, + C_SB_ETH_TX_CQ_INDEX), + (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); } +} - attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); - attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); - attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); - attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); - DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", - attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); +static void bnx2x_init_sp_ring(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); - for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { - if (deasserted & (1 << index)) { - group_mask = &bp->attn_group[index]; + spin_lock_init(&bp->spq_lock); - DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", - index, group_mask->sig[0], group_mask->sig[1], - group_mask->sig[2], group_mask->sig[3]); - - bnx2x_attn_int_deasserted3(bp, - attn.sig[3] & group_mask->sig[3]); - bnx2x_attn_int_deasserted1(bp, - attn.sig[1] & group_mask->sig[1]); - bnx2x_attn_int_deasserted2(bp, - attn.sig[2] & group_mask->sig[2]); - bnx2x_attn_int_deasserted0(bp, - attn.sig[0] & group_mask->sig[0]); - } - } - - bnx2x_release_alr(bp); - - reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); - - val = ~deasserted; - DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", - val, reg_addr); - REG_WR(bp, reg_addr, val); - - if (~bp->attn_state & deasserted) - BNX2X_ERR("IGU ERROR\n"); - - reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0; - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - aeu_mask = REG_RD(bp, reg_addr); - - DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", - aeu_mask, deasserted); - aeu_mask |= (deasserted & 0x3ff); - DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + bp->spq_left = MAX_SPQ_PENDING; + bp->spq_prod_idx = 0; + bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; + bp->spq_prod_bd = bp->spq; + bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; - REG_WR(bp, reg_addr, aeu_mask); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func), + U64_LO(bp->spq_mapping)); + REG_WR(bp, + XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4, + U64_HI(bp->spq_mapping)); - DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); - bp->attn_state &= ~deasserted; - DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); + REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func), + bp->spq_prod_idx); } -static void bnx2x_attn_int(struct bnx2x *bp) +static void bnx2x_init_context(struct bnx2x *bp) { - /* read local copy of bits */ - u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. - attn_bits); - u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
- attn_bits_ack); - u32 attn_state = bp->attn_state; - - /* look for changed bits */ - u32 asserted = attn_bits & ~attn_ack & ~attn_state; - u32 deasserted = ~attn_bits & attn_ack & attn_state; - - DP(NETIF_MSG_HW, - "attn_bits %x attn_ack %x asserted %x deasserted %x\n", - attn_bits, attn_ack, asserted, deasserted); + int i; - if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) - BNX2X_ERR("BAD attention state\n"); + /* Rx */ + for_each_queue(bp, i) { + struct eth_context *context = bnx2x_sp(bp, context[i].eth); + struct bnx2x_fastpath *fp = &bp->fp[i]; + u8 cl_id = fp->cl_id; - /* handle bits that were raised */ - if (asserted) - bnx2x_attn_int_asserted(bp, asserted); + context->ustorm_st_context.common.sb_index_numbers = + BNX2X_RX_SB_INDEX_NUM; + context->ustorm_st_context.common.clientId = cl_id; + context->ustorm_st_context.common.status_block_id = fp->sb_id; + context->ustorm_st_context.common.flags = + (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT | + USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS); + context->ustorm_st_context.common.statistics_counter_id = + cl_id; + context->ustorm_st_context.common.mc_alignment_log_size = + BNX2X_RX_ALIGN_SHIFT; + context->ustorm_st_context.common.bd_buff_size = + bp->rx_buf_size; + context->ustorm_st_context.common.bd_page_base_hi = + U64_HI(fp->rx_desc_mapping); + context->ustorm_st_context.common.bd_page_base_lo = + U64_LO(fp->rx_desc_mapping); + if (!fp->disable_tpa) { + context->ustorm_st_context.common.flags |= + USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA; + context->ustorm_st_context.common.sge_buff_size = + (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE, + 0xffff); + context->ustorm_st_context.common.sge_page_base_hi = + U64_HI(fp->rx_sge_mapping); + context->ustorm_st_context.common.sge_page_base_lo = + U64_LO(fp->rx_sge_mapping); - if (deasserted) - bnx2x_attn_int_deasserted(bp, deasserted); -} + context->ustorm_st_context.common.max_sges_for_packet = + SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; + context->ustorm_st_context.common.max_sges_for_packet = + ((context->ustorm_st_context.common. 
+ max_sges_for_packet + PAGES_PER_SGE - 1) & + (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; + } -static void bnx2x_sp_task(struct work_struct *work) -{ - struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); - u16 status; + context->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), + CDU_REGION_NUMBER_UCM_AG, + ETH_CONNECTION_TYPE); - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return; + context->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), + CDU_REGION_NUMBER_XCM_AG, + ETH_CONNECTION_TYPE); } - status = bnx2x_update_dsb_idx(bp); -/* if (status == 0) */ -/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ + /* Tx */ + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct eth_context *context = + bnx2x_sp(bp, context[i].eth); - DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); + context->cstorm_st_context.sb_index_number = + C_SB_ETH_TX_CQ_INDEX; + context->cstorm_st_context.status_block_id = fp->sb_id; - /* HW attentions */ - if (status & 0x1) { - bnx2x_attn_int(bp); - status &= ~0x1; + context->xstorm_st_context.tx_bd_page_base_hi = + U64_HI(fp->tx_desc_mapping); + context->xstorm_st_context.tx_bd_page_base_lo = + U64_LO(fp->tx_desc_mapping); + context->xstorm_st_context.statistics_data = (fp->cl_id | + XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); } +} - /* CStorm events: STAT_QUERY */ - if (status & 0x2) { - DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n"); - status &= ~0x2; - } +static void bnx2x_init_ind_table(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + int i; - if (unlikely(status)) - DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", - status); + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return; - bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), - IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), - IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx), - IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx), - IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), - IGU_INT_ENABLE, 1); + DP(NETIF_MSG_IFUP, + "Initializing indirection table multi_mode %d\n", bp->multi_mode); + for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, + bp->fp->cl_id + (i % bp->num_queues)); } -static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) +void bnx2x_set_client_config(struct bnx2x *bp) { - struct net_device *dev = dev_instance; - struct bnx2x *bp = netdev_priv(dev); + struct tstorm_eth_client_config tstorm_client = {0}; + int port = BP_PORT(bp); + int i; - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return IRQ_HANDLED; + tstorm_client.mtu = bp->dev->mtu; + tstorm_client.config_flags = + (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE | + TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE); +#ifdef BCM_VLAN + if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) { + tstorm_client.config_flags |= + TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE; + DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); } - - bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0); - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; #endif -#ifdef BCM_CNIC - { - struct cnic_ops *c_ops; + for_each_queue(bp, i) { + tstorm_client.statistics_counter_id = bp->fp[i].cl_id; - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - c_ops->cnic_handler(bp->cnic_data, NULL); - rcu_read_unlock(); + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id), + ((u32 *)&tstorm_client)[0]); + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4, + ((u32 *)&tstorm_client)[1]); } -#endif - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - return IRQ_HANDLED; + DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n", + ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); } -/* end of slow path */ - -/* Statistics */ +void bnx2x_set_storm_rx_mode(struct bnx2x *bp) +{ + struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; + int mode = bp->rx_mode; + int mask = bp->rx_mode_cl_mask; + int func = BP_FUNC(bp); + int port = BP_PORT(bp); + int i; + /* All but management unicast packets should pass to the host as well */ + u32 llh_mask = + NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | + NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST | + NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | + NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; -/**************************************************************************** -* Macros -****************************************************************************/ + DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask); -/* sum[hi:lo] += add[hi:lo] */ -#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ - do { \ - s_lo += a_lo; \ - s_hi += a_hi + ((s_lo < a_lo) ? 
1 : 0); \ - } while (0) + switch (mode) { + case BNX2X_RX_MODE_NONE: /* no Rx */ + tstorm_mac_filter.ucast_drop_all = mask; + tstorm_mac_filter.mcast_drop_all = mask; + tstorm_mac_filter.bcast_drop_all = mask; + break; -/* difference = minuend - subtrahend */ -#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ - do { \ - if (m_lo < s_lo) { \ - /* underflow */ \ - d_hi = m_hi - s_hi; \ - if (d_hi > 0) { \ - /* we can 'loan' 1 */ \ - d_hi--; \ - d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ - } else { \ - /* m_hi <= s_hi */ \ - d_hi = 0; \ - d_lo = 0; \ - } \ - } else { \ - /* m_lo >= s_lo */ \ - if (m_hi < s_hi) { \ - d_hi = 0; \ - d_lo = 0; \ - } else { \ - /* m_hi >= s_hi */ \ - d_hi = m_hi - s_hi; \ - d_lo = m_lo - s_lo; \ - } \ - } \ - } while (0) - -#define UPDATE_STAT64(s, t) \ - do { \ - DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ - diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ - pstats->mac_stx[0].t##_hi = new->s##_hi; \ - pstats->mac_stx[0].t##_lo = new->s##_lo; \ - ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ - pstats->mac_stx[1].t##_lo, diff.lo); \ - } while (0) - -#define UPDATE_STAT64_NIG(s, t) \ - do { \ - DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ - diff.lo, new->s##_lo, old->s##_lo); \ - ADD_64(estats->t##_hi, diff.hi, \ - estats->t##_lo, diff.lo); \ - } while (0) - -/* sum[hi:lo] += add */ -#define ADD_EXTEND_64(s_hi, s_lo, a) \ - do { \ - s_lo += a; \ - s_hi += (s_lo < a) ? 1 : 0; \ - } while (0) - -#define UPDATE_EXTEND_STAT(s) \ - do { \ - ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ - pstats->mac_stx[1].s##_lo, \ - new->s); \ - } while (0) + case BNX2X_RX_MODE_NORMAL: + tstorm_mac_filter.bcast_accept_all = mask; + break; -#define UPDATE_EXTEND_TSTAT(s, t) \ - do { \ - diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ - old_tclient->s = tclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) + case BNX2X_RX_MODE_ALLMULTI: + tstorm_mac_filter.mcast_accept_all = mask; + tstorm_mac_filter.bcast_accept_all = mask; + break; -#define UPDATE_EXTEND_USTAT(s, t) \ - do { \ - diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ - old_uclient->s = uclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) + case BNX2X_RX_MODE_PROMISC: + tstorm_mac_filter.ucast_accept_all = mask; + tstorm_mac_filter.mcast_accept_all = mask; + tstorm_mac_filter.bcast_accept_all = mask; + /* pass management unicast packets as well */ + llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; + break; -#define UPDATE_EXTEND_XSTAT(s, t) \ - do { \ - diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ - old_xclient->s = xclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) + default: + BNX2X_ERR("BAD rx mode (%d)\n", mode); + break; + } -/* minuend -= subtrahend */ -#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ - do { \ - DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ - } while (0) + REG_WR(bp, + (port ? 
NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK), + llh_mask); -/* minuend[hi:lo] -= subtrahend */ -#define SUB_EXTEND_64(m_hi, m_lo, s) \ - do { \ - SUB_64(m_hi, 0, m_lo, s); \ - } while (0) + for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, + ((u32 *)&tstorm_mac_filter)[i]); -#define SUB_EXTEND_USTAT(s, t) \ - do { \ - diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ - SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) +/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, + ((u32 *)&tstorm_mac_filter)[i]); */ + } -/* - * General service functions - */ + if (mode != BNX2X_RX_MODE_NONE) + bnx2x_set_client_config(bp); +} -static inline long bnx2x_hilo(u32 *hiref) +static void bnx2x_init_internal_common(struct bnx2x *bp) { - u32 lo = *(hiref + 1); -#if (BITS_PER_LONG == 64) - u32 hi = *hiref; + int i; - return HILO_U64(hi, lo); -#else - return lo; -#endif + /* Zero this manually as its initialization is + currently missing in the initTool */ + for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_AGG_DATA_OFFSET + i * 4, 0); } -/* - * Init service functions - */ - -static void bnx2x_storm_stats_post(struct bnx2x *bp) +static void bnx2x_init_internal_port(struct bnx2x *bp) { - if (!bp->stats_pending) { - struct eth_query_ramrod_data ramrod_data = {0}; - int i, rc; - - spin_lock_bh(&bp->stats_lock); - - ramrod_data.drv_counter = bp->stats_counter++; - ramrod_data.collect_port = bp->port.pmf ? 1 : 0; - for_each_queue(bp, i) - ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); - - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, - ((u32 *)&ramrod_data)[1], - ((u32 *)&ramrod_data)[0], 0); - if (rc == 0) { - /* stats ramrod has it's own slot on the spq */ - bp->spq_left++; - bp->stats_pending = 1; - } + int port = BP_PORT(bp); - spin_unlock_bh(&bp->stats_lock); - } + REG_WR(bp, + BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR); + REG_WR(bp, + BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR); + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); } -static void bnx2x_hw_stats_post(struct bnx2x *bp) +static void bnx2x_init_internal_func(struct bnx2x *bp) { - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); + struct tstorm_eth_function_common_config tstorm_config = {0}; + struct stats_indication_flags stats_flags = {0}; + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int i, j; + u32 offset; + u16 max_agg_size; - *stats_comp = DMAE_COMP_VAL; - if (CHIP_REV_IS_SLOW(bp)) - return; + tstorm_config.config_flags = RSS_FLAGS(bp); - /* loader */ - if (bp->executer_idx) { - int loader_idx = PMF_DMAE_C(bp); + if (is_multi(bp)) + tstorm_config.rss_result_mask = MULTI_MASK; - memset(dmae, 0, sizeof(struct dmae_command)); + /* Enable TPA if needed */ + if (bp->flags & TPA_ENABLE_FLAG) + tstorm_config.config_flags |= + TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; - dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | - DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | - DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : - DMAE_CMD_PORT_0) | - (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); - dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + - sizeof(struct dmae_command) * - (loader_idx + 1)) >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct dmae_command) >> 2; - if (CHIP_IS_E1(bp)) - dmae->len--; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; + if (IS_E1HMF(bp)) + tstorm_config.config_flags |= + TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM; - *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, loader_idx); + tstorm_config.leading_client_id = BP_L_ID(bp); - } else if (bp->func_stx) { - *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); - } -} + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), + (*(u32 *)&tstorm_config)); -static int bnx2x_stats_comp(struct bnx2x *bp) -{ - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - int cnt = 10; + bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ + bp->rx_mode_cl_mask = (1 << BP_L_ID(bp)); + bnx2x_set_storm_rx_mode(bp); - might_sleep(); - while (*stats_comp != DMAE_COMP_VAL) { - if (!cnt) { - BNX2X_ERR("timeout waiting for stats finished\n"); - break; - } - cnt--; - msleep(1); - } - return 1; -} + for_each_queue(bp, i) { + u8 cl_id = bp->fp[i].cl_id; -/* - * Statistics service functions - */ + /* reset xstorm per client statistics */ + offset = BAR_XSTRORM_INTMEM + + XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); + for (j = 0; + j < sizeof(struct xstorm_per_client_stats) / 4; j++) + REG_WR(bp, offset + j*4, 0); -static void bnx2x_stats_pmf_update(struct bnx2x *bp) -{ - struct dmae_command *dmae; - u32 opcode; - int loader_idx = PMF_DMAE_C(bp); - u32 *stats_comp = bnx2x_sp(bp, stats_comp); + /* reset tstorm per client statistics */ + offset = BAR_TSTRORM_INTMEM + + TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); + for (j = 0; + j < sizeof(struct tstorm_per_client_stats) / 4; j++) + REG_WR(bp, offset + j*4, 0); - /* sanity */ - if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { - BNX2X_ERR("BUG!\n"); - return; + /* reset ustorm per client statistics */ + offset = BAR_USTRORM_INTMEM + + USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); + for (j = 0; + j < sizeof(struct ustorm_per_client_stats) / 4; j++) + REG_WR(bp, offset + j*4, 0); } - bp->executer_idx = 0; + /* Init statistics related context */ + stats_flags.collect_eth = 1; - opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | - DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); - dmae->src_addr_lo = bp->port.port_stx >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->len = DMAE_LEN32_RD_MAX; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); - dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + - DMAE_LEN32_RD_MAX * 4); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + - DMAE_LEN32_RD_MAX * 4); - dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); -} - -static void bnx2x_port_stats_init(struct bnx2x *bp) -{ - struct dmae_command *dmae; - int port = BP_PORT(bp); - int vn = BP_E1HVN(bp); - u32 opcode; - int loader_idx = PMF_DMAE_C(bp); - u32 mac_addr; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->link_vars.link_up || !bp->port.pmf) { - BNX2X_ERR("BUG!\n"); - return; - } + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), + ((u32 *)&stats_flags)[0]); + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4, + ((u32 *)&stats_flags)[1]); - bp->executer_idx = 0; + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), + ((u32 *)&stats_flags)[0]); + REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4, + ((u32 *)&stats_flags)[1]); - /* MCP */ - opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | - DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (vn << DMAE_CMD_E1HVN_SHIFT)); - - if (bp->port.port_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_lo = bp->port.port_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - if (bp->func_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); - dmae->dst_addr_lo = bp->func_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - /* MAC */ - opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | - DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (port ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (vn << DMAE_CMD_E1HVN_SHIFT)); - - if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { - - mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM); - - /* BIGMAC_REGISTER_TX_STAT_GTPKT .. - BIGMAC_REGISTER_TX_STAT_GTBYT */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); - dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* BIGMAC_REGISTER_RX_STAT_GR64 .. - BIGMAC_REGISTER_RX_STAT_GRIPJ */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct bmac_stats, rx_stat_gr64_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct bmac_stats, rx_stat_gr64_lo)); - dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { - - mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); - - /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - EMAC_REG_EMAC_RX_STAT_AC) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); - dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* EMAC_REG_EMAC_RX_STAT_AC_28 */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, rx_stat_falsecarriererrors)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, rx_stat_falsecarriererrors)); - dmae->len = 1; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - EMAC_REG_EMAC_TX_STAT_AC) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); - dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - /* NIG */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_BRB_DISCARD : - NIG_REG_STAT0_BRB_DISCARD) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); - dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : - NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt0_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt0_lo)); - dmae->len = (2*sizeof(u32)) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | - DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (vn << DMAE_CMD_E1HVN_SHIFT)); - dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : - NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt1_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt1_lo)); - dmae->len = (2*sizeof(u32)) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; -} - -static void bnx2x_func_stats_init(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } + REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func), + ((u32 *)&stats_flags)[0]); + REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4, + ((u32 *)&stats_flags)[1]); - bp->executer_idx = 0; - memset(dmae, 0, sizeof(struct dmae_command)); + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), + ((u32 *)&stats_flags)[0]); + REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4, + ((u32 *)&stats_flags)[1]); - dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | - DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); - dmae->dst_addr_lo = bp->func_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), + U64_LO(bnx2x_sp_mapping(bp, fw_stats))); + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, + U64_HI(bnx2x_sp_mapping(bp, fw_stats))); - *stats_comp = 0; -} + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), + U64_LO(bnx2x_sp_mapping(bp, fw_stats))); + REG_WR(bp, BAR_TSTRORM_INTMEM + + TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, + U64_HI(bnx2x_sp_mapping(bp, fw_stats))); -static void bnx2x_stats_start(struct bnx2x *bp) -{ - if (bp->port.pmf) - bnx2x_port_stats_init(bp); - - else if (bp->func_stx) - bnx2x_func_stats_init(bp); - - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); -} - -static void bnx2x_stats_pmf_start(struct bnx2x *bp) -{ - bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); -} - -static void bnx2x_stats_restart(struct bnx2x *bp) -{ - bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); -} - -static void bnx2x_bmac_stats_update(struct bnx2x *bp) -{ - struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct { - u32 lo; - u32 hi; - } diff; - - UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); - UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); - UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); - UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); - UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); - UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); - UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); - UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); - UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); - UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); - UPDATE_STAT64(tx_stat_gt127, - tx_stat_etherstatspkts65octetsto127octets); - UPDATE_STAT64(tx_stat_gt255, - tx_stat_etherstatspkts128octetsto255octets); - UPDATE_STAT64(tx_stat_gt511, - tx_stat_etherstatspkts256octetsto511octets); - UPDATE_STAT64(tx_stat_gt1023, - tx_stat_etherstatspkts512octetsto1023octets); - UPDATE_STAT64(tx_stat_gt1518, - tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); - UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); - UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); - UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); - UPDATE_STAT64(tx_stat_gterr, - tx_stat_dot3statsinternalmactransmiterrors); - UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); - - estats->pause_frames_received_hi = - pstats->mac_stx[1].rx_stat_bmac_xpf_hi; - estats->pause_frames_received_lo = - pstats->mac_stx[1].rx_stat_bmac_xpf_lo; - - estats->pause_frames_sent_hi = - pstats->mac_stx[1].tx_stat_outxoffsent_hi; - estats->pause_frames_sent_lo = - pstats->mac_stx[1].tx_stat_outxoffsent_lo; -} - -static void 
bnx2x_emac_stats_update(struct bnx2x *bp) -{ - struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - - UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); - UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); - UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); - UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); - UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); - UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); - UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); - UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); - UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); - UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); - UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); - UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); - UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); - UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); - UPDATE_EXTEND_STAT(tx_stat_outxonsent); - UPDATE_EXTEND_STAT(tx_stat_outxoffsent); - UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); - UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); - UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); - UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); - UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); - UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); - UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); - UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); - - estats->pause_frames_received_hi = - pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; - estats->pause_frames_received_lo = - pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; - ADD_64(estats->pause_frames_received_hi, - pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, - estats->pause_frames_received_lo, - pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); - - estats->pause_frames_sent_hi = - pstats->mac_stx[1].tx_stat_outxonsent_hi; - estats->pause_frames_sent_lo = - pstats->mac_stx[1].tx_stat_outxonsent_lo; - ADD_64(estats->pause_frames_sent_hi, - pstats->mac_stx[1].tx_stat_outxoffsent_hi, - estats->pause_frames_sent_lo, - pstats->mac_stx[1].tx_stat_outxoffsent_lo); -} - -static int bnx2x_hw_stats_update(struct bnx2x *bp) -{ - struct nig_stats *new = bnx2x_sp(bp, nig_stats); - struct nig_stats *old = &(bp->port.old_nig_stats); - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct { - u32 lo; - u32 hi; - } diff; - - if (bp->link_vars.mac_type == MAC_TYPE_BMAC) - bnx2x_bmac_stats_update(bp); - - else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) - bnx2x_emac_stats_update(bp); - - else { /* unreached */ - BNX2X_ERR("stats updated by DMAE but no MAC active\n"); - return -1; - } + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), + U64_LO(bnx2x_sp_mapping(bp, fw_stats))); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, + U64_HI(bnx2x_sp_mapping(bp, fw_stats))); - ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, - 
new->brb_discard - old->brb_discard); - ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, - new->brb_truncate - old->brb_truncate); + if (CHIP_IS_E1H(bp)) { + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, + IS_E1HMF(bp)); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, + IS_E1HMF(bp)); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, + IS_E1HMF(bp)); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, + IS_E1HMF(bp)); - UPDATE_STAT64_NIG(egress_mac_pkt0, - etherstatspkts1024octetsto1522octets); - UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); + REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func), + bp->e1hov); + } - memcpy(old, new, sizeof(struct nig_stats)); + /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ + max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * + SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; - memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), - sizeof(struct mac_stx)); - estats->brb_drop_hi = pstats->brb_drop_hi; - estats->brb_drop_lo = pstats->brb_drop_lo; + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id), + U64_LO(fp->rx_comp_mapping)); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4, + U64_HI(fp->rx_comp_mapping)); - pstats->host_port_stats_start = ++pstats->host_port_stats_end; + /* Next page */ + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id), + U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE)); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, + U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); - if (!BP_NOMCP(bp)) { - u32 nig_timer_max = - SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); - if (nig_timer_max != estats->nig_timer_max) { - estats->nig_timer_max = nig_timer_max; - BNX2X_ERR("NIG timer max (%u)\n", - estats->nig_timer_max); - } + REG_WR16(bp, BAR_USTRORM_INTMEM + + USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), + max_agg_size); } - return 0; -} + /* dropless flow control */ + if (CHIP_IS_E1H(bp)) { + struct ustorm_eth_rx_pause_data_e1h rx_pause = {0}; -static int bnx2x_storm_stats_update(struct bnx2x *bp) -{ - struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); - struct tstorm_per_port_stats *tport = - &stats->tstorm_common.port_statistics; - struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i; - u16 cur_stats_counter; + rx_pause.bd_thr_low = 250; + rx_pause.cqe_thr_low = 250; + rx_pause.cos = 1; + rx_pause.sge_thr_low = 0; + rx_pause.bd_thr_high = 350; + rx_pause.cqe_thr_high = 350; + rx_pause.sge_thr_high = 0; - /* Make sure we use the value of the counter - * used for sending the last stats ramrod. 
- */ - spin_lock_bh(&bp->stats_lock); - cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); - - memcpy(&(fstats->total_bytes_received_hi), - &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); - estats->error_bytes_received_hi = 0; - estats->error_bytes_received_lo = 0; - estats->etherstatsoverrsizepkts_hi = 0; - estats->etherstatsoverrsizepkts_lo = 0; - estats->no_buff_discard_hi = 0; - estats->no_buff_discard_lo = 0; + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - int cl_id = fp->cl_id; - struct tstorm_per_client_stats *tclient = - &stats->tstorm_common.client_statistics[cl_id]; - struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; - struct ustorm_per_client_stats *uclient = - &stats->ustorm_common.client_statistics[cl_id]; - struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; - struct xstorm_per_client_stats *xclient = - &stats->xstorm_common.client_statistics[cl_id]; - struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - u32 diff; - - /* are storm stats valid? */ - if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" - " xstorm counter (0x%x) != stats_counter (0x%x)\n", - i, xclient->stats_counter, cur_stats_counter + 1); - return -1; - } - if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" - " tstorm counter (0x%x) != stats_counter (0x%x)\n", - i, tclient->stats_counter, cur_stats_counter + 1); - return -2; - } - if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" - " ustorm counter (0x%x) != stats_counter (0x%x)\n", - i, uclient->stats_counter, cur_stats_counter + 1); - return -4; - } + if (!fp->disable_tpa) { + rx_pause.sge_thr_low = 150; + rx_pause.sge_thr_high = 250; + } - qstats->total_bytes_received_hi = - le32_to_cpu(tclient->rcv_broadcast_bytes.hi); - qstats->total_bytes_received_lo = - le32_to_cpu(tclient->rcv_broadcast_bytes.lo); - - ADD_64(qstats->total_bytes_received_hi, - le32_to_cpu(tclient->rcv_multicast_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(tclient->rcv_multicast_bytes.lo)); - - ADD_64(qstats->total_bytes_received_hi, - le32_to_cpu(tclient->rcv_unicast_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(tclient->rcv_unicast_bytes.lo)); - - SUB_64(qstats->total_bytes_received_hi, - le32_to_cpu(uclient->bcast_no_buff_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); - - SUB_64(qstats->total_bytes_received_hi, - le32_to_cpu(uclient->mcast_no_buff_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); - - SUB_64(qstats->total_bytes_received_hi, - le32_to_cpu(uclient->ucast_no_buff_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); - - qstats->valid_bytes_received_hi = - qstats->total_bytes_received_hi; - qstats->valid_bytes_received_lo = - qstats->total_bytes_received_lo; - - qstats->error_bytes_received_hi = - le32_to_cpu(tclient->rcv_error_bytes.hi); - qstats->error_bytes_received_lo = - le32_to_cpu(tclient->rcv_error_bytes.lo); - - ADD_64(qstats->total_bytes_received_hi, - qstats->error_bytes_received_hi, - qstats->total_bytes_received_lo, - 
qstats->error_bytes_received_lo); - - UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, - total_unicast_packets_received); - UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, - total_multicast_packets_received); - UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, - total_broadcast_packets_received); - UPDATE_EXTEND_TSTAT(packets_too_big_discard, - etherstatsoverrsizepkts); - UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); - - SUB_EXTEND_USTAT(ucast_no_buff_pkts, - total_unicast_packets_received); - SUB_EXTEND_USTAT(mcast_no_buff_pkts, - total_multicast_packets_received); - SUB_EXTEND_USTAT(bcast_no_buff_pkts, - total_broadcast_packets_received); - UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); - - qstats->total_bytes_transmitted_hi = - le32_to_cpu(xclient->unicast_bytes_sent.hi); - qstats->total_bytes_transmitted_lo = - le32_to_cpu(xclient->unicast_bytes_sent.lo); - - ADD_64(qstats->total_bytes_transmitted_hi, - le32_to_cpu(xclient->multicast_bytes_sent.hi), - qstats->total_bytes_transmitted_lo, - le32_to_cpu(xclient->multicast_bytes_sent.lo)); - - ADD_64(qstats->total_bytes_transmitted_hi, - le32_to_cpu(xclient->broadcast_bytes_sent.hi), - qstats->total_bytes_transmitted_lo, - le32_to_cpu(xclient->broadcast_bytes_sent.lo)); - - UPDATE_EXTEND_XSTAT(unicast_pkts_sent, - total_unicast_packets_transmitted); - UPDATE_EXTEND_XSTAT(multicast_pkts_sent, - total_multicast_packets_transmitted); - UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, - total_broadcast_packets_transmitted); - - old_tclient->checksum_discard = tclient->checksum_discard; - old_tclient->ttl0_discard = tclient->ttl0_discard; - - ADD_64(fstats->total_bytes_received_hi, - qstats->total_bytes_received_hi, - fstats->total_bytes_received_lo, - qstats->total_bytes_received_lo); - ADD_64(fstats->total_bytes_transmitted_hi, - qstats->total_bytes_transmitted_hi, - fstats->total_bytes_transmitted_lo, - qstats->total_bytes_transmitted_lo); - ADD_64(fstats->total_unicast_packets_received_hi, - qstats->total_unicast_packets_received_hi, - fstats->total_unicast_packets_received_lo, - qstats->total_unicast_packets_received_lo); - ADD_64(fstats->total_multicast_packets_received_hi, - qstats->total_multicast_packets_received_hi, - fstats->total_multicast_packets_received_lo, - qstats->total_multicast_packets_received_lo); - ADD_64(fstats->total_broadcast_packets_received_hi, - qstats->total_broadcast_packets_received_hi, - fstats->total_broadcast_packets_received_lo, - qstats->total_broadcast_packets_received_lo); - ADD_64(fstats->total_unicast_packets_transmitted_hi, - qstats->total_unicast_packets_transmitted_hi, - fstats->total_unicast_packets_transmitted_lo, - qstats->total_unicast_packets_transmitted_lo); - ADD_64(fstats->total_multicast_packets_transmitted_hi, - qstats->total_multicast_packets_transmitted_hi, - fstats->total_multicast_packets_transmitted_lo, - qstats->total_multicast_packets_transmitted_lo); - ADD_64(fstats->total_broadcast_packets_transmitted_hi, - qstats->total_broadcast_packets_transmitted_hi, - fstats->total_broadcast_packets_transmitted_lo, - qstats->total_broadcast_packets_transmitted_lo); - ADD_64(fstats->valid_bytes_received_hi, - qstats->valid_bytes_received_hi, - fstats->valid_bytes_received_lo, - qstats->valid_bytes_received_lo); - - ADD_64(estats->error_bytes_received_hi, - qstats->error_bytes_received_hi, - estats->error_bytes_received_lo, - qstats->error_bytes_received_lo); - 
ADD_64(estats->etherstatsoverrsizepkts_hi, - qstats->etherstatsoverrsizepkts_hi, - estats->etherstatsoverrsizepkts_lo, - qstats->etherstatsoverrsizepkts_lo); - ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, - estats->no_buff_discard_lo, qstats->no_buff_discard_lo); - } - - ADD_64(fstats->total_bytes_received_hi, - estats->rx_stat_ifhcinbadoctets_hi, - fstats->total_bytes_received_lo, - estats->rx_stat_ifhcinbadoctets_lo); - - memcpy(estats, &(fstats->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); - - ADD_64(estats->etherstatsoverrsizepkts_hi, - estats->rx_stat_dot3statsframestoolong_hi, - estats->etherstatsoverrsizepkts_lo, - estats->rx_stat_dot3statsframestoolong_lo); - ADD_64(estats->error_bytes_received_hi, - estats->rx_stat_ifhcinbadoctets_hi, - estats->error_bytes_received_lo, - estats->rx_stat_ifhcinbadoctets_lo); - if (bp->port.pmf) { - estats->mac_filter_discard = - le32_to_cpu(tport->mac_filter_discard); - estats->xxoverflow_discard = - le32_to_cpu(tport->xxoverflow_discard); - estats->brb_truncate_discard = - le32_to_cpu(tport->brb_truncate_discard); - estats->mac_discard = le32_to_cpu(tport->mac_discard); + offset = BAR_USTRORM_INTMEM + + USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, + fp->cl_id); + for (j = 0; + j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4; + j++) + REG_WR(bp, offset + j*4, + ((u32 *)&rx_pause)[j]); + } } - fstats->host_func_stats_start = ++fstats->host_func_stats_end; - - bp->stats_pending = 0; - - return 0; -} - -static void bnx2x_net_stats_update(struct bnx2x *bp) -{ - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct net_device_stats *nstats = &bp->dev->stats; - int i; - - nstats->rx_packets = - bnx2x_hilo(&estats->total_unicast_packets_received_hi) + - bnx2x_hilo(&estats->total_multicast_packets_received_hi) + - bnx2x_hilo(&estats->total_broadcast_packets_received_hi); + memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); - nstats->tx_packets = - bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + - bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + - bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); + /* Init rate shaping and fairness contexts */ + if (IS_E1HMF(bp)) { + int vn; - nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); + /* During init there is no active link + Until link is up, set link rate to 10Gbps */ + bp->link_vars.line_speed = SPEED_10000; + bnx2x_init_port_minmax(bp); - nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); + if (!BP_NOMCP(bp)) + bp->mf_config = + SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); + bnx2x_calc_vn_weight_sum(bp); - nstats->rx_dropped = estats->mac_discard; - for_each_queue(bp, i) - nstats->rx_dropped += - le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); - - nstats->tx_dropped = 0; - - nstats->multicast = - bnx2x_hilo(&estats->total_multicast_packets_received_hi); - - nstats->collisions = - bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); - - nstats->rx_length_errors = - bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + - bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); - nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + - bnx2x_hilo(&estats->brb_truncate_hi); - nstats->rx_crc_errors = - bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); - nstats->rx_frame_errors = - bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); - nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); - nstats->rx_missed_errors = estats->xxoverflow_discard; - - 
nstats->rx_errors = nstats->rx_length_errors + - nstats->rx_over_errors + - nstats->rx_crc_errors + - nstats->rx_frame_errors + - nstats->rx_fifo_errors + - nstats->rx_missed_errors; - - nstats->tx_aborted_errors = - bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + - bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); - nstats->tx_carrier_errors = - bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); - nstats->tx_fifo_errors = 0; - nstats->tx_heartbeat_errors = 0; - nstats->tx_window_errors = 0; - - nstats->tx_errors = nstats->tx_aborted_errors + - nstats->tx_carrier_errors + - bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); -} - -static void bnx2x_drv_stats_update(struct bnx2x *bp) -{ - struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i; + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, 2*vn + port); - estats->driver_xoff = 0; - estats->rx_err_discard_pkt = 0; - estats->rx_skb_alloc_failed = 0; - estats->hw_csum_err = 0; - for_each_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; + /* Enable rate shaping and fairness */ + bp->cmng.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; - estats->driver_xoff += qstats->driver_xoff; - estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; - estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; - estats->hw_csum_err += qstats->hw_csum_err; + } else { + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode minmax will be disabled\n"); } -} - -static void bnx2x_stats_update(struct bnx2x *bp) -{ - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - if (*stats_comp != DMAE_COMP_VAL) - return; + /* Store cmng structures to internal memory */ if (bp->port.pmf) - bnx2x_hw_stats_update(bp); - - if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); - return; - } - - bnx2x_net_stats_update(bp); - bnx2x_drv_stats_update(bp); + for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4, + ((u32 *)(&bp->cmng))[i]); +} - if (netif_msg_timer(bp)) { - struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i; +static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) +{ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + bnx2x_init_internal_common(bp); + /* no break */ - printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", - bp->dev->name, - estats->brb_drop_lo, estats->brb_truncate_lo); + case FW_MSG_CODE_DRV_LOAD_PORT: + bnx2x_init_internal_port(bp); + /* no break */ - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - - printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" - " rx pkt(%lu) rx calls(%lu %lu)\n", - fp->name, (le16_to_cpu(*fp->rx_cons_sb) - - fp->rx_comp_cons), - le16_to_cpu(*fp->rx_cons_sb), - bnx2x_hilo(&qstats-> - total_unicast_packets_received_hi), - fp->rx_calls, fp->rx_pkt); - } + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + bnx2x_init_internal_func(bp); + break; - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - struct netdev_queue *txq = - netdev_get_tx_queue(bp->dev, i); - - printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" - " tx pkt(%lu) tx calls (%lu)" - " %s (Xoff events %u)\n", - fp->name, bnx2x_tx_avail(fp), - le16_to_cpu(*fp->tx_cons_sb), - 
bnx2x_hilo(&qstats-> - total_unicast_packets_transmitted_hi), - fp->tx_pkt, - (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"), - qstats->driver_xoff); - } + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + break; } - - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); } -static void bnx2x_port_stats_stop(struct bnx2x *bp) +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) { - struct dmae_command *dmae; - u32 opcode; - int loader_idx = PMF_DMAE_C(bp); - u32 *stats_comp = bnx2x_sp(bp, stats_comp); + int i; - bp->executer_idx = 0; + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; - opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | - DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | + fp->bp = bp; + fp->state = BNX2X_FP_STATE_CLOSED; + fp->index = i; + fp->cl_id = BP_L_ID(bp) + i; +#ifdef BCM_CNIC + fp->sb_id = fp->cl_id + 1; #else - DMAE_CMD_ENDIANITY_DW_SWAP | + fp->sb_id = fp->cl_id; #endif - (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); - - if (bp->port.port_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - if (bp->func_stx) - dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); - else - dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_lo = bp->port.port_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; - if (bp->func_stx) { - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } else { - dmae->comp_addr_lo = - U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = - U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - } + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", + i, bp, fp->status_blk, fp->cl_id, fp->sb_id); + bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, + fp->sb_id); + bnx2x_update_fpsb_idx(fp); } - if (bp->func_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); - dmae->dst_addr_lo = bp->func_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; + /* ensure status block indices were read */ + rmb(); - *stats_comp = 0; - } -} -static void bnx2x_stats_stop(struct bnx2x *bp) -{ - int update = 0; + bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping, + DEF_SB_ID); + bnx2x_update_dsb_idx(bp); + bnx2x_update_coalesce(bp); + bnx2x_init_rx_rings(bp); + bnx2x_init_tx_ring(bp); + bnx2x_init_sp_ring(bp); + bnx2x_init_context(bp); + bnx2x_init_internal(bp, load_code); + bnx2x_init_ind_table(bp); + bnx2x_stats_init(bp); - bnx2x_stats_comp(bp); + /* At this point, we are ready for interrupts */ + atomic_set(&bp->intr_sem, 0); - if (bp->port.pmf) - update = (bnx2x_hw_stats_update(bp) == 0); + /* flush all before enabling interrupts */ + mb(); + mmiowb(); - update |= (bnx2x_storm_stats_update(bp) == 0); + bnx2x_int_enable(bp); - if (update) { - bnx2x_net_stats_update(bp); + /* Check for SPIO5 */ + 
bnx2x_attn_int_deasserted0(bp, + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & + AEU_INPUTS_ATTN_BITS_SPIO5); +} - if (bp->port.pmf) - bnx2x_port_stats_stop(bp); +/* end of nic init */ - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); - } -} +/* + * gzip service functions + */ -static void bnx2x_stats_do_nothing(struct bnx2x *bp) +static int bnx2x_gunzip_init(struct bnx2x *bp) { -} + bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, + &bp->gunzip_mapping, GFP_KERNEL); + if (bp->gunzip_buf == NULL) + goto gunzip_nomem1; -static const struct { - void (*action)(struct bnx2x *bp); - enum bnx2x_stats_state next_state; -} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { -/* state event */ -{ -/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, -/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, -/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, -/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} -}, -{ -/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, -/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, -/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, -/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} -} -}; + bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); + if (bp->strm == NULL) + goto gunzip_nomem2; -static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) -{ - enum bnx2x_stats_state state; + bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), + GFP_KERNEL); + if (bp->strm->workspace == NULL) + goto gunzip_nomem3; - if (unlikely(bp->panic)) - return; + return 0; - /* Protect a state change flow */ - spin_lock_bh(&bp->stats_lock); - state = bp->stats_state; - bp->stats_state = bnx2x_stats_stm[state][event].next_state; - spin_unlock_bh(&bp->stats_lock); +gunzip_nomem3: + kfree(bp->strm); + bp->strm = NULL; - bnx2x_stats_stm[state][event].action(bp); +gunzip_nomem2: + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; - if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) - DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", - state, event, bp->stats_state); +gunzip_nomem1: + netdev_err(bp->dev, "Cannot allocate firmware buffer for" + " un-compression\n"); + return -ENOMEM; } -static void bnx2x_port_stats_base_init(struct bnx2x *bp) +static void bnx2x_gunzip_end(struct bnx2x *bp) { - struct dmae_command *dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->port.pmf || !bp->port.port_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; + kfree(bp->strm->workspace); - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | - DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_lo = bp->port.port_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; + kfree(bp->strm); + bp->strm = NULL; - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); + if (bp->gunzip_buf) { + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + } } -static void bnx2x_func_stats_base_init(struct bnx2x *bp) +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) { - int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX; - int port = BP_PORT(bp); - int func; - u32 func_stx; + int n, rc; - /* sanity */ - if (!bp->port.pmf || !bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; + /* check gzip header */ + if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { + BNX2X_ERR("Bad gzip header\n"); + return -EINVAL; } - /* save our func_stx */ - func_stx = bp->func_stx; + n = 10; - for (vn = VN_0; vn < vn_max; vn++) { - func = 2*vn + port; +#define FNAME 0x8 - bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); - bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); - } + if (zbuf[3] & FNAME) + while ((zbuf[n++] != 0) && (n < len)); - /* restore our func_stx */ - bp->func_stx = func_stx; -} + bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; + bp->strm->avail_in = len - n; + bp->strm->next_out = bp->gunzip_buf; + bp->strm->avail_out = FW_BUF_SIZE; -static void bnx2x_func_stats_base_update(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); + rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); + if (rc != Z_OK) + return rc; - /* sanity */ - if (!bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } + rc = zlib_inflate(bp->strm, Z_FINISH); + if ((rc != Z_OK) && (rc != Z_STREAM_END)) + netdev_err(bp->dev, "Firmware decompression error: %s\n", + bp->strm->msg); - bp->executer_idx = 0; - memset(dmae, 0, sizeof(struct dmae_command)); + bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); + if (bp->gunzip_outlen & 0x3) + netdev_err(bp->dev, "Firmware decompression error:" + " gunzip_outlen (%d) not aligned\n", + bp->gunzip_outlen); + bp->gunzip_outlen >>= 2; - dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | - DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | - DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | -#ifdef __BIG_ENDIAN - DMAE_CMD_ENDIANITY_B_DW_SWAP | -#else - DMAE_CMD_ENDIANITY_DW_SWAP | -#endif - (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | - (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); - dmae->src_addr_lo = bp->func_stx >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; + zlib_inflateEnd(bp->strm); + + if (rc == Z_STREAM_END) + return 0; - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); + return rc; } -static void bnx2x_stats_init(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int i; +/* nic load/unload */ - bp->stats_pending = 0; - bp->executer_idx = 0; - bp->stats_counter = 0; +/* + * General service functions + */ - /* port and func stats for management */ - if (!BP_NOMCP(bp)) { - bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); - bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); - - } else { - bp->port.port_stx = 0; - bp->func_stx = 0; - } - DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", - bp->port.port_stx, bp->func_stx); - - /* port stats */ - memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); - bp->port.old_nig_stats.brb_discard = - REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); - bp->port.old_nig_stats.brb_truncate = - REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); - REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, - &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); - REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, - &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); - - /* function stats */ - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; +/* send a NIG loopback debug packet */ +static void bnx2x_lb_pckt(struct bnx2x *bp) +{ + u32 wb_write[3]; - memset(&fp->old_tclient, 0, - sizeof(struct tstorm_per_client_stats)); - memset(&fp->old_uclient, 0, - sizeof(struct ustorm_per_client_stats)); - memset(&fp->old_xclient, 0, - sizeof(struct xstorm_per_client_stats)); - memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); - } + /* Ethernet source and destination addresses */ + wb_write[0] = 0x55555555; + wb_write[1] = 0x55555555; + wb_write[2] = 0x20; /* SOP */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); - memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); - memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); + /* NON-IP protocol */ + wb_write[0] = 0x09000000; + wb_write[1] = 0x55555555; + wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); +} - bp->stats_state = STATS_STATE_DISABLED; +/* some of the internal memories + * are not directly readable from the driver + * to test them we send debug packets + */ +static int bnx2x_int_mem_test(struct bnx2x *bp) +{ + int factor; + int count, i; + u32 val = 0; - if (bp->port.pmf) { - if (bp->port.port_stx) - bnx2x_port_stats_base_init(bp); + if (CHIP_REV_IS_FPGA(bp)) + factor = 120; + else if (CHIP_REV_IS_EMUL(bp)) + factor = 200; + else + factor = 1; - if (bp->func_stx) - bnx2x_func_stats_base_init(bp); + DP(NETIF_MSG_HW, "start part1\n"); - } else if (bp->func_stx) - bnx2x_func_stats_base_update(bp); -} + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 
-static void bnx2x_timer(unsigned long data) -{ - struct bnx2x *bp = (struct bnx2x *) data; + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); - if (!netif_running(bp->dev)) - return; + /* send Ethernet packet */ + bnx2x_lb_pckt(bp); - if (atomic_read(&bp->intr_sem) != 0) - goto timer_restart; + /* TODO do i reset NIG statistic? */ + /* Wait until NIG register shows 1 packet of size 0x10 */ + count = 1000 * factor; + while (count) { - if (poll) { - struct bnx2x_fastpath *fp = &bp->fp[0]; - int rc; + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0x10) + break; - bnx2x_tx_int(fp); - rc = bnx2x_rx_int(fp, 1000); + msleep(10); + count--; + } + if (val != 0x10) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -1; } - if (!BP_NOMCP(bp)) { - int func = BP_FUNC(bp); - u32 drv_pulse; - u32 mcp_pulse; - - ++bp->fw_drv_pulse_wr_seq; - bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; - /* TBD - add SYSTEM_TIME */ - drv_pulse = bp->fw_drv_pulse_wr_seq; - SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); + /* Wait until PRS register shows 1 packet */ + count = 1000 * factor; + while (count) { + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val == 1) + break; - mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & - MCP_PULSE_SEQ_MASK); - /* The delta between driver pulse and mcp response - * should be 1 (before mcp response) or 0 (after mcp response) - */ - if ((drv_pulse != mcp_pulse) && - (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { - /* someone lost a heartbeat... */ - BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", - drv_pulse, mcp_pulse); - } + msleep(10); + count--; + } + if (val != 0x1) { + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + return -2; } - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); - -timer_restart: - mod_timer(&bp->timer, jiffies + bp->current_interval); -} + /* Reset and init BRB, PRS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); -/* end of Statistics */ + DP(NETIF_MSG_HW, "part2\n"); -/* nic init */ + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); -/* - * nic init service functions - */ + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); -static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) -{ - int port = BP_PORT(bp); + /* send 10 Ethernet packets */ + for (i = 0; i < 10; i++) + bnx2x_lb_pckt(bp); - /* "CSTORM" */ - bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + - CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0, - CSTORM_SB_STATUS_BLOCK_U_SIZE / 4); - bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + - CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0, - CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); -} + /* Wait until NIG register shows 10 + 1 + packets of size 11*0x10 = 0xb0 */ + count = 1000 * factor; + while (count) { -static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, - dma_addr_t mapping, int sb_id) -{ - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int index; - u64 section; + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); 
+ if (val == 0xb0) + break; - /* USTORM */ - section = ((u64)mapping) + offsetof(struct host_status_block, - u_status_block); - sb->u_status_block.status_block_id = sb_id; + msleep(10); + count--; + } + if (val != 0xb0) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -3; + } - REG_WR(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section)); - REG_WR(bp, BAR_CSTRORM_INTMEM + - ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4), - U64_HI(section)); - REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF + - CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func); + /* Wait until PRS register shows 2 packets */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 2) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); - for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) - REG_WR16(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1); + /* Write 1 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); - /* CSTORM */ - section = ((u64)mapping) + offsetof(struct host_status_block, - c_status_block); - sb->c_status_block.status_block_id = sb_id; + /* Wait until PRS register shows 3 packets */ + msleep(10 * factor); + /* Wait until NIG register shows 1 packet of size 0x10 */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 3) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); - REG_WR(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section)); - REG_WR(bp, BAR_CSTRORM_INTMEM + - ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4), - U64_HI(section)); - REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF + - CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func); + /* clear NIG EOP FIFO */ + for (i = 0; i < 11; i++) + REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); + val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); + if (val != 1) { + BNX2X_ERR("clear of NIG failed\n"); + return -4; + } - for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) - REG_WR16(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); + /* Reset and init BRB, PRS, NIG */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); +#ifndef BCM_CNIC + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif - bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); -} + /* Enable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); + REG_WR(bp, CFC_REG_DEBUG0, 0x0); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); -static void bnx2x_zero_def_sb(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); + DP(NETIF_MSG_HW, "done\n"); - bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + - TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, - sizeof(struct tstorm_def_status_block)/4); - bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + - CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0, - sizeof(struct cstorm_def_status_block_u)/4); - bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + - CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0, - sizeof(struct cstorm_def_status_block_c)/4); - bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY + - XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, - sizeof(struct xstorm_def_status_block)/4); + return 0; /* OK */ } -static void bnx2x_init_def_sb(struct bnx2x *bp, - struct host_def_status_block *def_sb, - 
dma_addr_t mapping, int sb_id) +static void enable_blocks_attention(struct bnx2x *bp) { - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int index, val, reg_offset; - u64 section; - - /* ATTN */ - section = ((u64)mapping) + offsetof(struct host_def_status_block, - atten_status_block); - def_sb->atten_status_block.status_block_id = sb_id; - - bp->attn_state = 0; - - reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); - - for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { - bp->attn_group[index].sig[0] = REG_RD(bp, - reg_offset + 0x10*index); - bp->attn_group[index].sig[1] = REG_RD(bp, - reg_offset + 0x4 + 0x10*index); - bp->attn_group[index].sig[2] = REG_RD(bp, - reg_offset + 0x8 + 0x10*index); - bp->attn_group[index].sig[3] = REG_RD(bp, - reg_offset + 0xc + 0x10*index); - } - - reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : - HC_REG_ATTN_MSG0_ADDR_L); - - REG_WR(bp, reg_offset, U64_LO(section)); - REG_WR(bp, reg_offset + 4, U64_HI(section)); - - reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); - - val = REG_RD(bp, reg_offset); - val |= sb_id; - REG_WR(bp, reg_offset, val); - - /* USTORM */ - section = ((u64)mapping) + offsetof(struct host_def_status_block, - u_def_status_block); - def_sb->u_def_status_block.status_block_id = sb_id; - - REG_WR(bp, BAR_CSTRORM_INTMEM + - CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section)); - REG_WR(bp, BAR_CSTRORM_INTMEM + - ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4), - U64_HI(section)); - REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF + - CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func); - - for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) - REG_WR16(bp, BAR_CSTRORM_INTMEM + - CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1); - - /* CSTORM */ - section = ((u64)mapping) + offsetof(struct host_def_status_block, - c_def_status_block); - def_sb->c_def_status_block.status_block_id = sb_id; - - REG_WR(bp, BAR_CSTRORM_INTMEM + - CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section)); - REG_WR(bp, BAR_CSTRORM_INTMEM + - ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4), - U64_HI(section)); - REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + - CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func); - - for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) - REG_WR16(bp, BAR_CSTRORM_INTMEM + - CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1); - - /* TSTORM */ - section = ((u64)mapping) + offsetof(struct host_def_status_block, - t_def_status_block); - def_sb->t_def_status_block.status_block_id = sb_id; - - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); - REG_WR(bp, BAR_TSTRORM_INTMEM + - ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), - U64_HI(section)); - REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + - TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); - - for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) - REG_WR16(bp, BAR_TSTRORM_INTMEM + - TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); - - /* XSTORM */ - section = ((u64)mapping) + offsetof(struct host_def_status_block, - x_def_status_block); - def_sb->x_def_status_block.status_block_id = sb_id; - - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); - REG_WR(bp, BAR_XSTRORM_INTMEM + - ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), - U64_HI(section)); - REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + - XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); - - for (index = 0; 
index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) - REG_WR16(bp, BAR_XSTRORM_INTMEM + - XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); - - bp->stats_pending = 0; - bp->set_mac_pending = 0; - - bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); -} - -static void bnx2x_update_coalesce(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int i; - - for_each_queue(bp, i) { - int sb_id = bp->fp[i].sb_id; - - /* HC_INDEX_U_ETH_RX_CQ_CONS */ - REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, - U_SB_ETH_RX_CQ_INDEX), - bp->rx_ticks/(4 * BNX2X_BTR)); - REG_WR16(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, - U_SB_ETH_RX_CQ_INDEX), - (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); - - /* HC_INDEX_C_ETH_TX_CQ_CONS */ - REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, - C_SB_ETH_TX_CQ_INDEX), - bp->tx_ticks/(4 * BNX2X_BTR)); - REG_WR16(bp, BAR_CSTRORM_INTMEM + - CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, - C_SB_ETH_TX_CQ_INDEX), - (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); - } -} - -static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, - struct bnx2x_fastpath *fp, int last) -{ - int i; - - for (i = 0; i < last; i++) { - struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]); - struct sk_buff *skb = rx_buf->skb; - - if (skb == NULL) { - DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); - continue; - } - - if (fp->tpa_state[i] == BNX2X_TPA_START) - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, DMA_FROM_DEVICE); - - dev_kfree_skb(skb); - rx_buf->skb = NULL; - } -} - -static void bnx2x_init_rx_rings(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H; - u16 ring_prod, cqe_ring_prod; - int i, j; - - bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; - DP(NETIF_MSG_IFUP, - "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); - - if (bp->flags & TPA_ENABLE_FLAG) { - - for_each_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - for (i = 0; i < max_agg_queues; i++) { - fp->tpa_pool[i].skb = - netdev_alloc_skb(bp->dev, bp->rx_buf_size); - if (!fp->tpa_pool[i].skb) { - BNX2X_ERR("Failed to allocate TPA " - "skb pool for queue[%d] - " - "disabling TPA on this " - "queue!\n", j); - bnx2x_free_tpa_pool(bp, fp, i); - fp->disable_tpa = 1; - break; - } - dma_unmap_addr_set((struct sw_rx_bd *) - &bp->fp->tpa_pool[i], - mapping, 0); - fp->tpa_state[i] = BNX2X_TPA_STOP; - } - } - } - - for_each_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - fp->rx_bd_cons = 0; - fp->rx_cons_sb = BNX2X_RX_SB_INDEX; - fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; - - /* "next page" elements initialization */ - /* SGE ring */ - for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { - struct eth_rx_sge *sge; - - sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; - sge->addr_hi = - cpu_to_le32(U64_HI(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - sge->addr_lo = - cpu_to_le32(U64_LO(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - } - - bnx2x_init_sge_ring_bit_mask(fp); - - /* RX BD ring */ - for (i = 1; i <= NUM_RX_RINGS; i++) { - struct eth_rx_bd *rx_bd; - - rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; - rx_bd->addr_hi = - cpu_to_le32(U64_HI(fp->rx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); - rx_bd->addr_lo = - cpu_to_le32(U64_LO(fp->rx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); - } - - /* CQ ring */ - for (i = 1; i <= NUM_RCQ_RINGS; i++) { - struct 
eth_rx_cqe_next_page *nextpg; - - nextpg = (struct eth_rx_cqe_next_page *) - &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; - nextpg->addr_hi = - cpu_to_le32(U64_HI(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - nextpg->addr_lo = - cpu_to_le32(U64_LO(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - } - - /* Allocate SGEs and initialize the ring elements */ - for (i = 0, ring_prod = 0; - i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { - - if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { - BNX2X_ERR("was only able to allocate " - "%d rx sges\n", i); - BNX2X_ERR("disabling TPA for queue[%d]\n", j); - /* Cleanup already allocated elements */ - bnx2x_free_rx_sge_range(bp, fp, ring_prod); - bnx2x_free_tpa_pool(bp, fp, max_agg_queues); - fp->disable_tpa = 1; - ring_prod = 0; - break; - } - ring_prod = NEXT_SGE_IDX(ring_prod); - } - fp->rx_sge_prod = ring_prod; - - /* Allocate BDs and initialize BD ring */ - fp->rx_comp_cons = 0; - cqe_ring_prod = ring_prod = 0; - for (i = 0; i < bp->rx_ring_size; i++) { - if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { - BNX2X_ERR("was only able to allocate " - "%d rx skbs on queue[%d]\n", i, j); - fp->eth_q_stats.rx_skb_alloc_failed++; - break; - } - ring_prod = NEXT_RX_IDX(ring_prod); - cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); - WARN_ON(ring_prod <= i); - } - - fp->rx_bd_prod = ring_prod; - /* must not have more available CQEs than BDs */ - fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, - cqe_ring_prod); - fp->rx_pkt = fp->rx_calls = 0; - - /* Warning! - * this will generate an interrupt (to the TSTORM) - * must only be done after chip is initialized - */ - bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod, - fp->rx_sge_prod); - if (j != 0) - continue; - - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), - U64_LO(fp->rx_comp_mapping)); - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, - U64_HI(fp->rx_comp_mapping)); - } -} - -static void bnx2x_init_tx_ring(struct bnx2x *bp) -{ - int i, j; - - for_each_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - for (i = 1; i <= NUM_TX_RINGS; i++) { - struct eth_tx_next_bd *tx_next_bd = - &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; - - tx_next_bd->addr_hi = - cpu_to_le32(U64_HI(fp->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - tx_next_bd->addr_lo = - cpu_to_le32(U64_LO(fp->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - } - - fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; - fp->tx_db.data.zero_fill1 = 0; - fp->tx_db.data.prod = 0; - - fp->tx_pkt_prod = 0; - fp->tx_pkt_cons = 0; - fp->tx_bd_prod = 0; - fp->tx_bd_cons = 0; - fp->tx_cons_sb = BNX2X_TX_SB_INDEX; - fp->tx_pkt = 0; - } -} - -static void bnx2x_init_sp_ring(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - - spin_lock_init(&bp->spq_lock); - - bp->spq_left = MAX_SPQ_PENDING; - bp->spq_prod_idx = 0; - bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; - bp->spq_prod_bd = bp->spq; - bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; - - REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func), - U64_LO(bp->spq_mapping)); - REG_WR(bp, - XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4, - U64_HI(bp->spq_mapping)); - - REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func), - bp->spq_prod_idx); -} - -static void bnx2x_init_context(struct bnx2x *bp) -{ - int i; - - /* Rx */ - for_each_queue(bp, i) { - struct eth_context *context = bnx2x_sp(bp, context[i].eth); - struct bnx2x_fastpath *fp = &bp->fp[i]; - u8 
cl_id = fp->cl_id; - - context->ustorm_st_context.common.sb_index_numbers = - BNX2X_RX_SB_INDEX_NUM; - context->ustorm_st_context.common.clientId = cl_id; - context->ustorm_st_context.common.status_block_id = fp->sb_id; - context->ustorm_st_context.common.flags = - (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT | - USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS); - context->ustorm_st_context.common.statistics_counter_id = - cl_id; - context->ustorm_st_context.common.mc_alignment_log_size = - BNX2X_RX_ALIGN_SHIFT; - context->ustorm_st_context.common.bd_buff_size = - bp->rx_buf_size; - context->ustorm_st_context.common.bd_page_base_hi = - U64_HI(fp->rx_desc_mapping); - context->ustorm_st_context.common.bd_page_base_lo = - U64_LO(fp->rx_desc_mapping); - if (!fp->disable_tpa) { - context->ustorm_st_context.common.flags |= - USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA; - context->ustorm_st_context.common.sge_buff_size = - (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE, - 0xffff); - context->ustorm_st_context.common.sge_page_base_hi = - U64_HI(fp->rx_sge_mapping); - context->ustorm_st_context.common.sge_page_base_lo = - U64_LO(fp->rx_sge_mapping); - - context->ustorm_st_context.common.max_sges_for_packet = - SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; - context->ustorm_st_context.common.max_sges_for_packet = - ((context->ustorm_st_context.common. - max_sges_for_packet + PAGES_PER_SGE - 1) & - (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; - } - - context->ustorm_ag_context.cdu_usage = - CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), - CDU_REGION_NUMBER_UCM_AG, - ETH_CONNECTION_TYPE); - - context->xstorm_ag_context.cdu_reserved = - CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), - CDU_REGION_NUMBER_XCM_AG, - ETH_CONNECTION_TYPE); - } - - /* Tx */ - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct eth_context *context = - bnx2x_sp(bp, context[i].eth); - - context->cstorm_st_context.sb_index_number = - C_SB_ETH_TX_CQ_INDEX; - context->cstorm_st_context.status_block_id = fp->sb_id; - - context->xstorm_st_context.tx_bd_page_base_hi = - U64_HI(fp->tx_desc_mapping); - context->xstorm_st_context.tx_bd_page_base_lo = - U64_LO(fp->tx_desc_mapping); - context->xstorm_st_context.statistics_data = (fp->cl_id | - XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); - } -} - -static void bnx2x_init_ind_table(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - int i; - - if (bp->multi_mode == ETH_RSS_MODE_DISABLED) - return; - - DP(NETIF_MSG_IFUP, - "Initializing indirection table multi_mode %d\n", bp->multi_mode); - for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, - bp->fp->cl_id + (i % bp->num_queues)); -} - -static void bnx2x_set_client_config(struct bnx2x *bp) -{ - struct tstorm_eth_client_config tstorm_client = {0}; - int port = BP_PORT(bp); - int i; - - tstorm_client.mtu = bp->dev->mtu; - tstorm_client.config_flags = - (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE | - TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE); -#ifdef BCM_VLAN - if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) { - tstorm_client.config_flags |= - TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE; - DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); - } -#endif - - for_each_queue(bp, i) { - tstorm_client.statistics_counter_id = bp->fp[i].cl_id; - - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id), - ((u32 *)&tstorm_client)[0]); - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4, - ((u32 
*)&tstorm_client)[1]); - } - - DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n", - ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); -} - -static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) -{ - struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; - int mode = bp->rx_mode; - int mask = bp->rx_mode_cl_mask; - int func = BP_FUNC(bp); - int port = BP_PORT(bp); - int i; - /* All but management unicast packets should pass to the host as well */ - u32 llh_mask = - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST | - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; - - DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask); - - switch (mode) { - case BNX2X_RX_MODE_NONE: /* no Rx */ - tstorm_mac_filter.ucast_drop_all = mask; - tstorm_mac_filter.mcast_drop_all = mask; - tstorm_mac_filter.bcast_drop_all = mask; - break; - - case BNX2X_RX_MODE_NORMAL: - tstorm_mac_filter.bcast_accept_all = mask; - break; - - case BNX2X_RX_MODE_ALLMULTI: - tstorm_mac_filter.mcast_accept_all = mask; - tstorm_mac_filter.bcast_accept_all = mask; - break; - - case BNX2X_RX_MODE_PROMISC: - tstorm_mac_filter.ucast_accept_all = mask; - tstorm_mac_filter.mcast_accept_all = mask; - tstorm_mac_filter.bcast_accept_all = mask; - /* pass management unicast packets as well */ - llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; - break; - - default: - BNX2X_ERR("BAD rx mode (%d)\n", mode); - break; - } - - REG_WR(bp, - (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK), - llh_mask); - - for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, - ((u32 *)&tstorm_mac_filter)[i]); - -/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, - ((u32 *)&tstorm_mac_filter)[i]); */ - } - - if (mode != BNX2X_RX_MODE_NONE) - bnx2x_set_client_config(bp); -} - -static void bnx2x_init_internal_common(struct bnx2x *bp) -{ - int i; - - /* Zero this manually as its initialization is - currently missing in the initTool */ - for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_AGG_DATA_OFFSET + i * 4, 0); -} - -static void bnx2x_init_internal_port(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - - REG_WR(bp, - BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR); - REG_WR(bp, - BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR); - REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); -} - -static void bnx2x_init_internal_func(struct bnx2x *bp) -{ - struct tstorm_eth_function_common_config tstorm_config = {0}; - struct stats_indication_flags stats_flags = {0}; - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int i, j; - u32 offset; - u16 max_agg_size; - - tstorm_config.config_flags = RSS_FLAGS(bp); - - if (is_multi(bp)) - tstorm_config.rss_result_mask = MULTI_MASK; - - /* Enable TPA if needed */ - if (bp->flags & TPA_ENABLE_FLAG) - tstorm_config.config_flags |= - TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; - - if (IS_E1HMF(bp)) - tstorm_config.config_flags |= - TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM; - - tstorm_config.leading_client_id = BP_L_ID(bp); - - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), - (*(u32 *)&tstorm_config)); - - bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up 
*/ - bp->rx_mode_cl_mask = (1 << BP_L_ID(bp)); - bnx2x_set_storm_rx_mode(bp); - - for_each_queue(bp, i) { - u8 cl_id = bp->fp[i].cl_id; - - /* reset xstorm per client statistics */ - offset = BAR_XSTRORM_INTMEM + - XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); - for (j = 0; - j < sizeof(struct xstorm_per_client_stats) / 4; j++) - REG_WR(bp, offset + j*4, 0); - - /* reset tstorm per client statistics */ - offset = BAR_TSTRORM_INTMEM + - TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); - for (j = 0; - j < sizeof(struct tstorm_per_client_stats) / 4; j++) - REG_WR(bp, offset + j*4, 0); - - /* reset ustorm per client statistics */ - offset = BAR_USTRORM_INTMEM + - USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); - for (j = 0; - j < sizeof(struct ustorm_per_client_stats) / 4; j++) - REG_WR(bp, offset + j*4, 0); - } - - /* Init statistics related context */ - stats_flags.collect_eth = 1; - - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), - ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4, - ((u32 *)&stats_flags)[1]); - - REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), - ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4, - ((u32 *)&stats_flags)[1]); - - REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func), - ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4, - ((u32 *)&stats_flags)[1]); - - REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), - ((u32 *)&stats_flags)[0]); - REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4, - ((u32 *)&stats_flags)[1]); - - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), - U64_LO(bnx2x_sp_mapping(bp, fw_stats))); - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, - U64_HI(bnx2x_sp_mapping(bp, fw_stats))); - - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), - U64_LO(bnx2x_sp_mapping(bp, fw_stats))); - REG_WR(bp, BAR_TSTRORM_INTMEM + - TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, - U64_HI(bnx2x_sp_mapping(bp, fw_stats))); - - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), - U64_LO(bnx2x_sp_mapping(bp, fw_stats))); - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, - U64_HI(bnx2x_sp_mapping(bp, fw_stats))); - - if (CHIP_IS_E1H(bp)) { - REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, - IS_E1HMF(bp)); - REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, - IS_E1HMF(bp)); - REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, - IS_E1HMF(bp)); - REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, - IS_E1HMF(bp)); - - REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func), - bp->e1hov); - } - - /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ - max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * - SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id), - U64_LO(fp->rx_comp_mapping)); - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4, - U64_HI(fp->rx_comp_mapping)); - - /* Next page */ - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id), - U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE)); - REG_WR(bp, BAR_USTRORM_INTMEM + - 
USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, - U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); - - REG_WR16(bp, BAR_USTRORM_INTMEM + - USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), - max_agg_size); - } - - /* dropless flow control */ - if (CHIP_IS_E1H(bp)) { - struct ustorm_eth_rx_pause_data_e1h rx_pause = {0}; - - rx_pause.bd_thr_low = 250; - rx_pause.cqe_thr_low = 250; - rx_pause.cos = 1; - rx_pause.sge_thr_low = 0; - rx_pause.bd_thr_high = 350; - rx_pause.cqe_thr_high = 350; - rx_pause.sge_thr_high = 0; - - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - if (!fp->disable_tpa) { - rx_pause.sge_thr_low = 150; - rx_pause.sge_thr_high = 250; - } - - - offset = BAR_USTRORM_INTMEM + - USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, - fp->cl_id); - for (j = 0; - j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4; - j++) - REG_WR(bp, offset + j*4, - ((u32 *)&rx_pause)[j]); - } - } - - memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); - - /* Init rate shaping and fairness contexts */ - if (IS_E1HMF(bp)) { - int vn; - - /* During init there is no active link - Until link is up, set link rate to 10Gbps */ - bp->link_vars.line_speed = SPEED_10000; - bnx2x_init_port_minmax(bp); - - if (!BP_NOMCP(bp)) - bp->mf_config = - SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); - bnx2x_calc_vn_weight_sum(bp); - - for (vn = VN_0; vn < E1HVN_MAX; vn++) - bnx2x_init_vn_minmax(bp, 2*vn + port); - - /* Enable rate shaping and fairness */ - bp->cmng.flags.cmng_enables |= - CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; - - } else { - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "single function mode minmax will be disabled\n"); - } - - - /* Store cmng structures to internal memory */ - if (bp->port.pmf) - for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4, - ((u32 *)(&bp->cmng))[i]); -} - -static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) -{ - switch (load_code) { - case FW_MSG_CODE_DRV_LOAD_COMMON: - bnx2x_init_internal_common(bp); - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_PORT: - bnx2x_init_internal_port(bp); - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_FUNCTION: - bnx2x_init_internal_func(bp); - break; - - default: - BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); - break; - } -} - -static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) -{ - int i; - - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - fp->bp = bp; - fp->state = BNX2X_FP_STATE_CLOSED; - fp->index = i; - fp->cl_id = BP_L_ID(bp) + i; -#ifdef BCM_CNIC - fp->sb_id = fp->cl_id + 1; -#else - fp->sb_id = fp->cl_id; -#endif - DP(NETIF_MSG_IFUP, - "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", - i, bp, fp->status_blk, fp->cl_id, fp->sb_id); - bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, - fp->sb_id); - bnx2x_update_fpsb_idx(fp); - } - - /* ensure status block indices were read */ - rmb(); - - - bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping, - DEF_SB_ID); - bnx2x_update_dsb_idx(bp); - bnx2x_update_coalesce(bp); - bnx2x_init_rx_rings(bp); - bnx2x_init_tx_ring(bp); - bnx2x_init_sp_ring(bp); - bnx2x_init_context(bp); - bnx2x_init_internal(bp, load_code); - bnx2x_init_ind_table(bp); - bnx2x_stats_init(bp); - - /* At this point, we are ready for interrupts */ - atomic_set(&bp->intr_sem, 0); - - /* flush all before enabling interrupts */ - mb(); - mmiowb(); - - bnx2x_int_enable(bp); - - /* Check for SPIO5 */ - 
bnx2x_attn_int_deasserted0(bp, - REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & - AEU_INPUTS_ATTN_BITS_SPIO5); -} - -/* end of nic init */ - -/* - * gzip service functions - */ - -static int bnx2x_gunzip_init(struct bnx2x *bp) -{ - bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, - &bp->gunzip_mapping, GFP_KERNEL); - if (bp->gunzip_buf == NULL) - goto gunzip_nomem1; - - bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); - if (bp->strm == NULL) - goto gunzip_nomem2; - - bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), - GFP_KERNEL); - if (bp->strm->workspace == NULL) - goto gunzip_nomem3; - - return 0; - -gunzip_nomem3: - kfree(bp->strm); - bp->strm = NULL; - -gunzip_nomem2: - dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, - bp->gunzip_mapping); - bp->gunzip_buf = NULL; - -gunzip_nomem1: - netdev_err(bp->dev, "Cannot allocate firmware buffer for" - " un-compression\n"); - return -ENOMEM; -} - -static void bnx2x_gunzip_end(struct bnx2x *bp) -{ - kfree(bp->strm->workspace); - - kfree(bp->strm); - bp->strm = NULL; - - if (bp->gunzip_buf) { - dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, - bp->gunzip_mapping); - bp->gunzip_buf = NULL; - } -} - -static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) -{ - int n, rc; - - /* check gzip header */ - if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { - BNX2X_ERR("Bad gzip header\n"); - return -EINVAL; - } - - n = 10; - -#define FNAME 0x8 - - if (zbuf[3] & FNAME) - while ((zbuf[n++] != 0) && (n < len)); - - bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; - bp->strm->avail_in = len - n; - bp->strm->next_out = bp->gunzip_buf; - bp->strm->avail_out = FW_BUF_SIZE; - - rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); - if (rc != Z_OK) - return rc; - - rc = zlib_inflate(bp->strm, Z_FINISH); - if ((rc != Z_OK) && (rc != Z_STREAM_END)) - netdev_err(bp->dev, "Firmware decompression error: %s\n", - bp->strm->msg); - - bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); - if (bp->gunzip_outlen & 0x3) - netdev_err(bp->dev, "Firmware decompression error:" - " gunzip_outlen (%d) not aligned\n", - bp->gunzip_outlen); - bp->gunzip_outlen >>= 2; - - zlib_inflateEnd(bp->strm); - - if (rc == Z_STREAM_END) - return 0; - - return rc; -} - -/* nic load/unload */ - -/* - * General service functions - */ - -/* send a NIG loopback debug packet */ -static void bnx2x_lb_pckt(struct bnx2x *bp) -{ - u32 wb_write[3]; - - /* Ethernet source and destination addresses */ - wb_write[0] = 0x55555555; - wb_write[1] = 0x55555555; - wb_write[2] = 0x20; /* SOP */ - REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); - - /* NON-IP protocol */ - wb_write[0] = 0x09000000; - wb_write[1] = 0x55555555; - wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ - REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); -} - -/* some of the internal memories - * are not directly readable from the driver - * to test them we send debug packets - */ -static int bnx2x_int_mem_test(struct bnx2x *bp) -{ - int factor; - int count, i; - u32 val = 0; - - if (CHIP_REV_IS_FPGA(bp)) - factor = 120; - else if (CHIP_REV_IS_EMUL(bp)) - factor = 200; - else - factor = 1; - - DP(NETIF_MSG_HW, "start part1\n"); - - /* Disable inputs of parser neighbor blocks */ - REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); - REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); - REG_WR(bp, CFC_REG_DEBUG0, 0x1); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); - - /* Write 0 to parser credits for CFC search request */ - REG_WR(bp, 
PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); - - /* send Ethernet packet */ - bnx2x_lb_pckt(bp); - - /* TODO do i reset NIG statistic? */ - /* Wait until NIG register shows 1 packet of size 0x10 */ - count = 1000 * factor; - while (count) { - - bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); - val = *bnx2x_sp(bp, wb_data[0]); - if (val == 0x10) - break; - - msleep(10); - count--; - } - if (val != 0x10) { - BNX2X_ERR("NIG timeout val = 0x%x\n", val); - return -1; - } - - /* Wait until PRS register shows 1 packet */ - count = 1000 * factor; - while (count) { - val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); - if (val == 1) - break; - - msleep(10); - count--; - } - if (val != 0x1) { - BNX2X_ERR("PRS timeout val = 0x%x\n", val); - return -2; - } - - /* Reset and init BRB, PRS */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); - msleep(50); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); - msleep(50); - bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); - - DP(NETIF_MSG_HW, "part2\n"); - - /* Disable inputs of parser neighbor blocks */ - REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); - REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); - REG_WR(bp, CFC_REG_DEBUG0, 0x1); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); - - /* Write 0 to parser credits for CFC search request */ - REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); - - /* send 10 Ethernet packets */ - for (i = 0; i < 10; i++) - bnx2x_lb_pckt(bp); - - /* Wait until NIG register shows 10 + 1 - packets of size 11*0x10 = 0xb0 */ - count = 1000 * factor; - while (count) { - - bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); - val = *bnx2x_sp(bp, wb_data[0]); - if (val == 0xb0) - break; - - msleep(10); - count--; - } - if (val != 0xb0) { - BNX2X_ERR("NIG timeout val = 0x%x\n", val); - return -3; - } - - /* Wait until PRS register shows 2 packets */ - val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); - if (val != 2) - BNX2X_ERR("PRS timeout val = 0x%x\n", val); - - /* Write 1 to parser credits for CFC search request */ - REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); - - /* Wait until PRS register shows 3 packets */ - msleep(10 * factor); - /* Wait until NIG register shows 1 packet of size 0x10 */ - val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); - if (val != 3) - BNX2X_ERR("PRS timeout val = 0x%x\n", val); - - /* clear NIG EOP FIFO */ - for (i = 0; i < 11; i++) - REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); - val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); - if (val != 1) { - BNX2X_ERR("clear of NIG failed\n"); - return -4; - } - - /* Reset and init BRB, PRS, NIG */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); - msleep(50); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); - msleep(50); - bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif - - /* Enable inputs of parser neighbor blocks */ - REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); - REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); - REG_WR(bp, CFC_REG_DEBUG0, 0x0); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); - - DP(NETIF_MSG_HW, "done\n"); - - return 0; /* OK */ -} - -static void enable_blocks_attention(struct bnx2x *bp) -{ - REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); - REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); - REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); - REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); - REG_WR(bp, QM_REG_QM_INT_MASK, 0); - REG_WR(bp, TM_REG_TM_INT_MASK, 0); - REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); - 
REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); - REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); -/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ -/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ - REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); - REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); - REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); -/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ -/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ - REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); - REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); - REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); - REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); -/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ -/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ - if (CHIP_REV_IS_FPGA(bp)) - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); - else - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); - REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); - REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); - REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); -/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ -/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ - REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); - REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); -/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ - REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ -} - -static const struct { - u32 addr; - u32 mask; -} bnx2x_parity_mask[] = { - {PXP_REG_PXP_PRTY_MASK, 0xffffffff}, - {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, - {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff}, - {HC_REG_HC_PRTY_MASK, 0xffffffff}, - {MISC_REG_MISC_PRTY_MASK, 0xffffffff}, - {QM_REG_QM_PRTY_MASK, 0x0}, - {DORQ_REG_DORQ_PRTY_MASK, 0x0}, - {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, - {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, - {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ - {CDU_REG_CDU_PRTY_MASK, 0x0}, - {CFC_REG_CFC_PRTY_MASK, 0x0}, - {DBG_REG_DBG_PRTY_MASK, 0x0}, - {DMAE_REG_DMAE_PRTY_MASK, 0x0}, - {BRB1_REG_BRB1_PRTY_MASK, 0x0}, - {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ - {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */ - {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ - {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */ - {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ - {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, - {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, - {USEM_REG_USEM_PRTY_MASK_0, 0x0}, - {USEM_REG_USEM_PRTY_MASK_1, 0x0}, - {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, - {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, - {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, - {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} -}; - -static void enable_blocks_parity(struct bnx2x *bp) -{ - int i, mask_arr_len = - sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0])); - - for (i = 0; i < mask_arr_len; i++) - REG_WR(bp, bnx2x_parity_mask[i].addr, - bnx2x_parity_mask[i].mask); -} - - -static void bnx2x_reset_common(struct bnx2x *bp) -{ - /* reset_common */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0xd3ffff7f); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); -} - -static void bnx2x_init_pxp(struct bnx2x *bp) -{ - u16 devctl; - int r_order, w_order; - - pci_read_config_word(bp->pdev, - bp->pcie_cap + PCI_EXP_DEVCTL, &devctl); - DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); - w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); - if (bp->mrrs == -1) - r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); - else { - DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); - r_order = bp->mrrs; - } - - bnx2x_init_pxp_arb(bp, r_order, w_order); -} - -static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) -{ - int is_required; - u32 val; - int port; - - if (BP_NOMCP(bp)) - return; - - is_required = 0; - val = SHMEM_RD(bp, 
dev_info.shared_hw_config.config2) & - SHARED_HW_CFG_FAN_FAILURE_MASK; - - if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) - is_required = 1; - - /* - * The fan failure mechanism is usually related to the PHY type since - * the power consumption of the board is affected by the PHY. Currently, - * fan is required for most designs with SFX7101, BCM8727 and BCM8481. - */ - else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) - for (port = PORT_0; port < PORT_MAX; port++) { - u32 phy_type = - SHMEM_RD(bp, dev_info.port_hw_config[port]. - external_phy_config) & - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; - is_required |= - ((phy_type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) || - (phy_type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || - (phy_type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481)); - } - - DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); - - if (is_required == 0) - return; - - /* Fan failure is indicated by SPIO 5 */ - bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, - MISC_REGISTERS_SPIO_INPUT_HI_Z); - - /* set to active low mode */ - val = REG_RD(bp, MISC_REG_SPIO_INT); - val |= ((1 << MISC_REGISTERS_SPIO_5) << - MISC_REGISTERS_SPIO_INT_OLD_SET_POS); - REG_WR(bp, MISC_REG_SPIO_INT, val); - - /* enable interrupt to signal the IGU */ - val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); - val |= (1 << MISC_REGISTERS_SPIO_5); - REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); -} - -static int bnx2x_init_common(struct bnx2x *bp) -{ - u32 val, i; -#ifdef BCM_CNIC - u32 wb_write[2]; -#endif - - DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); - - bnx2x_reset_common(bp); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); - - bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); - if (CHIP_IS_E1H(bp)) - REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp)); - - REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); - msleep(30); - REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); - - bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); - if (CHIP_IS_E1(bp)) { - /* enable HW interrupt from PXP on USDM overflow - bit 16 on INT_MASK_0 */ - REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); - } - - bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE); - bnx2x_init_pxp(bp); - -#ifdef __BIG_ENDIAN - REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); - /* make sure this value is 0 */ - REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); - -/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ - REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); -#endif - - REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); -#ifdef BCM_CNIC - REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); - REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); - REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); -#endif - - if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) - REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); - - /* let the HW do it's magic ... 
*/ - msleep(100); - /* finish PXP init */ - val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); - if (val != 1) { - BNX2X_ERR("PXP2 CFG failed\n"); - return -EBUSY; - } - val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); - if (val != 1) { - BNX2X_ERR("PXP2 RD_INIT failed\n"); - return -EBUSY; - } - - REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); - REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); - - bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); - - /* clean the DMAE memory */ - bp->dmae_ready = 1; - bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); - - bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE); - - bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); - bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); - bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); - bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); - - bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); - -#ifdef BCM_CNIC - wb_write[0] = 0; - wb_write[1] = 0; - for (i = 0; i < 64; i++) { - REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16)); - bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2); - - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16)); - bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, - wb_write, 2); - } - } -#endif - /* soft reset pulse */ - REG_WR(bp, QM_REG_SOFT_RESET, 1); - REG_WR(bp, QM_REG_SOFT_RESET, 0); - -#ifdef BCM_CNIC - bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); -#endif - - bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); - REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); - if (!CHIP_REV_IS_SLOW(bp)) { - /* enable hw interrupt from doorbell Q */ - REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); - } - - bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); - REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif - if (CHIP_IS_E1H(bp)) - REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); - - bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); - - bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - - bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); - - /* sync semi rtc */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0x80000000); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, - 0x80000000); - - bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); - - REG_WR(bp, SRC_REG_SOFT_RST, 1); - for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) - REG_WR(bp, i, random32()); - bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); -#ifdef BCM_CNIC - REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); - REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); - REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); - REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); - REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); - REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); - REG_WR(bp, 
SRC_REG_KEYSEARCH_6, 0x298d8adf); - REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); - REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); - REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); -#endif - REG_WR(bp, SRC_REG_SOFT_RST, 0); - - if (sizeof(union cdu_context) != 1024) - /* we currently assume that a context is 1024 bytes */ - dev_alert(&bp->pdev->dev, "please adjust the size " - "of cdu_context(%ld)\n", - (long)sizeof(union cdu_context)); - - bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); - val = (4 << 24) + (0 << 12) + 1024; - REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); - - bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); - REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); - /* enable context validation interrupt from CFC */ - REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); - - /* set the thresholds to prevent CFC/CDU race */ - REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); - - bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); - - bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); - /* Reset PCIE errors for debug */ - REG_WR(bp, 0x2814, 0xffffffff); - REG_WR(bp, 0x3820, 0xffffffff); - - bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); - - bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp)); - REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp)); - } - - if (CHIP_REV_IS_SLOW(bp)) - msleep(200); - - /* finish CFC init */ - val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); - if (val != 1) { - BNX2X_ERR("CFC LL_INIT failed\n"); - return -EBUSY; - } - val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); - if (val != 1) { - BNX2X_ERR("CFC AC_INIT failed\n"); - return -EBUSY; - } - val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); - if (val != 1) { - BNX2X_ERR("CFC CAM_INIT failed\n"); - return -EBUSY; - } - REG_WR(bp, CFC_REG_DEBUG0, 0); - - /* read NIG statistic - to see if this is our first up since powerup */ - bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); - val = *bnx2x_sp(bp, wb_data[0]); - - /* do internal memory self test */ - if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { - BNX2X_ERR("internal mem self test failed\n"); - return -EBUSY; - } - - switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - bp->port.need_hw_lock = 1; - break; - - default: - break; - } - - bnx2x_setup_fan_failure_detection(bp); - - /* clear PXP2 attentions */ - REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); - - enable_blocks_attention(bp); - if (CHIP_PARITY_SUPPORTED(bp)) - enable_blocks_parity(bp); - - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_common_init_phy(bp, bp->common.shmem_base); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing - can not initialize link\n"); - - return 0; -} - -static int bnx2x_init_port(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int init_stage = port ? 
PORT1_STAGE : PORT0_STAGE; - u32 low, high; - u32 val; - - DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); - - REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); - - bnx2x_init_block(bp, PXP_BLOCK, init_stage); - bnx2x_init_block(bp, PXP2_BLOCK, init_stage); - - bnx2x_init_block(bp, TCM_BLOCK, init_stage); - bnx2x_init_block(bp, UCM_BLOCK, init_stage); - bnx2x_init_block(bp, CCM_BLOCK, init_stage); - bnx2x_init_block(bp, XCM_BLOCK, init_stage); - -#ifdef BCM_CNIC - REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); - - bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); - REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); - REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); -#endif - - bnx2x_init_block(bp, DQ_BLOCK, init_stage); - - bnx2x_init_block(bp, BRB1_BLOCK, init_stage); - if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) { - /* no pause for emulation and FPGA */ - low = 0; - high = 513; - } else { - if (IS_E1HMF(bp)) - low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); - else if (bp->dev->mtu > 4096) { - if (bp->flags & ONE_PORT_FLAG) - low = 160; - else { - val = bp->dev->mtu; - /* (24*1024 + val*4)/256 */ - low = 96 + (val/64) + ((val % 64) ? 1 : 0); - } - } else - low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); - high = low + 56; /* 14*1024/256 */ - } - REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); - REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); - - - bnx2x_init_block(bp, PRS_BLOCK, init_stage); - - bnx2x_init_block(bp, TSDM_BLOCK, init_stage); - bnx2x_init_block(bp, CSDM_BLOCK, init_stage); - bnx2x_init_block(bp, USDM_BLOCK, init_stage); - bnx2x_init_block(bp, XSDM_BLOCK, init_stage); - - bnx2x_init_block(bp, TSEM_BLOCK, init_stage); - bnx2x_init_block(bp, USEM_BLOCK, init_stage); - bnx2x_init_block(bp, CSEM_BLOCK, init_stage); - bnx2x_init_block(bp, XSEM_BLOCK, init_stage); - - bnx2x_init_block(bp, UPB_BLOCK, init_stage); - bnx2x_init_block(bp, XPB_BLOCK, init_stage); - - bnx2x_init_block(bp, PBF_BLOCK, init_stage); - - /* configure PBF to work without PAUSE mtu 9000 */ - REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); - - /* update threshold */ - REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); - /* update init credit */ - REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); - - /* probe changes */ - REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); - msleep(5); - REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); - -#ifdef BCM_CNIC - bnx2x_init_block(bp, SRCH_BLOCK, init_stage); -#endif - bnx2x_init_block(bp, CDU_BLOCK, init_stage); - bnx2x_init_block(bp, CFC_BLOCK, init_stage); - - if (CHIP_IS_E1(bp)) { - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - } - bnx2x_init_block(bp, HC_BLOCK, init_stage); - - bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); - /* init aeu_mask_attn_func_0/1: - * - SF mode: bits 3-7 are masked. only bits 0-2 are in use - * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF - * bits 4-7 are used for "per vn group attention" */ - REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, - (IS_E1HMF(bp) ? 
0xF7 : 0x7)); - - bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); - bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); - bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); - bnx2x_init_block(bp, DBU_BLOCK, init_stage); - bnx2x_init_block(bp, DBG_BLOCK, init_stage); - - bnx2x_init_block(bp, NIG_BLOCK, init_stage); - - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); - - if (CHIP_IS_E1H(bp)) { - /* 0x2 disable e1hov, 0x1 enable */ - REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, - (IS_E1HMF(bp) ? 0x1 : 0x2)); - - { - REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); - REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); - REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); - } - } - - bnx2x_init_block(bp, MCP_BLOCK, init_stage); - bnx2x_init_block(bp, DMAE_BLOCK, init_stage); - - switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - { - u32 swap_val, swap_override, aeu_gpio_mask, offset; - - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_INPUT_HI_Z, port); - - /* The GPIO should be swapped if the swap register is - set and active */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - - /* Select function upon port-swap configuration */ - if (port == 0) { - offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; - aeu_gpio_mask = (swap_val && swap_override) ? - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 : - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0; - } else { - offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; - aeu_gpio_mask = (swap_val && swap_override) ? - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 : - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1; - } - val = REG_RD(bp, offset); - /* add GPIO3 to group */ - val |= aeu_gpio_mask; - REG_WR(bp, offset, val); - } - break; - - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - /* add SPIO 5 to group 0 */ - { - u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); - val = REG_RD(bp, reg_addr); - val |= AEU_INPUTS_ATTN_BITS_SPIO5; - REG_WR(bp, reg_addr, val); - } - break; - - default: - break; - } - - bnx2x__link_reset(bp); - - return 0; -} - -#define ILT_PER_FUNC (768/2) -#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) -/* the phys address is shifted right 12 bits and has an added - 1=valid bit added to the 53rd bit - then since this is a wide register(TM) - we split it into two 32 bit writes - */ -#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) -#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) -#define PXP_ONE_ILT(x) (((x) << 10) | x) -#define PXP_ILT_RANGE(f, l) (((l) << 10) | f) - -#ifdef BCM_CNIC -#define CNIC_ILT_LINES 127 -#define CNIC_CTX_PER_ILT 16 -#else -#define CNIC_ILT_LINES 0 -#endif - -static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) -{ - int reg; - - if (CHIP_IS_E1H(bp)) - reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; - else /* E1 */ - reg = PXP2_REG_RQ_ONCHIP_AT + index*8; - - bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); -} - -static int bnx2x_init_func(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - u32 addr, val; - int i; - - DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); - - /* set MSI reconfigure capability */ - addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); - val = REG_RD(bp, addr); - val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; - REG_WR(bp, addr, val); - - i = FUNC_ILT_BASE(func); - - bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); - REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); - } else /* E1 */ - REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, - PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); - -#ifdef BCM_CNIC - i += 1 + CNIC_ILT_LINES; - bnx2x_ilt_wr(bp, i, bp->timers_mapping); - if (CHIP_IS_E1(bp)) - REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i)); - else { - REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i); - REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i); - } - - i++; - bnx2x_ilt_wr(bp, i, bp->qm_mapping); - if (CHIP_IS_E1(bp)) - REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); - else { - REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i); - REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i); - } - - i++; - bnx2x_ilt_wr(bp, i, bp->t1_mapping); - if (CHIP_IS_E1(bp)) - REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); - else { - REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); - REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); - } - - /* tell the searcher where the T2 table is */ - REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); - - bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16, - U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping)); - - bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, - U64_LO((u64)bp->t2_mapping + 16*1024 - 64), - U64_HI((u64)bp->t2_mapping + 16*1024 - 64)); - - REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); -#endif - - if (CHIP_IS_E1H(bp)) { - bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); - - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); - } - - /* HC init per function */ - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); - - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - } - bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func); - - /* Reset PCIE errors for debug */ - REG_WR(bp, 0x2114, 0xffffffff); - REG_WR(bp, 0x2120, 0xffffffff); - - return 0; -} - -static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) -{ - int i, rc = 0; - - DP(BNX2X_MSG_MCP, "function %d load_code %x\n", - BP_FUNC(bp), load_code); - - bp->dmae_ready = 0; - mutex_init(&bp->dmae_mutex); - rc = bnx2x_gunzip_init(bp); - if (rc) - return rc; - - switch (load_code) { - case FW_MSG_CODE_DRV_LOAD_COMMON: - rc = bnx2x_init_common(bp); - if (rc) - goto init_hw_err; - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_PORT: - bp->dmae_ready = 1; - rc = bnx2x_init_port(bp); - if (rc) - goto init_hw_err; - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_FUNCTION: - bp->dmae_ready = 1; - rc = bnx2x_init_func(bp); - if (rc) - goto init_hw_err; - break; - - default: - BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); - break; - } - - if (!BP_NOMCP(bp)) { - int func = BP_FUNC(bp); - - bp->fw_drv_pulse_wr_seq = - (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & - DRV_PULSE_SEQ_MASK); - 
DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); - } - - /* this needs to be done before gunzip end */ - bnx2x_zero_def_sb(bp); - for_each_queue(bp, i) - bnx2x_zero_sb(bp, BP_L_ID(bp) + i); -#ifdef BCM_CNIC - bnx2x_zero_sb(bp, BP_L_ID(bp) + i); -#endif - -init_hw_err: - bnx2x_gunzip_end(bp); - - return rc; -} - -static void bnx2x_free_mem(struct bnx2x *bp) -{ - -#define BNX2X_PCI_FREE(x, y, size) \ - do { \ - if (x) { \ - dma_free_coherent(&bp->pdev->dev, size, x, y); \ - x = NULL; \ - y = 0; \ - } \ - } while (0) - -#define BNX2X_FREE(x) \ - do { \ - if (x) { \ - vfree(x); \ - x = NULL; \ - } \ - } while (0) - - int i; - - /* fastpath */ - /* Common */ - for_each_queue(bp, i) { - - /* status blocks */ - BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), - bnx2x_fp(bp, i, status_blk_mapping), - sizeof(struct host_status_block)); - } - /* Rx */ - for_each_queue(bp, i) { - - /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); - BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring), - bnx2x_fp(bp, i, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); - - BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring), - bnx2x_fp(bp, i, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); - - /* SGE ring */ - BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring)); - BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), - bnx2x_fp(bp, i, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); - } - /* Tx */ - for_each_queue(bp, i) { - - /* fastpath tx rings: tx_buf tx_desc */ - BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); - BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring), - bnx2x_fp(bp, i, tx_desc_mapping), - sizeof(union eth_tx_bd_types) * NUM_TX_BD); - } - /* end of fastpath */ - - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, - sizeof(struct host_def_status_block)); - - BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); - -#ifdef BCM_CNIC - BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); - BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); - BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); - BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); - BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping, - sizeof(struct host_status_block)); -#endif - BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); - -#undef BNX2X_PCI_FREE -#undef BNX2X_KFREE -} - -static int bnx2x_alloc_mem(struct bnx2x *bp) -{ - -#define BNX2X_PCI_ALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset(x, 0, size); \ - } while (0) - -#define BNX2X_ALLOC(x, size) \ - do { \ - x = vmalloc(size); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset(x, 0, size); \ - } while (0) - - int i; - - /* fastpath */ - /* Common */ - for_each_queue(bp, i) { - bnx2x_fp(bp, i, bp) = bp; - - /* status blocks */ - BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), - &bnx2x_fp(bp, i, status_blk_mapping), - sizeof(struct host_status_block)); - } - /* Rx */ - for_each_queue(bp, i) { - - /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), - sizeof(struct sw_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring), - &bnx2x_fp(bp, i, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); - - BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring), - &bnx2x_fp(bp, i, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); - - /* SGE ring */ - BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring), - sizeof(struct sw_rx_page) * NUM_RX_SGE); - 
BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring), - &bnx2x_fp(bp, i, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); - } - /* Tx */ - for_each_queue(bp, i) { - - /* fastpath tx rings: tx_buf tx_desc */ - BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), - sizeof(struct sw_tx_bd) * NUM_TX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring), - &bnx2x_fp(bp, i, tx_desc_mapping), - sizeof(union eth_tx_bd_types) * NUM_TX_BD); - } - /* end of fastpath */ - - BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, - sizeof(struct host_def_status_block)); - - BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); - -#ifdef BCM_CNIC - BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); - - /* allocate searcher T2 table - we allocate 1/4 of alloc num for T2 - (which is not entered into the ILT) */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); - - /* Initialize T2 (for 1024 connections) */ - for (i = 0; i < 16*1024; i += 64) - *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; - - /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ - BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); - - /* QM queues (128*MAX_CONN) */ - BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); - - BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, - sizeof(struct host_status_block)); -#endif - - /* Slow path ring */ - BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); - - return 0; - -alloc_mem_err: - bnx2x_free_mem(bp); - return -ENOMEM; - -#undef BNX2X_PCI_ALLOC -#undef BNX2X_ALLOC -} - -static void bnx2x_free_tx_skbs(struct bnx2x *bp) -{ - int i; - - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - u16 bd_cons = fp->tx_bd_cons; - u16 sw_prod = fp->tx_pkt_prod; - u16 sw_cons = fp->tx_pkt_cons; - - while (sw_cons != sw_prod) { - bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); - sw_cons++; - } - } -} - -static void bnx2x_free_rx_skbs(struct bnx2x *bp) -{ - int i, j; - - for_each_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - for (i = 0; i < NUM_RX_BD; i++) { - struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; - struct sk_buff *skb = rx_buf->skb; - - if (skb == NULL) - continue; - - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, DMA_FROM_DEVICE); - - rx_buf->skb = NULL; - dev_kfree_skb(skb); - } - if (!fp->disable_tpa) - bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 
- ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H); - } -} - -static void bnx2x_free_skbs(struct bnx2x *bp) -{ - bnx2x_free_tx_skbs(bp); - bnx2x_free_rx_skbs(bp); -} - -static void bnx2x_free_msix_irqs(struct bnx2x *bp) -{ - int i, offset = 1; - - free_irq(bp->msix_table[0].vector, bp->dev); - DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", - bp->msix_table[0].vector); - -#ifdef BCM_CNIC - offset++; -#endif - for_each_queue(bp, i) { - DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " - "state %x\n", i, bp->msix_table[i + offset].vector, - bnx2x_fp(bp, i, state)); - - free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); - } -} - -static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only) -{ - if (bp->flags & USING_MSIX_FLAG) { - if (!disable_only) - bnx2x_free_msix_irqs(bp); - pci_disable_msix(bp->pdev); - bp->flags &= ~USING_MSIX_FLAG; - - } else if (bp->flags & USING_MSI_FLAG) { - if (!disable_only) - free_irq(bp->pdev->irq, bp->dev); - pci_disable_msi(bp->pdev); - bp->flags &= ~USING_MSI_FLAG; - - } else if (!disable_only) - free_irq(bp->pdev->irq, bp->dev); -} - -static int bnx2x_enable_msix(struct bnx2x *bp) -{ - int i, rc, offset = 1; - int igu_vec = 0; - - bp->msix_table[0].entry = igu_vec; - DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); - -#ifdef BCM_CNIC - igu_vec = BP_L_ID(bp) + offset; - bp->msix_table[1].entry = igu_vec; - DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); - offset++; -#endif - for_each_queue(bp, i) { - igu_vec = BP_L_ID(bp) + offset + i; - bp->msix_table[i + offset].entry = igu_vec; - DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " - "(fastpath #%u)\n", i + offset, igu_vec, i); - } - - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], - BNX2X_NUM_QUEUES(bp) + offset); - - /* - * reconfigure number of tx/rx queues according to available - * MSI-X vectors - */ - if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { - /* vectors available for FP */ - int fp_vec = rc - BNX2X_MSIX_VEC_FP_START; - - DP(NETIF_MSG_IFUP, - "Trying to use less MSI-X vectors: %d\n", rc); - - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); - - if (rc) { - DP(NETIF_MSG_IFUP, - "MSI-X is not attainable rc %d\n", rc); - return rc; - } - - bp->num_queues = min(bp->num_queues, fp_vec); - - DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", - bp->num_queues); - } else if (rc) { - DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); - return rc; - } - - bp->flags |= USING_MSIX_FLAG; - - return 0; -} - -static int bnx2x_req_msix_irqs(struct bnx2x *bp) -{ - int i, rc, offset = 1; - - rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, - bp->dev->name, bp->dev); - if (rc) { - BNX2X_ERR("request sp irq failed\n"); - return -EBUSY; - } - -#ifdef BCM_CNIC - offset++; -#endif - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", - bp->dev->name, i); - - rc = request_irq(bp->msix_table[i + offset].vector, - bnx2x_msix_fp_int, 0, fp->name, fp); - if (rc) { - BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); - bnx2x_free_msix_irqs(bp); - return -EBUSY; - } - - fp->state = BNX2X_FP_STATE_IRQ; - } - - i = BNX2X_NUM_QUEUES(bp); - netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" - " ... 
fp[%d] %d\n", - bp->msix_table[0].vector, - 0, bp->msix_table[offset].vector, - i - 1, bp->msix_table[offset + i - 1].vector); - - return 0; -} - -static int bnx2x_enable_msi(struct bnx2x *bp) -{ - int rc; - - rc = pci_enable_msi(bp->pdev); - if (rc) { - DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); - return -1; - } - bp->flags |= USING_MSI_FLAG; - - return 0; -} - -static int bnx2x_req_irq(struct bnx2x *bp) -{ - unsigned long flags; - int rc; - - if (bp->flags & USING_MSI_FLAG) - flags = 0; - else - flags = IRQF_SHARED; - - rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, - bp->dev->name, bp->dev); - if (!rc) - bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; - - return rc; -} - -static void bnx2x_napi_enable(struct bnx2x *bp) -{ - int i; - - for_each_queue(bp, i) - napi_enable(&bnx2x_fp(bp, i, napi)); -} - -static void bnx2x_napi_disable(struct bnx2x *bp) -{ - int i; - - for_each_queue(bp, i) - napi_disable(&bnx2x_fp(bp, i, napi)); -} - -static void bnx2x_netif_start(struct bnx2x *bp) -{ - int intr_sem; - - intr_sem = atomic_dec_and_test(&bp->intr_sem); - smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ - - if (intr_sem) { - if (netif_running(bp->dev)) { - bnx2x_napi_enable(bp); - bnx2x_int_enable(bp); - if (bp->state == BNX2X_STATE_OPEN) - netif_tx_wake_all_queues(bp->dev); - } - } -} - -static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) -{ - bnx2x_int_disable_sync(bp, disable_hw); - bnx2x_napi_disable(bp); - netif_tx_disable(bp->dev); -} - -/* - * Init service functions - */ - -/** - * Sets a MAC in a CAM for a few L2 Clients for E1 chip - * - * @param bp driver descriptor - * @param set set or clear an entry (1 or 0) - * @param mac pointer to a buffer containing a MAC - * @param cl_bit_vec bit vector of clients to register a MAC for - * @param cam_offset offset in a CAM to use - * @param with_bcast set broadcast MAC as well - */ -static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac, - u32 cl_bit_vec, u8 cam_offset, - u8 with_bcast) -{ - struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); - int port = BP_PORT(bp); - - /* CAM allocation - * unicasts 0-31:port0 32-63:port1 - * multicast 64-127:port0 128-191:port1 - */ - config->hdr.length = 1 + (with_bcast ? 1 : 0); - config->hdr.offset = cam_offset; - config->hdr.client_id = 0xff; - config->hdr.reserved1 = 0; - - /* primary MAC */ - config->config_table[0].cam_entry.msb_mac_addr = - swab16(*(u16 *)&mac[0]); - config->config_table[0].cam_entry.middle_mac_addr = - swab16(*(u16 *)&mac[2]); - config->config_table[0].cam_entry.lsb_mac_addr = - swab16(*(u16 *)&mac[4]); - config->config_table[0].cam_entry.flags = cpu_to_le16(port); - if (set) - config->config_table[0].target_table_entry.flags = 0; - else - CAM_INVALIDATE(config->config_table[0]); - config->config_table[0].target_table_entry.clients_bit_vector = - cpu_to_le32(cl_bit_vec); - config->config_table[0].target_table_entry.vlan_id = 0; - - DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", - (set ? 
"setting" : "clearing"), - config->config_table[0].cam_entry.msb_mac_addr, - config->config_table[0].cam_entry.middle_mac_addr, - config->config_table[0].cam_entry.lsb_mac_addr); - - /* broadcast */ - if (with_bcast) { - config->config_table[1].cam_entry.msb_mac_addr = - cpu_to_le16(0xffff); - config->config_table[1].cam_entry.middle_mac_addr = - cpu_to_le16(0xffff); - config->config_table[1].cam_entry.lsb_mac_addr = - cpu_to_le16(0xffff); - config->config_table[1].cam_entry.flags = cpu_to_le16(port); - if (set) - config->config_table[1].target_table_entry.flags = - TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; - else - CAM_INVALIDATE(config->config_table[1]); - config->config_table[1].target_table_entry.clients_bit_vector = - cpu_to_le32(cl_bit_vec); - config->config_table[1].target_table_entry.vlan_id = 0; - } - - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mac_config)), - U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); -} - -/** - * Sets a MAC in a CAM for a few L2 Clients for E1H chip - * - * @param bp driver descriptor - * @param set set or clear an entry (1 or 0) - * @param mac pointer to a buffer containing a MAC - * @param cl_bit_vec bit vector of clients to register a MAC for - * @param cam_offset offset in a CAM to use - */ -static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, - u32 cl_bit_vec, u8 cam_offset) -{ - struct mac_configuration_cmd_e1h *config = - (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); - - config->hdr.length = 1; - config->hdr.offset = cam_offset; - config->hdr.client_id = 0xff; - config->hdr.reserved1 = 0; - - /* primary MAC */ - config->config_table[0].msb_mac_addr = - swab16(*(u16 *)&mac[0]); - config->config_table[0].middle_mac_addr = - swab16(*(u16 *)&mac[2]); - config->config_table[0].lsb_mac_addr = - swab16(*(u16 *)&mac[4]); - config->config_table[0].clients_bit_vector = - cpu_to_le32(cl_bit_vec); - config->config_table[0].vlan_id = 0; - config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); - if (set) - config->config_table[0].flags = BP_PORT(bp); - else - config->config_table[0].flags = - MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; - - DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", - (set ? "setting" : "clearing"), - config->config_table[0].msb_mac_addr, - config->config_table[0].middle_mac_addr, - config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); - - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mac_config)), - U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); -} - -static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, - int *state_p, int poll) -{ - /* can take a while if any port is running */ - int cnt = 5000; - - DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", - poll ? "polling" : "waiting", state, idx); - - might_sleep(); - while (cnt--) { - if (poll) { - bnx2x_rx_int(bp->fp, 10); - /* if index is different from 0 - * the reply for some commands will - * be on the non default queue - */ - if (idx) - bnx2x_rx_int(&bp->fp[idx], 10); - } - - mb(); /* state is changed by bnx2x_sp_event() */ - if (*state_p == state) { -#ifdef BNX2X_STOP_ON_ERROR - DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt); -#endif - return 0; - } - - msleep(1); - - if (bp->panic) - return -EIO; - } - - /* timeout! */ - BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", - poll ? 
"polling" : "waiting", state, idx); -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - - return -EBUSY; -} - -static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) -{ - bp->set_mac_pending++; - smp_wmb(); - - bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, - (1 << bp->fp->cl_id), BP_FUNC(bp)); - - /* Wait for a completion */ - bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); -} - -static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) -{ - bp->set_mac_pending++; - smp_wmb(); - - bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, - (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), - 1); - - /* Wait for a completion */ - bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); -} - -#ifdef BCM_CNIC -/** - * Set iSCSI MAC(s) at the next enties in the CAM after the ETH - * MAC(s). This function will wait until the ramdord completion - * returns. - * - * @param bp driver handle - * @param set set or clear the CAM entry - * - * @return 0 if cussess, -ENODEV if ramrod doesn't return. - */ -static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) -{ - u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); - - bp->set_mac_pending++; - smp_wmb(); - - /* Send a SET_MAC ramrod */ - if (CHIP_IS_E1(bp)) - bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, - cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2, - 1); - else - /* CAM allocation for E1H - * unicasts: by func number - * multicast: 20+FUNC*20, 20 each - */ - bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac, - cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp)); - - /* Wait for a completion when setting */ - bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); - - return 0; -} -#endif - -static int bnx2x_setup_leading(struct bnx2x *bp) -{ - int rc; - - /* reset IGU state */ - bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); - - /* SETUP ramrod */ - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); - - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); - - return rc; -} - -static int bnx2x_setup_multi(struct bnx2x *bp, int index) -{ - struct bnx2x_fastpath *fp = &bp->fp[index]; - - /* reset IGU state */ - bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); - - /* SETUP ramrod */ - fp->state = BNX2X_FP_STATE_OPENING; - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, - fp->cl_id, 0); - - /* Wait for completion */ - return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, - &(fp->state), 0); -} - -static int bnx2x_poll(struct napi_struct *napi, int budget); - -static void bnx2x_set_num_queues_msix(struct bnx2x *bp) -{ - - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - bp->num_queues = 1; - break; - - case ETH_RSS_MODE_REGULAR: - if (num_queues) - bp->num_queues = min_t(u32, num_queues, - BNX2X_MAX_QUEUES(bp)); - else - bp->num_queues = min_t(u32, num_online_cpus(), - BNX2X_MAX_QUEUES(bp)); - break; - - - default: - bp->num_queues = 1; - break; - } -} - -static int bnx2x_set_num_queues(struct bnx2x *bp) -{ - int rc = 0; - - switch (int_mode) { - case INT_MODE_INTx: - case INT_MODE_MSI: - bp->num_queues = 1; - DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); - break; - default: - /* Set number of queues according to bp->multi_mode value */ - bnx2x_set_num_queues_msix(bp); - - DP(NETIF_MSG_IFUP, "set number of queues to %d\n", - bp->num_queues); - - /* if we can't use MSI-X we only need one fp, - * so try to enable MSI-X with the requested number of fp's - * and fallback to MSI or legacy INTx with one fp 
- */ - rc = bnx2x_enable_msix(bp); - if (rc) - /* failed to enable MSI-X */ - bp->num_queues = 1; - break; - } - bp->dev->real_num_tx_queues = bp->num_queues; - return rc; -} - -#ifdef BCM_CNIC -static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); -static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); -#endif - -/* must be called with rtnl_lock */ -static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) -{ - u32 load_code; - int i, rc; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -EPERM; -#endif - - bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; - - rc = bnx2x_set_num_queues(bp); - - if (bnx2x_alloc_mem(bp)) { - bnx2x_free_irq(bp, true); - return -ENOMEM; - } - - for_each_queue(bp, i) - bnx2x_fp(bp, i, disable_tpa) = - ((bp->flags & TPA_ENABLE_FLAG) == 0); - - for_each_queue(bp, i) - netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, 128); - - bnx2x_napi_enable(bp); - - if (bp->flags & USING_MSIX_FLAG) { - rc = bnx2x_req_msix_irqs(bp); - if (rc) { - bnx2x_free_irq(bp, true); - goto load_error1; - } - } else { - /* Fall to INTx if failed to enable MSI-X due to lack of - memory (in bnx2x_set_num_queues()) */ - if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) - bnx2x_enable_msi(bp); - bnx2x_ack_int(bp); - rc = bnx2x_req_irq(bp); - if (rc) { - BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); - bnx2x_free_irq(bp, true); - goto load_error1; - } - if (bp->flags & USING_MSI_FLAG) { - bp->dev->irq = bp->pdev->irq; - netdev_info(bp->dev, "using MSI IRQ %d\n", - bp->pdev->irq); - } - } - - /* Send LOAD_REQUEST command to MCP - Returns the type of LOAD command: - if it is the first port to be initialized - common blocks should be initialized, otherwise - not - */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; - goto load_error2; - } - if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { - rc = -EBUSY; /* other port in diagnostic mode */ - goto load_error2; - } - - } else { - int port = BP_PORT(bp); - - DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", - load_count[0], load_count[1], load_count[2]); - load_count[0]++; - load_count[1 + port]++; - DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", - load_count[0], load_count[1], load_count[2]); - if (load_count[0] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_COMMON; - else if (load_count[1 + port] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_PORT; - else - load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; - } - - if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) - bp->port.pmf = 1; - else - bp->port.pmf = 0; - DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); - - /* Initialize HW */ - rc = bnx2x_init_hw(bp, load_code); - if (rc) { - BNX2X_ERR("HW init failed, aborting\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - goto load_error2; - } - - /* Setup NIC internals and enable interrupts */ - bnx2x_nic_init(bp, load_code); - - if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && - (bp->common.shmem2_base)) - SHMEM2_WR(bp, dcc_support, - (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | - SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); - - /* Send LOAD_DONE command to MCP */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; - goto load_error3; 
- } - } - - bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; - - rc = bnx2x_setup_leading(bp); - if (rc) { - BNX2X_ERR("Setup leading failed!\n"); -#ifndef BNX2X_STOP_ON_ERROR - goto load_error3; -#else - bp->panic = 1; - return -EBUSY; -#endif - } - - if (CHIP_IS_E1H(bp)) - if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { - DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); - bp->flags |= MF_FUNC_DIS; - } - - if (bp->state == BNX2X_STATE_OPEN) { -#ifdef BCM_CNIC - /* Enable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); -#endif - for_each_nondefault_queue(bp, i) { - rc = bnx2x_setup_multi(bp, i); - if (rc) -#ifdef BCM_CNIC - goto load_error4; -#else - goto load_error3; -#endif - } - - if (CHIP_IS_E1(bp)) - bnx2x_set_eth_mac_addr_e1(bp, 1); - else - bnx2x_set_eth_mac_addr_e1h(bp, 1); -#ifdef BCM_CNIC - /* Set iSCSI L2 MAC */ - mutex_lock(&bp->cnic_mutex); - if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { - bnx2x_set_iscsi_eth_mac_addr(bp, 1); - bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; - bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, - CNIC_SB_ID(bp)); - } - mutex_unlock(&bp->cnic_mutex); -#endif - } - - if (bp->port.pmf) - bnx2x_initial_phy_init(bp, load_mode); - - /* Start fast path */ - switch (load_mode) { - case LOAD_NORMAL: - if (bp->state == BNX2X_STATE_OPEN) { - /* Tx queue should be only reenabled */ - netif_tx_wake_all_queues(bp->dev); - } - /* Initialize the receive filter. */ - bnx2x_set_rx_mode(bp->dev); - break; - - case LOAD_OPEN: - netif_tx_start_all_queues(bp->dev); - if (bp->state != BNX2X_STATE_OPEN) - netif_tx_disable(bp->dev); - /* Initialize the receive filter. */ - bnx2x_set_rx_mode(bp->dev); - break; - - case LOAD_DIAG: - /* Initialize the receive filter. */ - bnx2x_set_rx_mode(bp->dev); - bp->state = BNX2X_STATE_DIAG; - break; - - default: - break; - } - - if (!bp->port.pmf) - bnx2x__link_status_update(bp); - - /* start the timer */ - mod_timer(&bp->timer, jiffies + bp->current_interval); - -#ifdef BCM_CNIC - bnx2x_setup_cnic_irq_info(bp); - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); -#endif - bnx2x_inc_load_cnt(bp); - - return 0; - -#ifdef BCM_CNIC -load_error4: - /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); -#endif -load_error3: - bnx2x_int_disable_sync(bp, 1); - if (!BP_NOMCP(bp)) { - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - } - bp->port.pmf = 0; - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - for_each_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); -load_error2: - /* Release IRQs */ - bnx2x_free_irq(bp, false); -load_error1: - bnx2x_napi_disable(bp); - for_each_queue(bp, i) - netif_napi_del(&bnx2x_fp(bp, i, napi)); - bnx2x_free_mem(bp); - - return rc; -} - -static int bnx2x_stop_multi(struct bnx2x *bp, int index) -{ - struct bnx2x_fastpath *fp = &bp->fp[index]; - int rc; - - /* halt the connection */ - fp->state = BNX2X_FP_STATE_HALTING; - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0); - - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, - &(fp->state), 1); - if (rc) /* timeout */ - return rc; - - /* delete cfc entry */ - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); - - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, - &(fp->state), 1); - return rc; -} - -static int bnx2x_stop_leading(struct bnx2x *bp) -{ - __le16 dsb_sp_prod_idx; 
- /* if the other port is handling traffic, - this can take a lot of time */ - int cnt = 500; - int rc; - - might_sleep(); - - /* Send HALT ramrod */ - bp->fp[0].state = BNX2X_FP_STATE_HALTING; - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0); - - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, - &(bp->fp[0].state), 1); - if (rc) /* timeout */ - return rc; - - dsb_sp_prod_idx = *bp->dsb_sp_prod; - - /* Send PORT_DELETE ramrod */ - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); - - /* Wait for completion to arrive on default status block - we are going to reset the chip anyway - so there is not much to do if this times out - */ - while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { - if (!cnt) { - DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " - "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", - *bp->dsb_sp_prod, dsb_sp_prod_idx); -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - rc = -EBUSY; - break; - } - cnt--; - msleep(1); - rmb(); /* Refresh the dsb_sp_prod */ - } - bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; - bp->fp[0].state = BNX2X_FP_STATE_CLOSED; - - return rc; -} - -static void bnx2x_reset_func(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int base, i; - - /* Configure IGU */ - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - -#ifdef BCM_CNIC - /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); - /* - * Wait for at least 10ms and up to 2 second for the timers scan to - * complete - */ - for (i = 0; i < 200; i++) { - msleep(10); - if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) - break; - } -#endif - /* Clear ILT */ - base = FUNC_ILT_BASE(func); - for (i = base; i < base + ILT_PER_FUNC; i++) - bnx2x_ilt_wr(bp, i, 0); -} - -static void bnx2x_reset_port(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 val; - - REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); - - /* Do not rcv packets to BRB */ - REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); - /* Do not direct rcv packets that are not for MCP to the BRB */ - REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - - /* Configure AEU */ - REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); - - msleep(100); - /* Check for BRB port occupancy */ - val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); - if (val) - DP(NETIF_MSG_IFDOWN, - "BRB1 is not empty %d blocks are occupied\n", val); - - /* TODO: Close Doorbell port? 
*/ -} - -static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) -{ - DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", - BP_FUNC(bp), reset_code); - - switch (reset_code) { - case FW_MSG_CODE_DRV_UNLOAD_COMMON: - bnx2x_reset_port(bp); - bnx2x_reset_func(bp); - bnx2x_reset_common(bp); - break; - - case FW_MSG_CODE_DRV_UNLOAD_PORT: - bnx2x_reset_port(bp); - bnx2x_reset_func(bp); - break; - - case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: - bnx2x_reset_func(bp); - break; - - default: - BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); - break; - } -} - -static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) -{ - int port = BP_PORT(bp); - u32 reset_code = 0; - int i, cnt, rc; - - /* Wait until tx fastpath tasks complete */ - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - cnt = 1000; - while (bnx2x_has_tx_work_unload(fp)) { - - bnx2x_tx_int(fp); - if (!cnt) { - BNX2X_ERR("timeout waiting for queue[%d]\n", - i); -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); - return -EBUSY; -#else - break; -#endif - } - cnt--; - msleep(1); - } - } - /* Give HW time to discard old tx messages */ - msleep(1); - - if (CHIP_IS_E1(bp)) { - struct mac_configuration_cmd *config = - bnx2x_sp(bp, mcast_config); - - bnx2x_set_eth_mac_addr_e1(bp, 0); - - for (i = 0; i < config->hdr.length; i++) - CAM_INVALIDATE(config->config_table[i]); - - config->hdr.length = i; - if (CHIP_REV_IS_SLOW(bp)) - config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port); - else - config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port); - config->hdr.client_id = bp->fp->cl_id; - config->hdr.reserved1 = 0; - - bp->set_mac_pending++; - smp_wmb(); - - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mcast_config)), - U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); - - } else { /* E1H */ - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); - - bnx2x_set_eth_mac_addr_e1h(bp, 0); - - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); - - REG_WR(bp, MISC_REG_E1HMF_MODE, 0); - } -#ifdef BCM_CNIC - /* Clear iSCSI L2 MAC */ - mutex_lock(&bp->cnic_mutex); - if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) { - bnx2x_set_iscsi_eth_mac_addr(bp, 0); - bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET; - } - mutex_unlock(&bp->cnic_mutex); -#endif - - if (unload_mode == UNLOAD_NORMAL) - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - else if (bp->flags & NO_WOL_FLAG) - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; - - else if (bp->wol) { - u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; - u8 *mac_addr = bp->dev->dev_addr; - u32 val; - /* The mac address is written to entries 1-4 to - preserve entry 0 which is used by the PMF */ - u8 entry = (BP_E1HVN(bp) + 1)*8; - - val = (mac_addr[0] << 8) | mac_addr[1]; - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); - - val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); - - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; - - } else - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - /* Close multi and leading connections - Completions for ramrods are collected in a synchronous way */ - for_each_nondefault_queue(bp, i) - if (bnx2x_stop_multi(bp, i)) - goto unload_error; - - rc = bnx2x_stop_leading(bp); - if (rc) { - BNX2X_ERR("Stop leading failed!\n"); -#ifdef BNX2X_STOP_ON_ERROR - return -EBUSY; -#else - goto unload_error; -#endif - } - -unload_error: - if (!BP_NOMCP(bp)) - reset_code = bnx2x_fw_command(bp, reset_code); - else { - DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", - load_count[0], load_count[1], load_count[2]); - load_count[0]--; - load_count[1 + port]--; - DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n", - load_count[0], load_count[1], load_count[2]); - if (load_count[0] == 0) - reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; - else if (load_count[1 + port] == 0) - reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; - else - reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; - } - - if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || - (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) - bnx2x__link_reset(bp); - - /* Reset the chip */ - bnx2x_reset_chip(bp, reset_code); - - /* Report UNLOAD_DONE to MCP */ - if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - -} - -static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) -{ - u32 val; - - DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); - - if (CHIP_IS_E1(bp)) { - int port = BP_PORT(bp); - u32 addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0; - - val = REG_RD(bp, addr); - val &= ~(0x300); - REG_WR(bp, addr, val); - } else if (CHIP_IS_E1H(bp)) { - val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); - val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | - MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); - REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); - } -} - -/* must be called with rtnl_lock */ -static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) -{ - int i; - - if (bp->state == BNX2X_STATE_CLOSED) { - /* Interface has been removed - nothing to recover */ - bp->recovery_state = BNX2X_RECOVERY_DONE; - bp->is_leader = 0; - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); - smp_wmb(); - - return -EINVAL; - } - -#ifdef BCM_CNIC - bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); -#endif - bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; - - /* Set "drop all" */ - bp->rx_mode = BNX2X_RX_MODE_NONE; - bnx2x_set_storm_rx_mode(bp); - - /* Disable HW interrupts, NAPI and Tx */ - bnx2x_netif_stop(bp, 1); - netif_carrier_off(bp->dev); - - del_timer_sync(&bp->timer); - SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, - (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* Release IRQs */ - bnx2x_free_irq(bp, false); - - /* Cleanup the chip if needed */ - if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); - - bp->port.pmf = 0; - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - for_each_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - for_each_queue(bp, i) - netif_napi_del(&bnx2x_fp(bp, i, napi)); - bnx2x_free_mem(bp); - - bp->state = BNX2X_STATE_CLOSED; - - /* The last driver must disable a "close the gate" if there is no - * parity attention or "process kill" pending. - */ - if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && - bnx2x_reset_is_done(bp)) - bnx2x_disable_close_the_gate(bp); - - /* Reset MCP mail box sequence if there is on going recovery */ - if (unload_mode == UNLOAD_RECOVERY) - bp->fw_seq = 0; - - return 0; -} - -/* Close gates #2, #3 and #4: */ -static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) -{ - u32 val, addr; - - /* Gates #2 and #4a are closed/opened for "not E1" only */ - if (!CHIP_IS_E1(bp)) { - /* #4 */ - val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS); - REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, - close ? (val | 0x1) : (val & (~(u32)1))); - /* #2 */ - val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES); - REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, - close ? (val | 0x1) : (val & (~(u32)1))); - } - - /* #3 */ - addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; - val = REG_RD(bp, addr); - REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1))); - - DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", - close ? "closing" : "opening"); - mmiowb(); -} - -#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ - -static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) -{ - /* Do some magic... */ - u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); - *magic_val = val & SHARED_MF_CLP_MAGIC; - MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); -} - -/* Restore the value of the `magic' bit. - * - * @param pdev Device handle. - * @param magic_val Old value of the `magic' bit. - */ -static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) -{ - /* Restore the `magic' bit value... 
*/ - /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb); - SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb, - (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */ - u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); - MF_CFG_WR(bp, shared_mf_config.clp_mb, - (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); -} - -/* Prepares for MCP reset: takes care of CLP configurations. - * - * @param bp - * @param magic_val Old value of 'magic' bit. - */ -static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) -{ - u32 shmem; - u32 validity_offset; - - DP(NETIF_MSG_HW, "Starting\n"); - - /* Set `magic' bit in order to save MF config */ - if (!CHIP_IS_E1(bp)) - bnx2x_clp_reset_prep(bp, magic_val); - - /* Get shmem offset */ - shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - validity_offset = offsetof(struct shmem_region, validity_map[0]); - - /* Clear validity map flags */ - if (shmem > 0) - REG_WR(bp, shmem + validity_offset, 0); -} - -#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ -#define MCP_ONE_TIMEOUT 100 /* 100 ms */ - -/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10, - * depending on the HW type. - * - * @param bp - */ -static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) -{ - /* special handling for emulation and FPGA, - wait 10 times longer */ - if (CHIP_REV_IS_SLOW(bp)) - msleep(MCP_ONE_TIMEOUT*10); - else - msleep(MCP_ONE_TIMEOUT); -} - -static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) -{ - u32 shmem, cnt, validity_offset, val; - int rc = 0; - - msleep(100); - - /* Get shmem offset */ - shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - if (shmem == 0) { - BNX2X_ERR("Shmem 0 return failure\n"); - rc = -ENOTTY; - goto exit_lbl; - } - - validity_offset = offsetof(struct shmem_region, validity_map[0]); - - /* Wait for MCP to come up */ - for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) { - /* TBD: its best to check validity map of last port. - * currently checks on port 0. - */ - val = REG_RD(bp, shmem + validity_offset); - DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem, - shmem + validity_offset, val); - - /* check that shared memory is valid. */ - if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - break; - - bnx2x_mcp_wait_one(bp); - } - - DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val); - - /* Check that shared memory is valid. This indicates that MCP is up. */ - if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != - (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { - BNX2X_ERR("Shmem signature not present. 
MCP is not up !!\n"); - rc = -ENOTTY; - goto exit_lbl; - } - -exit_lbl: - /* Restore the `magic' bit value */ - if (!CHIP_IS_E1(bp)) - bnx2x_clp_reset_done(bp, magic_val); - - return rc; -} - -static void bnx2x_pxp_prep(struct bnx2x *bp) -{ - if (!CHIP_IS_E1(bp)) { - REG_WR(bp, PXP2_REG_RD_START_INIT, 0); - REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); - REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0); - mmiowb(); - } -} - -/* - * Reset the whole chip except for: - * - PCIE core - * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by - * one reset bit) - * - IGU - * - MISC (including AEU) - * - GRC - * - RBCN, RBCP - */ -static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) -{ - u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; - - not_reset_mask1 = - MISC_REGISTERS_RESET_REG_1_RST_HC | - MISC_REGISTERS_RESET_REG_1_RST_PXPV | - MISC_REGISTERS_RESET_REG_1_RST_PXP; - - not_reset_mask2 = - MISC_REGISTERS_RESET_REG_2_RST_MDIO | - MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | - MISC_REGISTERS_RESET_REG_2_RST_RBCN | - MISC_REGISTERS_RESET_REG_2_RST_GRC | - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B; - - reset_mask1 = 0xffffffff; - - if (CHIP_IS_E1(bp)) - reset_mask2 = 0xffff; - else - reset_mask2 = 0x1ffff; - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - reset_mask1 & (~not_reset_mask1)); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - reset_mask2 & (~not_reset_mask2)); - - barrier(); - mmiowb(); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2); - mmiowb(); -} - -static int bnx2x_process_kill(struct bnx2x *bp) -{ - int cnt = 1000; - u32 val = 0; - u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; - - - /* Empty the Tetris buffer, wait for 1s */ - do { - sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); - blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); - port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); - port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); - pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); - if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && - ((port_is_idle_0 & 0x1) == 0x1) && - ((port_is_idle_1 & 0x1) == 0x1) && - (pgl_exp_rom2 == 0xffffffff)) - break; - msleep(1); - } while (cnt-- > 0); - - if (cnt <= 0) { - DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" - " are still" - " outstanding read requests after 1s!\n"); - DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," - " port_is_idle_0=0x%08x," - " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", - sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, - pgl_exp_rom2); - return -EAGAIN; - } - - barrier(); - - /* Close gates #2, #3 and #4 */ - bnx2x_set_234_gates(bp, true); - - /* TBD: Indicate that "process kill" is in progress to MCP */ - - /* Clear "unprepared" bit */ - REG_WR(bp, MISC_REG_UNPREPARED, 0); - barrier(); - - /* Make sure all is written to the chip before the reset */ - mmiowb(); - - /* Wait for 1ms to empty GLUE and PCI-E core queues, - * PSWHST, GRC and PSWRD Tetris buffer. 
- */ - msleep(1); - - /* Prepare to chip reset: */ - /* MCP */ - bnx2x_reset_mcp_prep(bp, &val); - - /* PXP */ - bnx2x_pxp_prep(bp); - barrier(); - - /* reset the chip */ - bnx2x_process_kill_chip_reset(bp); - barrier(); - - /* Recover after reset: */ - /* MCP */ - if (bnx2x_reset_mcp_comp(bp, val)) - return -EAGAIN; - - /* PXP */ - bnx2x_pxp_prep(bp); - - /* Open the gates #2, #3 and #4 */ - bnx2x_set_234_gates(bp, false); - - /* TBD: IGU/AEU preparation bring back the AEU/IGU to a - * reset state, re-enable attentions. */ - - return 0; -} - -static int bnx2x_leader_reset(struct bnx2x *bp) -{ - int rc = 0; - /* Try to recover after the failure */ - if (bnx2x_process_kill(bp)) { - printk(KERN_ERR "%s: Something bad had happen! Aii!\n", - bp->dev->name); - rc = -EAGAIN; - goto exit_leader_reset; - } - - /* Clear "reset is in progress" bit and update the driver state */ - bnx2x_set_reset_done(bp); - bp->recovery_state = BNX2X_RECOVERY_DONE; - -exit_leader_reset: - bp->is_leader = 0; - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); - smp_wmb(); - return rc; + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + REG_WR(bp, QM_REG_QM_INT_MASK, 0); + REG_WR(bp, TM_REG_TM_INT_MASK, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); + REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ + REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); + REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); + REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ + REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); + REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ + if (CHIP_REV_IS_FPGA(bp)) + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); + else + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); + REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); +/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ + REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); + REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); +/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ + REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ } -static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); - -/* Assumption: runs under rtnl lock. This together with the fact - * that it's called only from bnx2x_reset_task() ensure that it - * will never be called when netif_running(bp->dev) is false. 
- */ -static void bnx2x_parity_recover(struct bnx2x *bp) -{ - DP(NETIF_MSG_HW, "Handling parity\n"); - while (1) { - switch (bp->recovery_state) { - case BNX2X_RECOVERY_INIT: - DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); - /* Try to get a LEADER_LOCK HW lock */ - if (bnx2x_trylock_hw_lock(bp, - HW_LOCK_RESOURCE_RESERVED_08)) - bp->is_leader = 1; - - /* Stop the driver */ - /* If interface has been removed - break */ - if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) - return; - - bp->recovery_state = BNX2X_RECOVERY_WAIT; - /* Ensure "is_leader" and "recovery_state" - * update values are seen on other CPUs - */ - smp_wmb(); - break; - - case BNX2X_RECOVERY_WAIT: - DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); - if (bp->is_leader) { - u32 load_counter = bnx2x_get_load_cnt(bp); - if (load_counter) { - /* Wait until all other functions get - * down. - */ - schedule_delayed_work(&bp->reset_task, - HZ/10); - return; - } else { - /* If all other functions got down - - * try to bring the chip back to - * normal. In any case it's an exit - * point for a leader. - */ - if (bnx2x_leader_reset(bp) || - bnx2x_nic_load(bp, LOAD_NORMAL)) { - printk(KERN_ERR"%s: Recovery " - "has failed. Power cycle is " - "needed.\n", bp->dev->name); - /* Disconnect this device */ - netif_device_detach(bp->dev); - /* Block ifup for all function - * of this ASIC until - * "process kill" or power - * cycle. - */ - bnx2x_set_reset_in_progress(bp); - /* Shut down the power */ - bnx2x_set_power_state(bp, - PCI_D3hot); - return; - } - - return; - } - } else { /* non-leader */ - if (!bnx2x_reset_is_done(bp)) { - /* Try to get a LEADER_LOCK HW lock as - * long as a former leader may have - * been unloaded by the user or - * released a leadership by another - * reason. - */ - if (bnx2x_trylock_hw_lock(bp, - HW_LOCK_RESOURCE_RESERVED_08)) { - /* I'm a leader now! Restart a - * switch case. - */ - bp->is_leader = 1; - break; - } - - schedule_delayed_work(&bp->reset_task, - HZ/10); - return; - - } else { /* A leader has completed - * the "process kill". It's an exit - * point for a non-leader. - */ - bnx2x_nic_load(bp, LOAD_NORMAL); - bp->recovery_state = - BNX2X_RECOVERY_DONE; - smp_wmb(); - return; - } - } - default: - return; - } - } -} +static const struct { + u32 addr; + u32 mask; +} bnx2x_parity_mask[] = { + {PXP_REG_PXP_PRTY_MASK, 0xffffffff}, + {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, + {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff}, + {HC_REG_HC_PRTY_MASK, 0xffffffff}, + {MISC_REG_MISC_PRTY_MASK, 0xffffffff}, + {QM_REG_QM_PRTY_MASK, 0x0}, + {DORQ_REG_DORQ_PRTY_MASK, 0x0}, + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, + {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ + {CDU_REG_CDU_PRTY_MASK, 0x0}, + {CFC_REG_CFC_PRTY_MASK, 0x0}, + {DBG_REG_DBG_PRTY_MASK, 0x0}, + {DMAE_REG_DMAE_PRTY_MASK, 0x0}, + {BRB1_REG_BRB1_PRTY_MASK, 0x0}, + {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ + {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */ + {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ + {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */ + {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ + {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, + {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, + {USEM_REG_USEM_PRTY_MASK_0, 0x0}, + {USEM_REG_USEM_PRTY_MASK_1, 0x0}, + {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, + {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, + {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, + {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} +}; -/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is - * scheduled on a general queue in order to prevent a dead lock. 
- */ -static void bnx2x_reset_task(struct work_struct *work) +static void enable_blocks_parity(struct bnx2x *bp) { - struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work); + int i, mask_arr_len = + sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0])); -#ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("reset task called but STOP_ON_ERROR defined" - " so reset not done to allow debug dump,\n" - KERN_ERR " you will need to reboot when done\n"); - return; -#endif + for (i = 0; i < mask_arr_len; i++) + REG_WR(bp, bnx2x_parity_mask[i].addr, + bnx2x_parity_mask[i].mask); +} - rtnl_lock(); - if (!netif_running(bp->dev)) - goto reset_task_exit; +static void bnx2x_reset_common(struct bnx2x *bp) +{ + /* reset_common */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffff7f); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); +} - if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) - bnx2x_parity_recover(bp); +static void bnx2x_init_pxp(struct bnx2x *bp) +{ + u16 devctl; + int r_order, w_order; + + pci_read_config_word(bp->pdev, + bp->pcie_cap + PCI_EXP_DEVCTL, &devctl); + DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); + w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + if (bp->mrrs == -1) + r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); else { - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - bnx2x_nic_load(bp, LOAD_NORMAL); + DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); + r_order = bp->mrrs; } -reset_task_exit: - rtnl_unlock(); + bnx2x_init_pxp_arb(bp, r_order, w_order); } -/* end of nic load/unload */ +static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) +{ + int is_required; + u32 val; + int port; -/* ethtool_ops */ + if (BP_NOMCP(bp)) + return; -/* - * Init service functions - */ + is_required = 0; + val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_FAN_FAILURE_MASK; -static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func) -{ - switch (func) { - case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0; - case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1; - case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2; - case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3; - case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4; - case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5; - case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6; - case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7; - default: - BNX2X_ERR("Unsupported function index: %d\n", func); - return (u32)(-1); - } -} + if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) + is_required = 1; -static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func) -{ - u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val; + /* + * The fan failure mechanism is usually related to the PHY type since + * the power consumption of the board is affected by the PHY. Currently, + * fan is required for most designs with SFX7101, BCM8727 and BCM8481. + */ + else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) + for (port = PORT_0; port < PORT_MAX; port++) { + u32 phy_type = + SHMEM_RD(bp, dev_info.port_hw_config[port]. 
+ external_phy_config) & + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + is_required |= + ((phy_type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) || + (phy_type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || + (phy_type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481)); + } - /* Flush all outstanding writes */ - mmiowb(); + DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); - /* Pretend to be function 0 */ - REG_WR(bp, reg, 0); - /* Flush the GRC transaction (in the chip) */ - new_val = REG_RD(bp, reg); - if (new_val != 0) { - BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n", - new_val); - BUG(); - } + if (is_required == 0) + return; - /* From now we are in the "like-E1" mode */ - bnx2x_int_disable(bp); + /* Fan failure is indicated by SPIO 5 */ + bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, + MISC_REGISTERS_SPIO_INPUT_HI_Z); - /* Flush all outstanding writes */ - mmiowb(); + /* set to active low mode */ + val = REG_RD(bp, MISC_REG_SPIO_INT); + val |= ((1 << MISC_REGISTERS_SPIO_5) << + MISC_REGISTERS_SPIO_INT_OLD_SET_POS); + REG_WR(bp, MISC_REG_SPIO_INT, val); - /* Restore the original funtion settings */ - REG_WR(bp, reg, orig_func); - new_val = REG_RD(bp, reg); - if (new_val != orig_func) { - BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n", - orig_func, new_val); - BUG(); - } + /* enable interrupt to signal the IGU */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + val |= (1 << MISC_REGISTERS_SPIO_5); + REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); } -static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func) +static int bnx2x_init_common(struct bnx2x *bp) { - if (CHIP_IS_E1H(bp)) - bnx2x_undi_int_disable_e1h(bp, func); - else - bnx2x_int_disable(bp); -} + u32 val, i; +#ifdef BCM_CNIC + u32 wb_write[2]; +#endif -static void __devinit bnx2x_undi_unload(struct bnx2x *bp) -{ - u32 val; + DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); - /* Check if there is any driver already loaded */ - val = REG_RD(bp, MISC_REG_UNPREPARED); - if (val == 0x1) { - /* Check if it is the UNDI driver - * UNDI driver initializes CID offset for normal bell to 0x7 - */ - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); - if (val == 0x7) { - u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - /* save our func */ - int func = BP_FUNC(bp); - u32 swap_en; - u32 swap_val; + bnx2x_reset_common(bp); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); - /* clear the UNDI indication */ - REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); + bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); + if (CHIP_IS_E1H(bp)) + REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp)); - BNX2X_DEV_INFO("UNDI is active! 
reset device\n"); + REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); + msleep(30); + REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); - /* try unload UNDI on port 0 */ - bp->func = 0; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = bnx2x_fw_command(bp, reset_code); + bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); + if (CHIP_IS_E1(bp)) { + /* enable HW interrupt from PXP on USDM overflow + bit 16 on INT_MASK_0 */ + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + } - /* if UNDI is loaded on the other port */ - if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { + bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE); + bnx2x_init_pxp(bp); - /* send "DONE" for previous unload */ - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); +#ifdef __BIG_ENDIAN + REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); + /* make sure this value is 0 */ + REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); - /* unload UNDI on port 1 */ - bp->func = 1; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; +/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ + REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); +#endif - bnx2x_fw_command(bp, reset_code); - } + REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); +#ifdef BCM_CNIC + REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); + REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); + REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); +#endif - /* now it's safe to release the lock */ - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) + REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); - bnx2x_undi_int_disable(bp, func); + /* let the HW do it's magic ... */ + msleep(100); + /* finish PXP init */ + val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 CFG failed\n"); + return -EBUSY; + } + val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 RD_INIT failed\n"); + return -EBUSY; + } - /* close input traffic and wait for it */ - /* Do not rcv packets to BRB */ - REG_WR(bp, - (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : - NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); - /* Do not direct rcv packets that are not for MCP to - * the BRB */ - REG_WR(bp, - (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - /* clear AEU */ - REG_WR(bp, - (BP_PORT(bp) ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); - msleep(10); + REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); + REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); - /* save NIG port swap info */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - /* reset device */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0xd3ffffff); - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - 0x1403); - /* take the NIG out of reset and restore swap values */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, - MISC_REGISTERS_RESET_REG_1_RST_NIG); - REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); - REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); + bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); - /* send unload done to the MCP */ - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); + /* clean the DMAE memory */ + bp->dmae_ready = 1; + bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); - /* restore our func and fw_seq */ - bp->func = func; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); + bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE); - } else - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - } -} + bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); -static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) -{ - u32 val, val2, val3, val4, id; - u16 pmc; + bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); - /* Get the chip revision id and number. */ - /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ - val = REG_RD(bp, MISC_REG_CHIP_NUM); - id = ((val & 0xffff) << 16); - val = REG_RD(bp, MISC_REG_CHIP_REV); - id |= ((val & 0xf) << 12); - val = REG_RD(bp, MISC_REG_CHIP_METAL); - id |= ((val & 0xff) << 4); - val = REG_RD(bp, MISC_REG_BOND_ID); - id |= (val & 0xf); - bp->common.chip_id = id; - bp->link_params.chip_id = bp->common.chip_id; - BNX2X_DEV_INFO("chip ID is 0x%x\n", id); +#ifdef BCM_CNIC + wb_write[0] = 0; + wb_write[1] = 0; + for (i = 0; i < 64; i++) { + REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16)); + bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2); - val = (REG_RD(bp, 0x2874) & 0x55); - if ((bp->common.chip_id & 0x1) || - (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { - bp->flags |= ONE_PORT_FLAG; - BNX2X_DEV_INFO("single port device\n"); + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16)); + bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, + wb_write, 2); + } } +#endif + /* soft reset pulse */ + REG_WR(bp, QM_REG_SOFT_RESET, 1); + REG_WR(bp, QM_REG_SOFT_RESET, 0); - val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); - bp->common.flash_size = (NVRAM_1MB_SIZE << - (val & MCPR_NVM_CFG4_FLASH_SIZE)); - BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", - bp->common.flash_size, bp->common.flash_size); - - bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); - bp->link_params.shmem_base = bp->common.shmem_base; - BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", - bp->common.shmem_base, bp->common.shmem2_base); +#ifdef BCM_CNIC + bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); +#endif - if (!bp->common.shmem_base || - (bp->common.shmem_base < 0xA0000) || - 
(bp->common.shmem_base >= 0xC0000)) { - BNX2X_DEV_INFO("MCP not active\n"); - bp->flags |= NO_MCP_FLAG; - return; + bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); + REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) { + /* enable hw interrupt from doorbell Q */ + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); } - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - BNX2X_ERROR("BAD MCP validity signature\n"); + bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); + REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); +#ifndef BCM_CNIC + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif + if (CHIP_IS_E1H(bp)) + REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); - bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); - BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); + bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); - bp->link_params.hw_led_mode = ((bp->common.hw_config & - SHARED_HW_CFG_LED_MODE_MASK) >> - SHARED_HW_CFG_LED_MODE_SHIFT); + bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); + bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); + bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); + bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bp->link_params.feature_config_flags = 0; - val = SHMEM_RD(bp, dev_info.shared_feature_config.config); - if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) - bp->link_params.feature_config_flags |= - FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; - else - bp->link_params.feature_config_flags &= - ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); - val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; - bp->common.bc_ver = val; - BNX2X_DEV_INFO("bc_ver %X\n", val); - if (val < BNX2X_BC_VER) { - /* for now only warn - * later we might need to enforce this */ - BNX2X_ERROR("This driver needs bc_ver %X but found %X, " - "please upgrade BC\n", BNX2X_BC_VER, val); - } - bp->link_params.feature_config_flags |= - (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? - FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; + /* sync semi rtc */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0x80000000); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + 0x80000000); - if (BP_E1HVN(bp) == 0) { - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); - bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; - } else { - /* no WOL capability for E1HVN != 0 */ - bp->flags |= NO_WOL_FLAG; - } - BNX2X_DEV_INFO("%sWoL capable\n", - (bp->flags & NO_WOL_FLAG) ? 
"not " : ""); + bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); - val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); - val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); - val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); - val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); + REG_WR(bp, SRC_REG_SOFT_RST, 1); + for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) + REG_WR(bp, i, random32()); + bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); +#ifdef BCM_CNIC + REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); +#endif + REG_WR(bp, SRC_REG_SOFT_RST, 0); - dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", - val, val2, val3, val4); -} + if (sizeof(union cdu_context) != 1024) + /* we currently assume that a context is 1024 bytes */ + dev_alert(&bp->pdev->dev, "please adjust the size " + "of cdu_context(%ld)\n", + (long)sizeof(union cdu_context)); -static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, - u32 switch_cfg) -{ - int port = BP_PORT(bp); - u32 ext_phy_type; + bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); + val = (4 << 24) + (0 << 12) + 1024; + REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); - switch (switch_cfg) { - case SWITCH_CFG_1G: - BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg); + bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); + REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); + /* enable context validation interrupt from CFC */ + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); - ext_phy_type = - SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config); - switch (ext_phy_type) { - case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: - BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", - ext_phy_type); + /* set the thresholds to prevent CFC/CDU race */ + REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); - bp->port.supported |= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_2500baseX_Full | - SUPPORTED_TP | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); - case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: - BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", - ext_phy_type); + bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2814, 0xffffffff); + REG_WR(bp, 0x3820, 0xffffffff); - bp->port.supported |= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_TP | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); - default: - BNX2X_ERR("NVRAM config error. 
" - "BAD SerDes ext_phy_config 0x%x\n", - bp->link_params.ext_phy_config); - return; - } + bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp)); + REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp)); + } - bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + - port*0x10); - BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); - break; + if (CHIP_REV_IS_SLOW(bp)) + msleep(200); - case SWITCH_CFG_10G: - BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg); + /* finish CFC init */ + val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC LL_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC AC_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC CAM_INIT failed\n"); + return -EBUSY; + } + REG_WR(bp, CFC_REG_DEBUG0, 0); - ext_phy_type = - XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); - switch (ext_phy_type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", - ext_phy_type); + /* read NIG statistic + to see if this is our first up since powerup */ + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); - bp->port.supported |= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_2500baseX_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + /* do internal memory self test */ + if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { + BNX2X_ERR("internal mem self test failed\n"); + return -EBUSY; + } - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: - BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", - ext_phy_type); + switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + bp->port.need_hw_lock = 1; + break; - bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + default: + break; + } - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n", - ext_phy_type); + bnx2x_setup_fan_failure_detection(bp); - bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_2500baseX_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + /* clear PXP2 attentions */ + REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: - BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", - ext_phy_type); + enable_blocks_attention(bp); + if (CHIP_PARITY_SUPPORTED(bp)) + enable_blocks_parity(bp); - bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_common_init_phy(bp, bp->common.shmem_base); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: - BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", - ext_phy_type); + return 0; +} - 
bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; +static int bnx2x_init_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int init_stage = port ? PORT1_STAGE : PORT0_STAGE; + u32 low, high; + u32 val; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n", - ext_phy_type); + DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); - bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n", - ext_phy_type); + bnx2x_init_block(bp, PXP_BLOCK, init_stage); + bnx2x_init_block(bp, PXP2_BLOCK, init_stage); - bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + bnx2x_init_block(bp, TCM_BLOCK, init_stage); + bnx2x_init_block(bp, UCM_BLOCK, init_stage); + bnx2x_init_block(bp, CCM_BLOCK, init_stage); + bnx2x_init_block(bp, XCM_BLOCK, init_stage); - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: - BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", - ext_phy_type); +#ifdef BCM_CNIC + REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); - bp->port.supported |= (SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); + REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); + REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); +#endif - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: - BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n", - ext_phy_type); + bnx2x_init_block(bp, DQ_BLOCK, init_stage); - bp->port.supported |= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; + bnx2x_init_block(bp, BRB1_BLOCK, init_stage); + if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) { + /* no pause for emulation and FPGA */ + low = 0; + high = 513; + } else { + if (IS_E1HMF(bp)) + low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); + else if (bp->dev->mtu > 4096) { + if (bp->flags & ONE_PORT_FLAG) + low = 160; + else { + val = bp->dev->mtu; + /* (24*1024 + val*4)/256 */ + low = 96 + (val/64) + ((val % 64) ? 1 : 0); + } + } else + low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); + high = low + 56; /* 14*1024/256 */ + } + REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); + REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: - BNX2X_ERR("XGXS PHY Failure detected 0x%x\n", - bp->link_params.ext_phy_config); - break; - default: - BNX2X_ERR("NVRAM config error. 
" - "BAD XGXS ext_phy_config 0x%x\n", - bp->link_params.ext_phy_config); - return; - } + bnx2x_init_block(bp, PRS_BLOCK, init_stage); - bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + - port*0x18); - BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); + bnx2x_init_block(bp, TSDM_BLOCK, init_stage); + bnx2x_init_block(bp, CSDM_BLOCK, init_stage); + bnx2x_init_block(bp, USDM_BLOCK, init_stage); + bnx2x_init_block(bp, XSDM_BLOCK, init_stage); - break; + bnx2x_init_block(bp, TSEM_BLOCK, init_stage); + bnx2x_init_block(bp, USEM_BLOCK, init_stage); + bnx2x_init_block(bp, CSEM_BLOCK, init_stage); + bnx2x_init_block(bp, XSEM_BLOCK, init_stage); - default: - BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", - bp->port.link_config); - return; - } - bp->link_params.phy_addr = bp->port.phy_addr; + bnx2x_init_block(bp, UPB_BLOCK, init_stage); + bnx2x_init_block(bp, XPB_BLOCK, init_stage); - /* mask what we support according to speed_cap_mask */ - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) - bp->port.supported &= ~SUPPORTED_10baseT_Half; + bnx2x_init_block(bp, PBF_BLOCK, init_stage); - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) - bp->port.supported &= ~SUPPORTED_10baseT_Full; + /* configure PBF to work without PAUSE mtu 9000 */ + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); + + /* update threshold */ + REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); + /* update init credit */ + REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) - bp->port.supported &= ~SUPPORTED_100baseT_Half; + /* probe changes */ + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); + msleep(5); + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) - bp->port.supported &= ~SUPPORTED_100baseT_Full; +#ifdef BCM_CNIC + bnx2x_init_block(bp, SRCH_BLOCK, init_stage); +#endif + bnx2x_init_block(bp, CDU_BLOCK, init_stage); + bnx2x_init_block(bp, CFC_BLOCK, init_stage); - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) - bp->port.supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + if (CHIP_IS_E1(bp)) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, HC_BLOCK, init_stage); - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) - bp->port.supported &= ~SUPPORTED_2500baseX_Full; + bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); + /* init aeu_mask_attn_func_0/1: + * - SF mode: bits 3-7 are masked. only bits 0-2 are in use + * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * bits 4-7 are used for "per vn group attention" */ + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, + (IS_E1HMF(bp) ? 
0xF7 : 0x7)); - if (!(bp->link_params.speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) - bp->port.supported &= ~SUPPORTED_10000baseT_Full; + bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); + bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); + bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); + bnx2x_init_block(bp, DBU_BLOCK, init_stage); + bnx2x_init_block(bp, DBG_BLOCK, init_stage); - BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported); -} + bnx2x_init_block(bp, NIG_BLOCK, init_stage); -static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) -{ - bp->link_params.req_duplex = DUPLEX_FULL; + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); - switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) { - case PORT_FEATURE_LINK_SPEED_AUTO: - if (bp->port.supported & SUPPORTED_Autoneg) { - bp->link_params.req_line_speed = SPEED_AUTO_NEG; - bp->port.advertising = bp->port.supported; - } else { - u32 ext_phy_type = - XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); + if (CHIP_IS_E1H(bp)) { + /* 0x2 disable e1hov, 0x1 enable */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, + (IS_E1HMF(bp) ? 0x1 : 0x2)); - if ((ext_phy_type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || - (ext_phy_type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { - /* force 10G, no AN */ - bp->link_params.req_line_speed = SPEED_10000; - bp->port.advertising = - (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - break; - } - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " Autoneg not supported\n", - bp->port.link_config); - return; + { + REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); + REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); + REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); } - break; + } - case PORT_FEATURE_LINK_SPEED_10M_FULL: - if (bp->port.supported & SUPPORTED_10baseT_Full) { - bp->link_params.req_line_speed = SPEED_10; - bp->port.advertising = (ADVERTISED_10baseT_Full | - ADVERTISED_TP); - } else { - BNX2X_ERROR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; - } - break; + bnx2x_init_block(bp, MCP_BLOCK, init_stage); + bnx2x_init_block(bp, DMAE_BLOCK, init_stage); - case PORT_FEATURE_LINK_SPEED_10M_HALF: - if (bp->port.supported & SUPPORTED_10baseT_Half) { - bp->link_params.req_line_speed = SPEED_10; - bp->link_params.req_duplex = DUPLEX_HALF; - bp->port.advertising = (ADVERTISED_10baseT_Half | - ADVERTISED_TP); - } else { - BNX2X_ERROR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; - } - break; + switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + { + u32 swap_val, swap_override, aeu_gpio_mask, offset; - case PORT_FEATURE_LINK_SPEED_100M_FULL: - if (bp->port.supported & SUPPORTED_100baseT_Full) { - bp->link_params.req_line_speed = SPEED_100; - bp->port.advertising = (ADVERTISED_100baseT_Full | - ADVERTISED_TP); - } else { - BNX2X_ERROR("NVRAM config error. 
" - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; - } - break; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_INPUT_HI_Z, port); - case PORT_FEATURE_LINK_SPEED_100M_HALF: - if (bp->port.supported & SUPPORTED_100baseT_Half) { - bp->link_params.req_line_speed = SPEED_100; - bp->link_params.req_duplex = DUPLEX_HALF; - bp->port.advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_TP); - } else { - BNX2X_ERROR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; - } - break; + /* The GPIO should be swapped if the swap register is + set and active */ + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - case PORT_FEATURE_LINK_SPEED_1G: - if (bp->port.supported & SUPPORTED_1000baseT_Full) { - bp->link_params.req_line_speed = SPEED_1000; - bp->port.advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); + /* Select function upon port-swap configuration */ + if (port == 0) { + offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + aeu_gpio_mask = (swap_val && swap_override) ? + AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 : + AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0; } else { - BNX2X_ERROR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; + offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; + aeu_gpio_mask = (swap_val && swap_override) ? + AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 : + AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1; } - break; - - case PORT_FEATURE_LINK_SPEED_2_5G: - if (bp->port.supported & SUPPORTED_2500baseX_Full) { - bp->link_params.req_line_speed = SPEED_2500; - bp->port.advertising = (ADVERTISED_2500baseX_Full | - ADVERTISED_TP); - } else { - BNX2X_ERROR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; + val = REG_RD(bp, offset); + /* add GPIO3 to group */ + val |= aeu_gpio_mask; + REG_WR(bp, offset, val); } break; - case PORT_FEATURE_LINK_SPEED_10G_CX4: - case PORT_FEATURE_LINK_SPEED_10G_KX4: - case PORT_FEATURE_LINK_SPEED_10G_KR: - if (bp->port.supported & SUPPORTED_10000baseT_Full) { - bp->link_params.req_line_speed = SPEED_10000; - bp->port.advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - } else { - BNX2X_ERROR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - bp->port.link_config, - bp->link_params.speed_cap_mask); - return; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + /* add SPIO 5 to group 0 */ + { + u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + val = REG_RD(bp, reg_addr); + val |= AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_addr, val); } break; default: - BNX2X_ERROR("NVRAM config error. 
" - "BAD link speed link_config 0x%x\n", - bp->port.link_config); - bp->link_params.req_line_speed = SPEED_AUTO_NEG; - bp->port.advertising = bp->port.supported; break; } - bp->link_params.req_flow_ctrl = (bp->port.link_config & - PORT_FEATURE_FLOW_CONTROL_MASK); - if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && - !(bp->port.supported & SUPPORTED_Autoneg)) - bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; + bnx2x__link_reset(bp); - BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" - " advertising 0x%x\n", - bp->link_params.req_line_speed, - bp->link_params.req_duplex, - bp->link_params.req_flow_ctrl, bp->port.advertising); + return 0; } -static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) +#define ILT_PER_FUNC (768/2) +#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) +/* the phys address is shifted right 12 bits and has an added + 1=valid bit added to the 53rd bit + then since this is a wide register(TM) + we split it into two 32 bit writes + */ +#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) +#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) +#define PXP_ONE_ILT(x) (((x) << 10) | x) +#define PXP_ILT_RANGE(f, l) (((l) << 10) | f) + +#ifdef BCM_CNIC +#define CNIC_ILT_LINES 127 +#define CNIC_CTX_PER_ILT 16 +#else +#define CNIC_ILT_LINES 0 +#endif + +static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) { - mac_hi = cpu_to_be16(mac_hi); - mac_lo = cpu_to_be32(mac_lo); - memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); - memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); + int reg; + + if (CHIP_IS_E1H(bp)) + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; + else /* E1 */ + reg = PXP2_REG_RQ_ONCHIP_AT + index*8; + + bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); } -static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) +static int bnx2x_init_func(struct bnx2x *bp) { int port = BP_PORT(bp); - u32 val, val2; - u32 config; - u16 i; - u32 ext_phy_type; + int func = BP_FUNC(bp); + u32 addr, val; + int i; - bp->link_params.bp = bp; - bp->link_params.port = port; + DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); - bp->link_params.lane_config = - SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); - bp->link_params.ext_phy_config = - SHMEM_RD(bp, - dev_info.port_hw_config[port].external_phy_config); - /* BCM8727_NOC => BCM8727 no over current */ - if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) { - bp->link_params.ext_phy_config &= - ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; - bp->link_params.ext_phy_config |= - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727; - bp->link_params.feature_config_flags |= - FEATURE_CONFIG_BCM8727_NOC; - } + /* set MSI reconfigure capability */ + addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); + val = REG_RD(bp, addr); + val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; + REG_WR(bp, addr, val); - bp->link_params.speed_cap_mask = - SHMEM_RD(bp, - dev_info.port_hw_config[port].speed_capability_mask); + i = FUNC_ILT_BASE(func); - bp->port.link_config = - SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); + bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); + REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); + } else /* E1 */ + REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, + PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); - /* Get the 4 lanes xgxs config rx and tx */ - for (i = 0; i < 2; i++) { - val = SHMEM_RD(bp, - dev_info.port_hw_config[port].xgxs_config_rx[i<<1]); - bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff); - bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff); +#ifdef BCM_CNIC + i += 1 + CNIC_ILT_LINES; + bnx2x_ilt_wr(bp, i, bp->timers_mapping); + if (CHIP_IS_E1(bp)) + REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i)); + else { + REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i); + REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i); + } - val = SHMEM_RD(bp, - dev_info.port_hw_config[port].xgxs_config_tx[i<<1]); - bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff); - bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff); + i++; + bnx2x_ilt_wr(bp, i, bp->qm_mapping); + if (CHIP_IS_E1(bp)) + REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); + else { + REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i); + REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i); } - /* If the device is capable of WoL, set the default state according - * to the HW - */ - config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); - bp->wol = (!(bp->flags & NO_WOL_FLAG) && - (config & PORT_FEATURE_WOL_ENABLED)); + i++; + bnx2x_ilt_wr(bp, i, bp->t1_mapping); + if (CHIP_IS_E1(bp)) + REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); + else { + REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); + REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); + } - BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x" - " speed_cap_mask 0x%08x link_config 0x%08x\n", - bp->link_params.lane_config, - bp->link_params.ext_phy_config, - bp->link_params.speed_cap_mask, bp->port.link_config); + /* tell the searcher where the T2 table is */ + REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); - bp->link_params.switch_cfg |= (bp->port.link_config & - PORT_FEATURE_CONNECTED_SWITCH_MASK); - bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); + bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16, + U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping)); - bnx2x_link_settings_requested(bp); + bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, + U64_LO((u64)bp->t2_mapping + 16*1024 - 64), + U64_HI((u64)bp->t2_mapping + 16*1024 - 64)); - /* - * If connected directly, work with the internal PHY, otherwise, work - * with the external PHY - */ - ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); - if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) - bp->mdio.prtad = bp->link_params.phy_addr; + REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); +#endif - else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && - (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) - bp->mdio.prtad = - XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); + if (CHIP_IS_E1H(bp)) { + bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); + 
bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); - val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); - val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); - bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); - memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); + } -#ifdef BCM_CNIC - val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); - val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); - bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); -#endif -} + /* HC init per function */ + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); -static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - u32 val, val2; - int rc = 0; + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func); - bnx2x_get_common_hwinfo(bp); + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2114, 0xffffffff); + REG_WR(bp, 0x2120, 0xffffffff); - bp->e1hov = 0; - bp->e1hmf = 0; - if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { - bp->mf_config = - SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); + return 0; +} - val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) & - FUNC_MF_CFG_E1HOV_TAG_MASK); - if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) - bp->e1hmf = 1; - BNX2X_DEV_INFO("%s function mode\n", - IS_E1HMF(bp) ? "multi" : "single"); +int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) +{ + int i, rc = 0; - if (IS_E1HMF(bp)) { - val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func]. 
- e1hov_tag) & - FUNC_MF_CFG_E1HOV_TAG_MASK); - if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { - bp->e1hov = val; - BNX2X_DEV_INFO("E1HOV for func %d is %d " - "(0x%04x)\n", - func, bp->e1hov, bp->e1hov); - } else { - BNX2X_ERROR("No valid E1HOV for func %d," - " aborting\n", func); - rc = -EPERM; - } - } else { - if (BP_E1HVN(bp)) { - BNX2X_ERROR("VN %d in single function mode," - " aborting\n", BP_E1HVN(bp)); - rc = -EPERM; - } - } - } + DP(BNX2X_MSG_MCP, "function %d load_code %x\n", + BP_FUNC(bp), load_code); - if (!BP_NOMCP(bp)) { - bnx2x_get_port_hwinfo(bp); + bp->dmae_ready = 0; + mutex_init(&bp->dmae_mutex); + rc = bnx2x_gunzip_init(bp); + if (rc) + return rc; - bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); - } + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + rc = bnx2x_init_common(bp); + if (rc) + goto init_hw_err; + /* no break */ - if (IS_E1HMF(bp)) { - val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); - val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); - if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && - (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { - bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); - bp->dev->dev_addr[1] = (u8)(val2 & 0xff); - bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff); - bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff); - bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff); - bp->dev->dev_addr[5] = (u8)(val & 0xff); - memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, - ETH_ALEN); - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, - ETH_ALEN); - } + case FW_MSG_CODE_DRV_LOAD_PORT: + bp->dmae_ready = 1; + rc = bnx2x_init_port(bp); + if (rc) + goto init_hw_err; + /* no break */ - return rc; + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + bp->dmae_ready = 1; + rc = bnx2x_init_func(bp); + if (rc) + goto init_hw_err; + break; + + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + break; } - if (BP_NOMCP(bp)) { - /* only supposed to happen on emulation/FPGA */ - BNX2X_ERROR("warning: random MAC workaround active\n"); - random_ether_addr(bp->dev->dev_addr); - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); + if (!BP_NOMCP(bp)) { + int func = BP_FUNC(bp); + + bp->fw_drv_pulse_wr_seq = + (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); + DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); } + /* this needs to be done before gunzip end */ + bnx2x_zero_def_sb(bp); + for_each_queue(bp, i) + bnx2x_zero_sb(bp, BP_L_ID(bp) + i); +#ifdef BCM_CNIC + bnx2x_zero_sb(bp, BP_L_ID(bp) + i); +#endif + +init_hw_err: + bnx2x_gunzip_end(bp); + return rc; } -static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) +void bnx2x_free_mem(struct bnx2x *bp) { - int cnt, i, block_end, rodi; - char vpd_data[BNX2X_VPD_LEN+1]; - char str_id_reg[VENDOR_ID_LEN+1]; - char str_id_cap[VENDOR_ID_LEN+1]; - u8 len; - - cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); - memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); - if (cnt < BNX2X_VPD_LEN) - goto out_not_found; - - i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, - PCI_VPD_LRDT_RO_DATA); - if (i < 0) - goto out_not_found; - - - block_end = i + PCI_VPD_LRDT_TAG_SIZE + - pci_vpd_lrdt_size(&vpd_data[i]); - - i += PCI_VPD_LRDT_TAG_SIZE; - - if (block_end > BNX2X_VPD_LEN) - goto out_not_found; +#define BNX2X_PCI_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) - rodi = pci_vpd_find_info_keyword(vpd_data, i, 
block_end, - PCI_VPD_RO_KEYWORD_MFR_ID); - if (rodi < 0) - goto out_not_found; +#define BNX2X_FREE(x) \ + do { \ + if (x) { \ + vfree(x); \ + x = NULL; \ + } \ + } while (0) - len = pci_vpd_info_field_size(&vpd_data[rodi]); + int i; - if (len != VENDOR_ID_LEN) - goto out_not_found; + /* fastpath */ + /* Common */ + for_each_queue(bp, i) { - rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + /* status blocks */ + BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), + bnx2x_fp(bp, i, status_blk_mapping), + sizeof(struct host_status_block)); + } + /* Rx */ + for_each_queue(bp, i) { - /* vendor specific info */ - snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); - snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); - if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || - !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring), + bnx2x_fp(bp, i, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); - rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, - PCI_VPD_RO_KEYWORD_VENDOR0); - if (rodi >= 0) { - len = pci_vpd_info_field_size(&vpd_data[rodi]); + BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring), + bnx2x_fp(bp, i, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); - rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + /* SGE ring */ + BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), + bnx2x_fp(bp, i, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + } + /* Tx */ + for_each_queue(bp, i) { - if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { - memcpy(bp->fw_ver, &vpd_data[rodi], len); - bp->fw_ver[len] = ' '; - } - } - return; + /* fastpath tx rings: tx_buf tx_desc */ + BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring), + bnx2x_fp(bp, i, tx_desc_mapping), + sizeof(union eth_tx_bd_types) * NUM_TX_BD); } -out_not_found: - return; -} + /* end of fastpath */ -static int __devinit bnx2x_init_bp(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - int timer_interval; - int rc; + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, + sizeof(struct host_def_status_block)); - /* Disable interrupt handling until HW is initialized */ - atomic_set(&bp->intr_sem, 1); - smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); - mutex_init(&bp->port.phy_mutex); - mutex_init(&bp->fw_mb_mutex); - spin_lock_init(&bp->stats_lock); #ifdef BCM_CNIC - mutex_init(&bp->cnic_mutex); + BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); + BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); + BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); + BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); + BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping, + sizeof(struct host_status_block)); #endif + BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); - INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); - INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); - - rc = bnx2x_get_hwinfo(bp); - - bnx2x_read_fwinfo(bp); - /* need to reset chip if undi was active */ - if (!BP_NOMCP(bp)) - bnx2x_undi_unload(bp); +#undef BNX2X_PCI_FREE +#undef BNX2X_KFREE +} - if (CHIP_REV_IS_FPGA(bp)) - dev_err(&bp->pdev->dev, "FPGA detected\n"); +int bnx2x_alloc_mem(struct bnx2x *bp) +{ - if (BP_NOMCP(bp) && (func == 0)) - dev_err(&bp->pdev->dev, "MCP disabled, " - "must load devices in order!\n"); 
+#define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset(x, 0, size); \ + } while (0) - /* Set multi queue mode */ - if ((multi_mode != ETH_RSS_MODE_DISABLED) && - ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { - dev_err(&bp->pdev->dev, "Multi disabled since int_mode " - "requested is not MSI-X\n"); - multi_mode = ETH_RSS_MODE_DISABLED; - } - bp->multi_mode = multi_mode; +#define BNX2X_ALLOC(x, size) \ + do { \ + x = vmalloc(size); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset(x, 0, size); \ + } while (0) + int i; - bp->dev->features |= NETIF_F_GRO; + /* fastpath */ + /* Common */ + for_each_queue(bp, i) { + bnx2x_fp(bp, i, bp) = bp; - /* Set TPA flags */ - if (disable_tpa) { - bp->flags &= ~TPA_ENABLE_FLAG; - bp->dev->features &= ~NETIF_F_LRO; - } else { - bp->flags |= TPA_ENABLE_FLAG; - bp->dev->features |= NETIF_F_LRO; + /* status blocks */ + BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), + &bnx2x_fp(bp, i, status_blk_mapping), + sizeof(struct host_status_block)); } + /* Rx */ + for_each_queue(bp, i) { - if (CHIP_IS_E1(bp)) - bp->dropless_fc = 0; - else - bp->dropless_fc = dropless_fc; + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), + sizeof(struct sw_rx_bd) * NUM_RX_BD); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring), + &bnx2x_fp(bp, i, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); - bp->mrrs = mrrs; + BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring), + &bnx2x_fp(bp, i, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); - bp->tx_ring_size = MAX_TX_AVAIL; - bp->rx_ring_size = MAX_RX_AVAIL; + /* SGE ring */ + BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring), + sizeof(struct sw_rx_page) * NUM_RX_SGE); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring), + &bnx2x_fp(bp, i, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + } + /* Tx */ + for_each_queue(bp, i) { - bp->rx_csum = 1; + /* fastpath tx rings: tx_buf tx_desc */ + BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), + sizeof(struct sw_tx_bd) * NUM_TX_BD); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring), + &bnx2x_fp(bp, i, tx_desc_mapping), + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + } + /* end of fastpath */ - /* make sure that the numbers are in the right granularity */ - bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); - bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); + BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, + sizeof(struct host_def_status_block)); - timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); - bp->current_interval = (poll ? 
poll : timer_interval); + BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); - init_timer(&bp->timer); - bp->timer.expires = jiffies + bp->current_interval; - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2x_timer; +#ifdef BCM_CNIC + BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); - return rc; -} + /* allocate searcher T2 table + we allocate 1/4 of alloc num for T2 + (which is not entered into the ILT) */ + BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); -/* - * ethtool service functions - */ + /* Initialize T2 (for 1024 connections) */ + for (i = 0; i < 16*1024; i += 64) + *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; -/* All ethtool functions called with rtnl_lock */ + /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ + BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); -static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bnx2x *bp = netdev_priv(dev); + /* QM queues (128*MAX_CONN) */ + BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); - cmd->supported = bp->port.supported; - cmd->advertising = bp->port.advertising; + BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, + sizeof(struct host_status_block)); +#endif - if ((bp->state == BNX2X_STATE_OPEN) && - !(bp->flags & MF_FUNC_DIS) && - (bp->link_vars.link_up)) { - cmd->speed = bp->link_vars.line_speed; - cmd->duplex = bp->link_vars.duplex; - if (IS_E1HMF(bp)) { - u16 vn_max_rate; + /* Slow path ring */ + BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); - vn_max_rate = - ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT) * 100; - if (vn_max_rate < cmd->speed) - cmd->speed = vn_max_rate; - } - } else { - cmd->speed = -1; - cmd->duplex = -1; - } + return 0; - if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { - u32 ext_phy_type = - XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); +alloc_mem_err: + bnx2x_free_mem(bp); + return -ENOMEM; - switch (ext_phy_type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - cmd->port = PORT_FIBRE; - break; +#undef BNX2X_PCI_ALLOC +#undef BNX2X_ALLOC +} - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: - cmd->port = PORT_TP; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: - BNX2X_ERR("XGXS PHY Failure detected 0x%x\n", - bp->link_params.ext_phy_config); - break; +/* + * Init service functions + */ - default: - DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", - bp->link_params.ext_phy_config); - break; - } - } else - cmd->port = PORT_TP; +/** + * Sets a MAC in a CAM for a few L2 Clients for E1 chip + * + * @param bp driver descriptor + * @param set set or clear an entry (1 or 0) + * @param mac pointer to a buffer containing a MAC + * @param cl_bit_vec bit vector of clients to register a MAC for + * @param cam_offset offset in a CAM to use + * @param with_bcast set broadcast MAC as well + */ +static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac, + u32 cl_bit_vec, u8 cam_offset, + u8 with_bcast) +{ + struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); + int port = BP_PORT(bp); - cmd->phy_address = bp->mdio.prtad; - cmd->transceiver = XCVR_INTERNAL; + /* CAM allocation + * unicasts 
0-31:port0 32-63:port1 + * multicast 64-127:port0 128-191:port1 + */ + config->hdr.length = 1 + (with_bcast ? 1 : 0); + config->hdr.offset = cam_offset; + config->hdr.client_id = 0xff; + config->hdr.reserved1 = 0; - if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) - cmd->autoneg = AUTONEG_ENABLE; + /* primary MAC */ + config->config_table[0].cam_entry.msb_mac_addr = + swab16(*(u16 *)&mac[0]); + config->config_table[0].cam_entry.middle_mac_addr = + swab16(*(u16 *)&mac[2]); + config->config_table[0].cam_entry.lsb_mac_addr = + swab16(*(u16 *)&mac[4]); + config->config_table[0].cam_entry.flags = cpu_to_le16(port); + if (set) + config->config_table[0].target_table_entry.flags = 0; else - cmd->autoneg = AUTONEG_DISABLE; + CAM_INVALIDATE(config->config_table[0]); + config->config_table[0].target_table_entry.clients_bit_vector = + cpu_to_le32(cl_bit_vec); + config->config_table[0].target_table_entry.vlan_id = 0; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", + (set ? "setting" : "clearing"), + config->config_table[0].cam_entry.msb_mac_addr, + config->config_table[0].cam_entry.middle_mac_addr, + config->config_table[0].cam_entry.lsb_mac_addr); - DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" - DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" - DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" - DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + /* broadcast */ + if (with_bcast) { + config->config_table[1].cam_entry.msb_mac_addr = + cpu_to_le16(0xffff); + config->config_table[1].cam_entry.middle_mac_addr = + cpu_to_le16(0xffff); + config->config_table[1].cam_entry.lsb_mac_addr = + cpu_to_le16(0xffff); + config->config_table[1].cam_entry.flags = cpu_to_le16(port); + if (set) + config->config_table[1].target_table_entry.flags = + TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; + else + CAM_INVALIDATE(config->config_table[1]); + config->config_table[1].target_table_entry.clients_bit_vector = + cpu_to_le32(cl_bit_vec); + config->config_table[1].target_table_entry.vlan_id = 0; + } - return 0; + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, + U64_HI(bnx2x_sp_mapping(bp, mac_config)), + U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); } -static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +/** + * Sets a MAC in a CAM for a few L2 Clients for E1H chip + * + * @param bp driver descriptor + * @param set set or clear an entry (1 or 0) + * @param mac pointer to a buffer containing a MAC + * @param cl_bit_vec bit vector of clients to register a MAC for + * @param cam_offset offset in a CAM to use + */ +static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, + u32 cl_bit_vec, u8 cam_offset) { - struct bnx2x *bp = netdev_priv(dev); - u32 advertising; + struct mac_configuration_cmd_e1h *config = + (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); - if (IS_E1HMF(bp)) - return 0; + config->hdr.length = 1; + config->hdr.offset = cam_offset; + config->hdr.client_id = 0xff; + config->hdr.reserved1 = 0; - DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" - DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" - DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" - DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - 
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); - - if (cmd->autoneg == AUTONEG_ENABLE) { - if (!(bp->port.supported & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "Autoneg not supported\n"); - return -EINVAL; - } + /* primary MAC */ + config->config_table[0].msb_mac_addr = + swab16(*(u16 *)&mac[0]); + config->config_table[0].middle_mac_addr = + swab16(*(u16 *)&mac[2]); + config->config_table[0].lsb_mac_addr = + swab16(*(u16 *)&mac[4]); + config->config_table[0].clients_bit_vector = + cpu_to_le32(cl_bit_vec); + config->config_table[0].vlan_id = 0; + config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); + if (set) + config->config_table[0].flags = BP_PORT(bp); + else + config->config_table[0].flags = + MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; - /* advertise the requested speed and duplex if supported */ - cmd->advertising &= bp->port.supported; + DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", + (set ? "setting" : "clearing"), + config->config_table[0].msb_mac_addr, + config->config_table[0].middle_mac_addr, + config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); - bp->link_params.req_line_speed = SPEED_AUTO_NEG; - bp->link_params.req_duplex = DUPLEX_FULL; - bp->port.advertising |= (ADVERTISED_Autoneg | - cmd->advertising); - - } else { /* forced speed */ - /* advertise the requested speed and duplex if supported */ - switch (cmd->speed) { - case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) { - if (!(bp->port.supported & - SUPPORTED_10baseT_Full)) { - DP(NETIF_MSG_LINK, - "10M full not supported\n"); - return -EINVAL; - } + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, + U64_HI(bnx2x_sp_mapping(bp, mac_config)), + U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); +} - advertising = (ADVERTISED_10baseT_Full | - ADVERTISED_TP); - } else { - if (!(bp->port.supported & - SUPPORTED_10baseT_Half)) { - DP(NETIF_MSG_LINK, - "10M half not supported\n"); - return -EINVAL; - } +static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, + int *state_p, int poll) +{ + /* can take a while if any port is running */ + int cnt = 5000; - advertising = (ADVERTISED_10baseT_Half | - ADVERTISED_TP); - } - break; + DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", + poll ? "polling" : "waiting", state, idx); - case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) { - if (!(bp->port.supported & - SUPPORTED_100baseT_Full)) { - DP(NETIF_MSG_LINK, - "100M full not supported\n"); - return -EINVAL; - } + might_sleep(); + while (cnt--) { + if (poll) { + bnx2x_rx_int(bp->fp, 10); + /* if index is different from 0 + * the reply for some commands will + * be on the non default queue + */ + if (idx) + bnx2x_rx_int(&bp->fp[idx], 10); + } - advertising = (ADVERTISED_100baseT_Full | - ADVERTISED_TP); - } else { - if (!(bp->port.supported & - SUPPORTED_100baseT_Half)) { - DP(NETIF_MSG_LINK, - "100M half not supported\n"); - return -EINVAL; - } + mb(); /* state is changed by bnx2x_sp_event() */ + if (*state_p == state) { +#ifdef BNX2X_STOP_ON_ERROR + DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt); +#endif + return 0; + } - advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_TP); - } - break; + msleep(1); - case SPEED_1000: - if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, "1G half not supported\n"); - return -EINVAL; - } + if (bp->panic) + return -EIO; + } - if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { - DP(NETIF_MSG_LINK, "1G full not supported\n"); - return -EINVAL; - } + /* timeout! */ + BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", + poll ? 
"polling" : "waiting", state, idx); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif - advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); - break; + return -EBUSY; +} - case SPEED_2500: - if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, - "2.5G half not supported\n"); - return -EINVAL; - } +void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) +{ + bp->set_mac_pending++; + smp_wmb(); - if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { - DP(NETIF_MSG_LINK, - "2.5G full not supported\n"); - return -EINVAL; - } + bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, + (1 << bp->fp->cl_id), BP_FUNC(bp)); - advertising = (ADVERTISED_2500baseX_Full | - ADVERTISED_TP); - break; + /* Wait for a completion */ + bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); +} - case SPEED_10000: - if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, "10G half not supported\n"); - return -EINVAL; - } +void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) +{ + bp->set_mac_pending++; + smp_wmb(); - if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { - DP(NETIF_MSG_LINK, "10G full not supported\n"); - return -EINVAL; - } + bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, + (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), + 1); - advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - break; + /* Wait for a completion */ + bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); +} - default: - DP(NETIF_MSG_LINK, "Unsupported speed\n"); - return -EINVAL; - } +#ifdef BCM_CNIC +/** + * Set iSCSI MAC(s) at the next enties in the CAM after the ETH + * MAC(s). This function will wait until the ramdord completion + * returns. + * + * @param bp driver handle + * @param set set or clear the CAM entry + * + * @return 0 if cussess, -ENODEV if ramrod doesn't return. + */ +int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) +{ + u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); - bp->link_params.req_line_speed = cmd->speed; - bp->link_params.req_duplex = cmd->duplex; - bp->port.advertising = advertising; - } + bp->set_mac_pending++; + smp_wmb(); - DP(NETIF_MSG_LINK, "req_line_speed %d\n" - DP_LEVEL " req_duplex %d advertising 0x%x\n", - bp->link_params.req_line_speed, bp->link_params.req_duplex, - bp->port.advertising); + /* Send a SET_MAC ramrod */ + if (CHIP_IS_E1(bp)) + bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, + cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2, + 1); + else + /* CAM allocation for E1H + * unicasts: by func number + * multicast: 20+FUNC*20, 20 each + */ + bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac, + cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp)); - if (netif_running(dev)) { - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_link_set(bp); - } + /* Wait for a completion when setting */ + bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 
0 : 1); return 0; } +#endif -#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) -#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) - -static int bnx2x_get_regs_len(struct net_device *dev) +int bnx2x_setup_leading(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); - int regdump_len = 0; - int i; - - if (CHIP_IS_E1(bp)) { - for (i = 0; i < REGS_COUNT; i++) - if (IS_E1_ONLINE(reg_addrs[i].info)) - regdump_len += reg_addrs[i].size; + int rc; - for (i = 0; i < WREGS_COUNT_E1; i++) - if (IS_E1_ONLINE(wreg_addrs_e1[i].info)) - regdump_len += wreg_addrs_e1[i].size * - (1 + wreg_addrs_e1[i].read_regs_count); + /* reset IGU state */ + bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); - } else { /* E1H */ - for (i = 0; i < REGS_COUNT; i++) - if (IS_E1H_ONLINE(reg_addrs[i].info)) - regdump_len += reg_addrs[i].size; + /* SETUP ramrod */ + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); - for (i = 0; i < WREGS_COUNT_E1H; i++) - if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) - regdump_len += wreg_addrs_e1h[i].size * - (1 + wreg_addrs_e1h[i].read_regs_count); - } - regdump_len *= 4; - regdump_len += sizeof(struct dump_hdr); + /* Wait for completion */ + rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); - return regdump_len; + return rc; } -static void bnx2x_get_regs(struct net_device *dev, - struct ethtool_regs *regs, void *_p) +int bnx2x_setup_multi(struct bnx2x *bp, int index) { - u32 *p = _p, i, j; - struct bnx2x *bp = netdev_priv(dev); - struct dump_hdr dump_hdr = {0}; + struct bnx2x_fastpath *fp = &bp->fp[index]; - regs->version = 0; - memset(p, 0, regs->len); + /* reset IGU state */ + bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); - if (!netif_running(bp->dev)) - return; + /* SETUP ramrod */ + fp->state = BNX2X_FP_STATE_OPENING; + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, + fp->cl_id, 0); - dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; - dump_hdr.dump_sign = dump_sign_all; - dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); - dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); - dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); - dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); - dump_hdr.info = CHIP_IS_E1(bp) ? 
RI_E1_ONLINE : RI_E1H_ONLINE; + /* Wait for completion */ + return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, + &(fp->state), 0); +} - memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); - p += dump_hdr.hdr_size + 1; - if (CHIP_IS_E1(bp)) { - for (i = 0; i < REGS_COUNT; i++) - if (IS_E1_ONLINE(reg_addrs[i].info)) - for (j = 0; j < reg_addrs[i].size; j++) - *p++ = REG_RD(bp, - reg_addrs[i].addr + j*4); +void bnx2x_set_num_queues_msix(struct bnx2x *bp) +{ - } else { /* E1H */ - for (i = 0; i < REGS_COUNT; i++) - if (IS_E1H_ONLINE(reg_addrs[i].info)) - for (j = 0; j < reg_addrs[i].size; j++) - *p++ = REG_RD(bp, - reg_addrs[i].addr + j*4); + switch (bp->multi_mode) { + case ETH_RSS_MODE_DISABLED: + bp->num_queues = 1; + break; + + case ETH_RSS_MODE_REGULAR: + if (num_queues) + bp->num_queues = min_t(u32, num_queues, + BNX2X_MAX_QUEUES(bp)); + else + bp->num_queues = min_t(u32, num_online_cpus(), + BNX2X_MAX_QUEUES(bp)); + break; + + + default: + bp->num_queues = 1; + break; } } -#define PHY_FW_VER_LEN 10 -static void bnx2x_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) + +static int bnx2x_stop_multi(struct bnx2x *bp, int index) { - struct bnx2x *bp = netdev_priv(dev); - u8 phy_fw_ver[PHY_FW_VER_LEN]; + struct bnx2x_fastpath *fp = &bp->fp[index]; + int rc; + + /* halt the connection */ + fp->state = BNX2X_FP_STATE_HALTING; + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0); - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + /* Wait for completion */ + rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, + &(fp->state), 1); + if (rc) /* timeout */ + return rc; - phy_fw_ver[0] = '\0'; - if (bp->port.pmf) { - bnx2x_acquire_phy_lock(bp); - bnx2x_get_ext_phy_fw_version(&bp->link_params, - (bp->state != BNX2X_STATE_CLOSED), - phy_fw_ver, PHY_FW_VER_LEN); - bnx2x_release_phy_lock(bp); - } + /* delete cfc entry */ + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); - strncpy(info->fw_version, bp->fw_ver, 32); - snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), - "bc %d.%d.%d%s%s", - (bp->common.bc_ver & 0xff0000) >> 16, - (bp->common.bc_ver & 0xff00) >> 8, - (bp->common.bc_ver & 0xff), - ((phy_fw_ver[0] != '\0') ? 
" phy " : ""), phy_fw_ver); - strcpy(info->bus_info, pci_name(bp->pdev)); - info->n_stats = BNX2X_NUM_STATS; - info->testinfo_len = BNX2X_NUM_TESTS; - info->eedump_len = bp->common.flash_size; - info->regdump_len = bnx2x_get_regs_len(dev); + /* Wait for completion */ + rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, + &(fp->state), 1); + return rc; } -static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +static int bnx2x_stop_leading(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); + __le16 dsb_sp_prod_idx; + /* if the other port is handling traffic, + this can take a lot of time */ + int cnt = 500; + int rc; - if (bp->flags & NO_WOL_FLAG) { - wol->supported = 0; - wol->wolopts = 0; - } else { - wol->supported = WAKE_MAGIC; - if (bp->wol) - wol->wolopts = WAKE_MAGIC; - else - wol->wolopts = 0; - } - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} + might_sleep(); -static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct bnx2x *bp = netdev_priv(dev); + /* Send HALT ramrod */ + bp->fp[0].state = BNX2X_FP_STATE_HALTING; + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0); - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; + /* Wait for completion */ + rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, + &(bp->fp[0].state), 1); + if (rc) /* timeout */ + return rc; - if (wol->wolopts & WAKE_MAGIC) { - if (bp->flags & NO_WOL_FLAG) - return -EINVAL; + dsb_sp_prod_idx = *bp->dsb_sp_prod; - bp->wol = 1; - } else - bp->wol = 0; + /* Send PORT_DELETE ramrod */ + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); - return 0; + /* Wait for completion to arrive on default status block + we are going to reset the chip anyway + so there is not much to do if this times out + */ + while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { + if (!cnt) { + DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " + "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", + *bp->dsb_sp_prod, dsb_sp_prod_idx); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + rc = -EBUSY; + break; + } + cnt--; + msleep(1); + rmb(); /* Refresh the dsb_sp_prod */ + } + bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; + bp->fp[0].state = BNX2X_FP_STATE_CLOSED; + + return rc; } -static u32 bnx2x_get_msglevel(struct net_device *dev) +static void bnx2x_reset_func(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int base, i; + + /* Configure IGU */ + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - return bp->msg_enable; +#ifdef BCM_CNIC + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + /* + * Wait for at least 10ms and up to 2 second for the timers scan to + * complete + */ + for (i = 0; i < 200; i++) { + msleep(10); + if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) + break; + } +#endif + /* Clear ILT */ + base = FUNC_ILT_BASE(func); + for (i = base; i < base + ILT_PER_FUNC; i++) + bnx2x_ilt_wr(bp, i, 0); } -static void bnx2x_set_msglevel(struct net_device *dev, u32 level) +static void bnx2x_reset_port(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); + int port = BP_PORT(bp); + u32 val; - if (capable(CAP_NET_ADMIN)) - bp->msg_enable = level; -} + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); -static int bnx2x_nway_reset(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); + /* Do not rcv packets to BRB */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); + /* Do not direct rcv 
packets that are not for MCP to the BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - if (!bp->port.pmf) - return 0; + /* Configure AEU */ + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); - if (netif_running(dev)) { - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_link_set(bp); - } + msleep(100); + /* Check for BRB port occupancy */ + val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); + if (val) + DP(NETIF_MSG_IFDOWN, + "BRB1 is not empty %d blocks are occupied\n", val); - return 0; + /* TODO: Close Doorbell port? */ } -static u32 bnx2x_get_link(struct net_device *dev) +static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) { - struct bnx2x *bp = netdev_priv(dev); + DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", + BP_FUNC(bp), reset_code); - if (bp->flags & MF_FUNC_DIS) - return 0; + switch (reset_code) { + case FW_MSG_CODE_DRV_UNLOAD_COMMON: + bnx2x_reset_port(bp); + bnx2x_reset_func(bp); + bnx2x_reset_common(bp); + break; - return bp->link_vars.link_up; -} + case FW_MSG_CODE_DRV_UNLOAD_PORT: + bnx2x_reset_port(bp); + bnx2x_reset_func(bp); + break; -static int bnx2x_get_eeprom_len(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); + case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: + bnx2x_reset_func(bp); + break; - return bp->common.flash_size; + default: + BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); + break; + } } -static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) { int port = BP_PORT(bp); - int count, i; - u32 val = 0; + u32 reset_code = 0; + int i, cnt, rc; - /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; + /* Wait until tx fastpath tasks complete */ + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + cnt = 1000; + while (bnx2x_has_tx_work_unload(fp)) { + + bnx2x_tx_int(fp); + if (!cnt) { + BNX2X_ERR("timeout waiting for queue[%d]\n", + i); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); + return -EBUSY; +#else + break; +#endif + } + cnt--; + msleep(1); + } + } + /* Give HW time to discard old tx messages */ + msleep(1); - /* request access to nvram interface */ - REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, - (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); + if (CHIP_IS_E1(bp)) { + struct mac_configuration_cmd *config = + bnx2x_sp(bp, mcast_config); - for (i = 0; i < count*10; i++) { - val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); - if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) - break; + bnx2x_set_eth_mac_addr_e1(bp, 0); - udelay(5); - } + for (i = 0; i < config->hdr.length; i++) + CAM_INVALIDATE(config->config_table[i]); - if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { - DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); - return -EBUSY; - } + config->hdr.length = i; + if (CHIP_REV_IS_SLOW(bp)) + config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port); + else + config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port); + config->hdr.client_id = bp->fp->cl_id; + config->hdr.reserved1 = 0; - return 0; -} + bp->set_mac_pending++; + smp_wmb(); -static int bnx2x_release_nvram_lock(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int count, i; - u32 val = 0; + bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, + U64_HI(bnx2x_sp_mapping(bp, mcast_config)), + U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); - /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; + } else { /* E1H */ + REG_WR(bp, 
NIG_REG_LLH0_FUNC_EN + port*8, 0); - /* relinquish nvram interface */ - REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, - (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); + bnx2x_set_eth_mac_addr_e1h(bp, 0); - for (i = 0; i < count*10; i++) { - val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); - if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) - break; + for (i = 0; i < MC_HASH_SIZE; i++) + REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); - udelay(5); + REG_WR(bp, MISC_REG_E1HMF_MODE, 0); } - - if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { - DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); - return -EBUSY; +#ifdef BCM_CNIC + /* Clear iSCSI L2 MAC */ + mutex_lock(&bp->cnic_mutex); + if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) { + bnx2x_set_iscsi_eth_mac_addr(bp, 0); + bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET; } + mutex_unlock(&bp->cnic_mutex); +#endif - return 0; -} - -static void bnx2x_enable_nvram_access(struct bnx2x *bp) -{ - u32 val; + if (unload_mode == UNLOAD_NORMAL) + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + else if (bp->flags & NO_WOL_FLAG) + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; - /* enable both bits, even on read */ - REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, - (val | MCPR_NVM_ACCESS_ENABLE_EN | - MCPR_NVM_ACCESS_ENABLE_WR_EN)); -} + else if (bp->wol) { + u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + u8 *mac_addr = bp->dev->dev_addr; + u32 val; + /* The mac address is written to entries 1-4 to + preserve entry 0 which is used by the PMF */ + u8 entry = (BP_E1HVN(bp) + 1)*8; -static void bnx2x_disable_nvram_access(struct bnx2x *bp) -{ - u32 val; + val = (mac_addr[0] << 8) | mac_addr[1]; + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); - val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); - /* disable both bits, even after read */ - REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, - (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | - MCPR_NVM_ACCESS_ENABLE_WR_EN))); -} + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; -static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, - u32 cmd_flags) -{ - int count, i, rc; - u32 val; + } else + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - /* build the command word */ - cmd_flags |= MCPR_NVM_COMMAND_DOIT; + /* Close multi and leading connections + Completions for ramrods are collected in a synchronous way */ + for_each_nondefault_queue(bp, i) + if (bnx2x_stop_multi(bp, i)) + goto unload_error; - /* need to clear DONE bit separately */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + rc = bnx2x_stop_leading(bp); + if (rc) { + BNX2X_ERR("Stop leading failed!\n"); +#ifdef BNX2X_STOP_ON_ERROR + return -EBUSY; +#else + goto unload_error; +#endif + } - /* address of the NVRAM to read from */ - REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, - (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); +unload_error: + if (!BP_NOMCP(bp)) + reset_code = bnx2x_fw_command(bp, reset_code); + else { + DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", + load_count[0], load_count[1], load_count[2]); + load_count[0]--; + load_count[1 + port]--; + DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n", + load_count[0], load_count[1], load_count[2]); + if (load_count[0] == 0) + reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; + else if (load_count[1 + port] == 0) + reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; + else + reset_code = 
FW_MSG_CODE_DRV_UNLOAD_FUNCTION; + } - /* issue a read command */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || + (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) + bnx2x__link_reset(bp); - /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; + /* Reset the chip */ + bnx2x_reset_chip(bp, reset_code); - /* wait for completion */ - *ret_val = 0; - rc = -EBUSY; - for (i = 0; i < count; i++) { - udelay(5); - val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); - - if (val & MCPR_NVM_COMMAND_DONE) { - val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); - /* we read nvram data in cpu order - * but ethtool sees it as an array of bytes - * converting to big-endian will do the work */ - *ret_val = cpu_to_be32(val); - rc = 0; - break; - } - } + /* Report UNLOAD_DONE to MCP */ + if (!BP_NOMCP(bp)) + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - return rc; } -static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, - int buf_size) +void bnx2x_disable_close_the_gate(struct bnx2x *bp) { - int rc; - u32 cmd_flags; - __be32 val; - - if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_NVM, - "Invalid parameter: offset 0x%x buf_size 0x%x\n", - offset, buf_size); - return -EINVAL; - } + u32 val; - if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", - offset, buf_size, bp->common.flash_size); - return -EINVAL; - } + DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); - /* request access to nvram interface */ - rc = bnx2x_acquire_nvram_lock(bp); - if (rc) - return rc; + if (CHIP_IS_E1(bp)) { + int port = BP_PORT(bp); + u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; - /* enable access to nvram interface */ - bnx2x_enable_nvram_access(bp); + val = REG_RD(bp, addr); + val &= ~(0x300); + REG_WR(bp, addr, val); + } else if (CHIP_IS_E1H(bp)) { + val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); + val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | + MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); + REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); + } +} - /* read the first word(s) */ - cmd_flags = MCPR_NVM_COMMAND_FIRST; - while ((buf_size > sizeof(u32)) && (rc == 0)) { - rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); - memcpy(ret_buf, &val, 4); - /* advance to the next dword */ - offset += sizeof(u32); - ret_buf += sizeof(u32); - buf_size -= sizeof(u32); - cmd_flags = 0; - } +/* Close gates #2, #3 and #4: */ +static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) +{ + u32 val, addr; - if (rc == 0) { - cmd_flags |= MCPR_NVM_COMMAND_LAST; - rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); - memcpy(ret_buf, &val, 4); + /* Gates #2 and #4a are closed/opened for "not E1" only */ + if (!CHIP_IS_E1(bp)) { + /* #4 */ + val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS); + REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, + close ? (val | 0x1) : (val & (~(u32)1))); + /* #2 */ + val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES); + REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, + close ? (val | 0x1) : (val & (~(u32)1))); } - /* disable access to nvram interface */ - bnx2x_disable_nvram_access(bp); - bnx2x_release_nvram_lock(bp); + /* #3 */ + addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + val = REG_RD(bp, addr); + REG_WR(bp, addr, (!close) ? 
(val | 0x1) : (val & (~(u32)1))); - return rc; + DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", + close ? "closing" : "opening"); + mmiowb(); } -static int bnx2x_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *eebuf) -{ - struct bnx2x *bp = netdev_priv(dev); - int rc; - - if (!netif_running(dev)) - return -EAGAIN; - - DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" - DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", - eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, - eeprom->len, eeprom->len); - - /* parameters already validated in ethtool_get_eeprom */ +#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ - rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); +static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) +{ + /* Do some magic... */ + u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); + *magic_val = val & SHARED_MF_CLP_MAGIC; + MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); +} - return rc; +/* Restore the value of the `magic' bit. + * + * @param pdev Device handle. + * @param magic_val Old value of the `magic' bit. + */ +static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) +{ + /* Restore the `magic' bit value... */ + /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb); + SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb, + (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */ + u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); + MF_CFG_WR(bp, shared_mf_config.clp_mb, + (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); } -static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, - u32 cmd_flags) +/* Prepares for MCP reset: takes care of CLP configurations. + * + * @param bp + * @param magic_val Old value of 'magic' bit. + */ +static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) { - int count, i, rc; + u32 shmem; + u32 validity_offset; - /* build the command word */ - cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; + DP(NETIF_MSG_HW, "Starting\n"); - /* need to clear DONE bit separately */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + /* Set `magic' bit in order to save MF config */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_prep(bp, magic_val); - /* write the data */ - REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); + /* Get shmem offset */ + shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + validity_offset = offsetof(struct shmem_region, validity_map[0]); - /* address of the NVRAM to write to */ - REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, - (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + /* Clear validity map flags */ + if (shmem > 0) + REG_WR(bp, shmem + validity_offset, 0); +} - /* issue the write command */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); +#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ +#define MCP_ONE_TIMEOUT 100 /* 100 ms */ - /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; +/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10, + * depending on the HW type. 
+ * + * @param bp + */ +static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) +{ + /* special handling for emulation and FPGA, + wait 10 times longer */ if (CHIP_REV_IS_SLOW(bp)) - count *= 100; - - /* wait for completion */ - rc = -EBUSY; - for (i = 0; i < count; i++) { - udelay(5); - val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); - if (val & MCPR_NVM_COMMAND_DONE) { - rc = 0; - break; - } - } - - return rc; + msleep(MCP_ONE_TIMEOUT*10); + else + msleep(MCP_ONE_TIMEOUT); } -#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) - -static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, - int buf_size) +static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) { - int rc; - u32 cmd_flags; - u32 align_offset; - __be32 val; - - if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", - offset, buf_size, bp->common.flash_size); - return -EINVAL; + u32 shmem, cnt, validity_offset, val; + int rc = 0; + + msleep(100); + + /* Get shmem offset */ + shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + if (shmem == 0) { + BNX2X_ERR("Shmem 0 return failure\n"); + rc = -ENOTTY; + goto exit_lbl; } - /* request access to nvram interface */ - rc = bnx2x_acquire_nvram_lock(bp); - if (rc) - return rc; + validity_offset = offsetof(struct shmem_region, validity_map[0]); - /* enable access to nvram interface */ - bnx2x_enable_nvram_access(bp); + /* Wait for MCP to come up */ + for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) { + /* TBD: its best to check validity map of last port. + * currently checks on port 0. + */ + val = REG_RD(bp, shmem + validity_offset); + DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem, + shmem + validity_offset, val); - cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); - align_offset = (offset & ~0x03); - rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); + /* check that shared memory is valid. */ + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + break; - if (rc == 0) { - val &= ~(0xff << BYTE_OFFSET(offset)); - val |= (*data_buf << BYTE_OFFSET(offset)); + bnx2x_mcp_wait_one(bp); + } - /* nvram data is returned as an array of bytes - * convert it back to cpu order */ - val = be32_to_cpu(val); + DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val); - rc = bnx2x_nvram_write_dword(bp, align_offset, val, - cmd_flags); + /* Check that shared memory is valid. This indicates that MCP is up. */ + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != + (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { + BNX2X_ERR("Shmem signature not present. 
MCP is not up !!\n"); + rc = -ENOTTY; + goto exit_lbl; } - /* disable access to nvram interface */ - bnx2x_disable_nvram_access(bp); - bnx2x_release_nvram_lock(bp); +exit_lbl: + /* Restore the `magic' bit value */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_done(bp, magic_val); return rc; } -static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, - int buf_size) +static void bnx2x_pxp_prep(struct bnx2x *bp) { - int rc; - u32 cmd_flags; - u32 val; - u32 written_so_far; - - if (buf_size == 1) /* ethtool */ - return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); - - if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_NVM, - "Invalid parameter: offset 0x%x buf_size 0x%x\n", - offset, buf_size); - return -EINVAL; - } - - if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", - offset, buf_size, bp->common.flash_size); - return -EINVAL; + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, PXP2_REG_RD_START_INIT, 0); + REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); + REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0); + mmiowb(); } +} - /* request access to nvram interface */ - rc = bnx2x_acquire_nvram_lock(bp); - if (rc) - return rc; +/* + * Reset the whole chip except for: + * - PCIE core + * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by + * one reset bit) + * - IGU + * - MISC (including AEU) + * - GRC + * - RBCN, RBCP + */ +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) +{ + u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; - /* enable access to nvram interface */ - bnx2x_enable_nvram_access(bp); + not_reset_mask1 = + MISC_REGISTERS_RESET_REG_1_RST_HC | + MISC_REGISTERS_RESET_REG_1_RST_PXPV | + MISC_REGISTERS_RESET_REG_1_RST_PXP; - written_so_far = 0; - cmd_flags = MCPR_NVM_COMMAND_FIRST; - while ((written_so_far < buf_size) && (rc == 0)) { - if (written_so_far == (buf_size - sizeof(u32))) - cmd_flags |= MCPR_NVM_COMMAND_LAST; - else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) - cmd_flags |= MCPR_NVM_COMMAND_LAST; - else if ((offset % NVRAM_PAGE_SIZE) == 0) - cmd_flags |= MCPR_NVM_COMMAND_FIRST; + not_reset_mask2 = + MISC_REGISTERS_RESET_REG_2_RST_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | + MISC_REGISTERS_RESET_REG_2_RST_RBCN | + MISC_REGISTERS_RESET_REG_2_RST_GRC | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B; - memcpy(&val, data_buf, 4); + reset_mask1 = 0xffffffff; - rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); + if (CHIP_IS_E1(bp)) + reset_mask2 = 0xffff; + else + reset_mask2 = 0x1ffff; - /* advance to the next dword */ - offset += sizeof(u32); - data_buf += sizeof(u32); - written_so_far += sizeof(u32); - cmd_flags = 0; - } + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + reset_mask1 & (~not_reset_mask1)); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + reset_mask2 & (~not_reset_mask2)); - /* disable access to nvram interface */ - bnx2x_disable_nvram_access(bp); - bnx2x_release_nvram_lock(bp); + barrier(); + mmiowb(); - return rc; + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2); + mmiowb(); } -static int bnx2x_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *eebuf) +static int bnx2x_process_kill(struct bnx2x *bp) { - 
struct bnx2x *bp = netdev_priv(dev); - int port = BP_PORT(bp); - int rc = 0; + int cnt = 1000; + u32 val = 0; + u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; - if (!netif_running(dev)) - return -EAGAIN; - DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" - DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", - eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, - eeprom->len, eeprom->len); + /* Empty the Tetris buffer, wait for 1s */ + do { + sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); + blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); + port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); + port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); + pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && + ((port_is_idle_0 & 0x1) == 0x1) && + ((port_is_idle_1 & 0x1) == 0x1) && + (pgl_exp_rom2 == 0xffffffff)) + break; + msleep(1); + } while (cnt-- > 0); - /* parameters already validated in ethtool_set_eeprom */ + if (cnt <= 0) { + DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" + " are still" + " outstanding read requests after 1s!\n"); + DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," + " port_is_idle_0=0x%08x," + " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", + sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, + pgl_exp_rom2); + return -EAGAIN; + } - /* PHY eeprom can be accessed only by the PMF */ - if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && - !bp->port.pmf) - return -EINVAL; + barrier(); - if (eeprom->magic == 0x50485950) { - /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ - bnx2x_stats_handle(bp, STATS_EVENT_STOP); + /* Close gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, true); - bnx2x_acquire_phy_lock(bp); - rc |= bnx2x_link_reset(&bp->link_params, - &bp->link_vars, 0); - if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, port); - bnx2x_release_phy_lock(bp); - bnx2x_link_report(bp); + /* TBD: Indicate that "process kill" is in progress to MCP */ - } else if (eeprom->magic == 0x50485952) { - /* 'PHYR' (0x50485952): re-init link after FW upgrade */ - if (bp->state == BNX2X_STATE_OPEN) { - bnx2x_acquire_phy_lock(bp); - rc |= bnx2x_link_reset(&bp->link_params, - &bp->link_vars, 1); + /* Clear "unprepared" bit */ + REG_WR(bp, MISC_REG_UNPREPARED, 0); + barrier(); - rc |= bnx2x_phy_init(&bp->link_params, - &bp->link_vars); - bnx2x_release_phy_lock(bp); - bnx2x_calc_fc_adv(bp); - } - } else if (eeprom->magic == 0x53985943) { - /* 'PHYC' (0x53985943): PHY FW upgrade completed */ - if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { - u8 ext_phy_addr = - XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); + /* Make sure all is written to the chip before the reset */ + mmiowb(); - /* DSP Remove Download Mode */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_LOW, port); + /* Wait for 1ms to empty GLUE and PCI-E core queues, + * PSWHST, GRC and PSWRD Tetris buffer. 
+ */ + msleep(1); - bnx2x_acquire_phy_lock(bp); + /* Prepare to chip reset: */ + /* MCP */ + bnx2x_reset_mcp_prep(bp, &val); - bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); + /* PXP */ + bnx2x_pxp_prep(bp); + barrier(); - /* wait 0.5 sec to allow it to run */ - msleep(500); - bnx2x_ext_phy_hw_reset(bp, port); - msleep(500); - bnx2x_release_phy_lock(bp); - } - } else - rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); + /* reset the chip */ + bnx2x_process_kill_chip_reset(bp); + barrier(); - return rc; -} + /* Recover after reset: */ + /* MCP */ + if (bnx2x_reset_mcp_comp(bp, val)) + return -EAGAIN; -static int bnx2x_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) -{ - struct bnx2x *bp = netdev_priv(dev); + /* PXP */ + bnx2x_pxp_prep(bp); - memset(coal, 0, sizeof(struct ethtool_coalesce)); + /* Open the gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, false); - coal->rx_coalesce_usecs = bp->rx_ticks; - coal->tx_coalesce_usecs = bp->tx_ticks; + /* TBD: IGU/AEU preparation bring back the AEU/IGU to a + * reset state, re-enable attentions. */ return 0; } -static int bnx2x_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) +static int bnx2x_leader_reset(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); - - bp->rx_ticks = (u16)coal->rx_coalesce_usecs; - if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) - bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; - - bp->tx_ticks = (u16)coal->tx_coalesce_usecs; - if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) - bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; + int rc = 0; + /* Try to recover after the failure */ + if (bnx2x_process_kill(bp)) { + printk(KERN_ERR "%s: Something bad had happen! Aii!\n", + bp->dev->name); + rc = -EAGAIN; + goto exit_leader_reset; + } - if (netif_running(dev)) - bnx2x_update_coalesce(bp); + /* Clear "reset is in progress" bit and update the driver state */ + bnx2x_set_reset_done(bp); + bp->recovery_state = BNX2X_RECOVERY_DONE; - return 0; +exit_leader_reset: + bp->is_leader = 0; + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); + smp_wmb(); + return rc; } -static void bnx2x_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) +/* Assumption: runs under rtnl lock. This together with the fact + * that it's called only from bnx2x_reset_task() ensure that it + * will never be called when netif_running(bp->dev) is false. + */ +static void bnx2x_parity_recover(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); + DP(NETIF_MSG_HW, "Handling parity\n"); + while (1) { + switch (bp->recovery_state) { + case BNX2X_RECOVERY_INIT: + DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); + /* Try to get a LEADER_LOCK HW lock */ + if (bnx2x_trylock_hw_lock(bp, + HW_LOCK_RESOURCE_RESERVED_08)) + bp->is_leader = 1; + + /* Stop the driver */ + /* If interface has been removed - break */ + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + return; + + bp->recovery_state = BNX2X_RECOVERY_WAIT; + /* Ensure "is_leader" and "recovery_state" + * update values are seen on other CPUs + */ + smp_wmb(); + break; + + case BNX2X_RECOVERY_WAIT: + DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); + if (bp->is_leader) { + u32 load_counter = bnx2x_get_load_cnt(bp); + if (load_counter) { + /* Wait until all other functions get + * down. + */ + schedule_delayed_work(&bp->reset_task, + HZ/10); + return; + } else { + /* If all other functions got down - + * try to bring the chip back to + * normal. In any case it's an exit + * point for a leader. 
+ */ + if (bnx2x_leader_reset(bp) || + bnx2x_nic_load(bp, LOAD_NORMAL)) { + printk(KERN_ERR"%s: Recovery " + "has failed. Power cycle is " + "needed.\n", bp->dev->name); + /* Disconnect this device */ + netif_device_detach(bp->dev); + /* Block ifup for all function + * of this ASIC until + * "process kill" or power + * cycle. + */ + bnx2x_set_reset_in_progress(bp); + /* Shut down the power */ + bnx2x_set_power_state(bp, + PCI_D3hot); + return; + } - ering->rx_max_pending = MAX_RX_AVAIL; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; + return; + } + } else { /* non-leader */ + if (!bnx2x_reset_is_done(bp)) { + /* Try to get a LEADER_LOCK HW lock as + * long as a former leader may have + * been unloaded by the user or + * released a leadership by another + * reason. + */ + if (bnx2x_trylock_hw_lock(bp, + HW_LOCK_RESOURCE_RESERVED_08)) { + /* I'm a leader now! Restart a + * switch case. + */ + bp->is_leader = 1; + break; + } - ering->rx_pending = bp->rx_ring_size; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; + schedule_delayed_work(&bp->reset_task, + HZ/10); + return; - ering->tx_max_pending = MAX_TX_AVAIL; - ering->tx_pending = bp->tx_ring_size; + } else { /* A leader has completed + * the "process kill". It's an exit + * point for a non-leader. + */ + bnx2x_nic_load(bp, LOAD_NORMAL); + bp->recovery_state = + BNX2X_RECOVERY_DONE; + smp_wmb(); + return; + } + } + default: + return; + } + } } -static int bnx2x_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) +/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is + * scheduled on a general queue in order to prevent a dead lock. + */ +static void bnx2x_reset_task(struct work_struct *work) { - struct bnx2x *bp = netdev_priv(dev); - int rc = 0; + struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work); - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); - return -EAGAIN; - } +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("reset task called but STOP_ON_ERROR defined" + " so reset not done to allow debug dump,\n" + KERN_ERR " you will need to reboot when done\n"); + return; +#endif - if ((ering->rx_pending > MAX_RX_AVAIL) || - (ering->tx_pending > MAX_TX_AVAIL) || - (ering->tx_pending <= MAX_SKB_FRAGS + 4)) - return -EINVAL; + rtnl_lock(); - bp->rx_ring_size = ering->rx_pending; - bp->tx_ring_size = ering->tx_pending; + if (!netif_running(bp->dev)) + goto reset_task_exit; - if (netif_running(dev)) { + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) + bnx2x_parity_recover(bp); + else { bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); + bnx2x_nic_load(bp, LOAD_NORMAL); } - return rc; +reset_task_exit: + rtnl_unlock(); } -static void bnx2x_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct bnx2x *bp = netdev_priv(dev); - - epause->autoneg = (bp->link_params.req_flow_ctrl == - BNX2X_FLOW_CTRL_AUTO) && - (bp->link_params.req_line_speed == SPEED_AUTO_NEG); - - epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == - BNX2X_FLOW_CTRL_RX); - epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == - BNX2X_FLOW_CTRL_TX); +/* end of nic load/unload */ - DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" - DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", - epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); -} +/* + * Init service functions + */ -static int bnx2x_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) +static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func) { - struct bnx2x *bp = netdev_priv(dev); - - if (IS_E1HMF(bp)) - return 0; - - DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" - DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", - epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); - - bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; - - if (epause->rx_pause) - bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; - - if (epause->tx_pause) - bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; - - if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) - bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; - - if (epause->autoneg) { - if (!(bp->port.supported & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "autoneg not supported\n"); - return -EINVAL; - } - - if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) - bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; - } - - DP(NETIF_MSG_LINK, - "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); - - if (netif_running(dev)) { - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_link_set(bp); + switch (func) { + case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0; + case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1; + case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2; + case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3; + case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4; + case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5; + case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6; + case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7; + default: + BNX2X_ERR("Unsupported function index: %d\n", func); + return (u32)(-1); } - - return 0; } -static int bnx2x_set_flags(struct net_device *dev, u32 data) +static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func) { - struct bnx2x *bp = netdev_priv(dev); - int changed = 0; - int rc = 0; + u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val; - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - 
printk(KERN_ERR "Handling parity error recovery. Try again later\n"); - return -EAGAIN; - } + /* Flush all outstanding writes */ + mmiowb(); - /* TPA requires Rx CSUM offloading */ - if ((data & ETH_FLAG_LRO) && bp->rx_csum) { - if (!disable_tpa) { - if (!(dev->features & NETIF_F_LRO)) { - dev->features |= NETIF_F_LRO; - bp->flags |= TPA_ENABLE_FLAG; - changed = 1; - } - } else - rc = -EINVAL; - } else if (dev->features & NETIF_F_LRO) { - dev->features &= ~NETIF_F_LRO; - bp->flags &= ~TPA_ENABLE_FLAG; - changed = 1; + /* Pretend to be function 0 */ + REG_WR(bp, reg, 0); + /* Flush the GRC transaction (in the chip) */ + new_val = REG_RD(bp, reg); + if (new_val != 0) { + BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n", + new_val); + BUG(); } - if (data & ETH_FLAG_RXHASH) - dev->features |= NETIF_F_RXHASH; - else - dev->features &= ~NETIF_F_RXHASH; + /* From now we are in the "like-E1" mode */ + bnx2x_int_disable(bp); - if (changed && netif_running(dev)) { - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); - } + /* Flush all outstanding writes */ + mmiowb(); - return rc; + /* Restore the original funtion settings */ + REG_WR(bp, reg, orig_func); + new_val = REG_RD(bp, reg); + if (new_val != orig_func) { + BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n", + orig_func, new_val); + BUG(); + } } -static u32 bnx2x_get_rx_csum(struct net_device *dev) +static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func) { - struct bnx2x *bp = netdev_priv(dev); - - return bp->rx_csum; + if (CHIP_IS_E1H(bp)) + bnx2x_undi_int_disable_e1h(bp, func); + else + bnx2x_int_disable(bp); } -static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) +static void __devinit bnx2x_undi_unload(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); - int rc = 0; - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. Try again later\n"); - return -EAGAIN; - } - - bp->rx_csum = data; - - /* Disable TPA, when Rx CSUM is disabled. Otherwise all - TPA'ed packets will be discarded due to wrong TCP CSUM */ - if (!data) { - u32 flags = ethtool_op_get_flags(dev); + u32 val; - rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO)); - } + /* Check if there is any driver already loaded */ + val = REG_RD(bp, MISC_REG_UNPREPARED); + if (val == 0x1) { + /* Check if it is the UNDI driver + * UNDI driver initializes CID offset for normal bell to 0x7 + */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); + if (val == 0x7) { + u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + /* save our func */ + int func = BP_FUNC(bp); + u32 swap_en; + u32 swap_val; - return rc; -} + /* clear the UNDI indication */ + REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); -static int bnx2x_set_tso(struct net_device *dev, u32 data) -{ - if (data) { - dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); - dev->features |= NETIF_F_TSO6; - } else { - dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN); - dev->features &= ~NETIF_F_TSO6; - } + BNX2X_DEV_INFO("UNDI is active! 
reset device\n"); - return 0; -} + /* try unload UNDI on port 0 */ + bp->func = 0; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = bnx2x_fw_command(bp, reset_code); -static const struct { - char string[ETH_GSTRING_LEN]; -} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { - { "register_test (offline)" }, - { "memory_test (offline)" }, - { "loopback_test (offline)" }, - { "nvram_test (online)" }, - { "interrupt_test (online)" }, - { "link_test (online)" }, - { "idle check (online)" } -}; + /* if UNDI is loaded on the other port */ + if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { -static int bnx2x_test_registers(struct bnx2x *bp) -{ - int idx, i, rc = -ENODEV; - u32 wr_val = 0; - int port = BP_PORT(bp); - static const struct { - u32 offset0; - u32 offset1; - u32 mask; - } reg_tbl[] = { -/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, - { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, - { HC_REG_AGG_INT_0, 4, 0x000003ff }, - { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, - { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, - { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, - { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, - { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, - { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, - { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, -/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, - { QM_REG_CONNNUM_0, 4, 0x000fffff }, - { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, - { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, - { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, - { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, - { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, - { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, - { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, - { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, -/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, - { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, - { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, - { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, - { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, - { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, - { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, - { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, - { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, - { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, -/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, - { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, - { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, - { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, - { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, - { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, - { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, - - { 0xffffffff, 0, 0x00000000 } - }; + /* send "DONE" for previous unload */ + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - if (!netif_running(bp->dev)) - return rc; + /* unload UNDI on port 1 */ + bp->func = 1; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - /* Repeat the test twice: - First by writing 0x00000000, second by writing 0xffffffff */ - for (idx = 0; idx < 2; idx++) { + bnx2x_fw_command(bp, reset_code); + } - switch (idx) { - case 0: - wr_val = 0; - break; - case 1: - wr_val = 0xffffffff; - break; - } + /* now it's safe to release the lock */ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { - u32 offset, mask, save_val, val; + bnx2x_undi_int_disable(bp, func); - offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; - mask = 
reg_tbl[i].mask; + /* close input traffic and wait for it */ + /* Do not rcv packets to BRB */ + REG_WR(bp, + (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : + NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); + /* Do not direct rcv packets that are not for MCP to + * the BRB */ + REG_WR(bp, + (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + /* clear AEU */ + REG_WR(bp, + (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); + msleep(10); - save_val = REG_RD(bp, offset); + /* save NIG port swap info */ + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + /* reset device */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffffff); + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + 0x1403); + /* take the NIG out of reset and restore swap values */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + MISC_REGISTERS_RESET_REG_1_RST_NIG); + REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); + REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); - REG_WR(bp, offset, (wr_val & mask)); - val = REG_RD(bp, offset); + /* send unload done to the MCP */ + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); - /* Restore the original register's value */ - REG_WR(bp, offset, save_val); + /* restore our func and fw_seq */ + bp->func = func; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); - /* verify value is as expected */ - if ((val & mask) != (wr_val & mask)) { - DP(NETIF_MSG_PROBE, - "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", - offset, val, wr_val, mask); - goto test_reg_exit; - } - } + } else + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); } - - rc = 0; - -test_reg_exit: - return rc; } -static int bnx2x_test_memory(struct bnx2x *bp) +static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) { - int i, j, rc = -ENODEV; - u32 val; - static const struct { - u32 offset; - int size; - } mem_tbl[] = { - { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, - { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, - { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, - { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, - { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, - { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, - { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, - - { 0xffffffff, 0 } - }; - static const struct { - char *name; - u32 offset; - u32 e1_mask; - u32 e1h_mask; - } prty_tbl[] = { - { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, - { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, - { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, - { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, - { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, - { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, - - { NULL, 0xffffffff, 0, 0 } - }; + u32 val, val2, val3, val4, id; + u16 pmc; - if (!netif_running(bp->dev)) - return rc; + /* Get the chip revision id and number. 
*/ + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + val = REG_RD(bp, MISC_REG_CHIP_NUM); + id = ((val & 0xffff) << 16); + val = REG_RD(bp, MISC_REG_CHIP_REV); + id |= ((val & 0xf) << 12); + val = REG_RD(bp, MISC_REG_CHIP_METAL); + id |= ((val & 0xff) << 4); + val = REG_RD(bp, MISC_REG_BOND_ID); + id |= (val & 0xf); + bp->common.chip_id = id; + bp->link_params.chip_id = bp->common.chip_id; + BNX2X_DEV_INFO("chip ID is 0x%x\n", id); - /* Go through all the memories */ - for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) - for (j = 0; j < mem_tbl[i].size; j++) - REG_RD(bp, mem_tbl[i].offset + j*4); - - /* Check the parity status */ - for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { - val = REG_RD(bp, prty_tbl[i].offset); - if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || - (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { - DP(NETIF_MSG_HW, - "%s is 0x%x\n", prty_tbl[i].name, val); - goto test_mem_exit; - } + val = (REG_RD(bp, 0x2874) & 0x55); + if ((bp->common.chip_id & 0x1) || + (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { + bp->flags |= ONE_PORT_FLAG; + BNX2X_DEV_INFO("single port device\n"); } - rc = 0; - -test_mem_exit: - return rc; -} - -static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) -{ - int cnt = 1000; + val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); + bp->common.flash_size = (NVRAM_1MB_SIZE << + (val & MCPR_NVM_CFG4_FLASH_SIZE)); + BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", + bp->common.flash_size, bp->common.flash_size); - if (link_up) - while (bnx2x_link_test(bp) && cnt--) - msleep(10); -} + bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); + bp->link_params.shmem_base = bp->common.shmem_base; + BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", + bp->common.shmem_base, bp->common.shmem2_base); -static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) -{ - unsigned int pkt_size, num_pkts, i; - struct sk_buff *skb; - unsigned char *packet; - struct bnx2x_fastpath *fp_rx = &bp->fp[0]; - struct bnx2x_fastpath *fp_tx = &bp->fp[0]; - u16 tx_start_idx, tx_idx; - u16 rx_start_idx, rx_idx; - u16 pkt_prod, bd_prod; - struct sw_tx_bd *tx_buf; - struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_parse_bd *pbd = NULL; - dma_addr_t mapping; - union eth_rx_cqe *cqe; - u8 cqe_fp_flags; - struct sw_rx_bd *rx_buf; - u16 len; - int rc = -ENODEV; - - /* check the loopback mode */ - switch (loopback_mode) { - case BNX2X_PHY_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) - return -EINVAL; - break; - case BNX2X_MAC_LOOPBACK: - bp->link_params.loopback_mode = LOOPBACK_BMAC; - bnx2x_phy_init(&bp->link_params, &bp->link_vars); - break; - default: - return -EINVAL; + if (!bp->common.shmem_base || + (bp->common.shmem_base < 0xA0000) || + (bp->common.shmem_base >= 0xC0000)) { + BNX2X_DEV_INFO("MCP not active\n"); + bp->flags |= NO_MCP_FLAG; + return; } - /* prepare the loopback packet */ - pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? 
- bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); - if (!skb) { - rc = -ENOMEM; - goto test_loopback_exit; - } - packet = skb_put(skb, pkt_size); - memcpy(packet, bp->dev->dev_addr, ETH_ALEN); - memset(packet + ETH_ALEN, 0, ETH_ALEN); - memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); - for (i = ETH_HLEN; i < pkt_size; i++) - packet[i] = (unsigned char) (i & 0xff); - - /* send the loopback packet */ - num_pkts = 0; - tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); - rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); - - pkt_prod = fp_tx->tx_pkt_prod++; - tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; - tx_buf->first_bd = fp_tx->tx_bd_prod; - tx_buf->skb = skb; - tx_buf->flags = 0; - - bd_prod = TX_BD(fp_tx->tx_bd_prod); - tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ - tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); - tx_start_bd->vlan = cpu_to_le16(pkt_prod); - tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - tx_start_bd->general_data = ((UNICAST_ADDRESS << - ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); - - /* turn on parsing and get a BD */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; - - memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); + val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + BNX2X_ERROR("BAD MCP validity signature\n"); - wmb(); + bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); + BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); - fp_tx->tx_db.data.prod += 2; - barrier(); - DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); + bp->link_params.hw_led_mode = ((bp->common.hw_config & + SHARED_HW_CFG_LED_MODE_MASK) >> + SHARED_HW_CFG_LED_MODE_SHIFT); - mmiowb(); + bp->link_params.feature_config_flags = 0; + val = SHMEM_RD(bp, dev_info.shared_feature_config.config); + if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + else + bp->link_params.feature_config_flags &= + ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; - num_pkts++; - fp_tx->tx_bd_prod += 2; /* start + pbd */ + val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; + bp->common.bc_ver = val; + BNX2X_DEV_INFO("bc_ver %X\n", val); + if (val < BNX2X_BC_VER) { + /* for now only warn + * later we might need to enforce this */ + BNX2X_ERROR("This driver needs bc_ver %X but found %X, " + "please upgrade BC\n", BNX2X_BC_VER, val); + } + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; - udelay(100); + if (BP_E1HVN(bp) == 0) { + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); + bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; + } else { + /* no WOL capability for E1HVN != 0 */ + bp->flags |= NO_WOL_FLAG; + } + BNX2X_DEV_INFO("%sWoL capable\n", + (bp->flags & NO_WOL_FLAG) ? 
"not " : ""); - tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); - if (tx_idx != tx_start_idx + num_pkts) - goto test_loopback_exit; + val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); + val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); + val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); + val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); - rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); - if (rx_idx != rx_start_idx + num_pkts) - goto test_loopback_exit; + dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", + val, val2, val3, val4); +} - cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; - cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; - if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) - goto test_loopback_rx_exit; +static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, + u32 switch_cfg) +{ + int port = BP_PORT(bp); + u32 ext_phy_type; - len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); - if (len != pkt_size) - goto test_loopback_rx_exit; + switch (switch_cfg) { + case SWITCH_CFG_1G: + BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg); - rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; - skb = rx_buf->skb; - skb_reserve(skb, cqe->fast_path_cqe.placement_offset); - for (i = ETH_HLEN; i < pkt_size; i++) - if (*(skb->data + i) != (unsigned char) (i & 0xff)) - goto test_loopback_rx_exit; + ext_phy_type = + SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config); + switch (ext_phy_type) { + case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: + BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", + ext_phy_type); - rc = 0; + bp->port.supported |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_TP | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; -test_loopback_rx_exit: + case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: + BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", + ext_phy_type); - fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); - fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); - fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); - fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); + bp->port.supported |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_TP | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - /* Update producers */ - bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, - fp_rx->rx_sge_prod); + default: + BNX2X_ERR("NVRAM config error. 
" + "BAD SerDes ext_phy_config 0x%x\n", + bp->link_params.ext_phy_config); + return; + } -test_loopback_exit: - bp->link_params.loopback_mode = LOOPBACK_NONE; + bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + + port*0x10); + BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); + break; - return rc; -} + case SWITCH_CFG_10G: + BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg); -static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) -{ - int rc = 0, res; + ext_phy_type = + XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); + switch (ext_phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n", + ext_phy_type); - if (BP_NOMCP(bp)) - return rc; + bp->port.supported |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - if (!netif_running(bp->dev)) - return BNX2X_LOOPBACK_FAILED; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: + BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", + ext_phy_type); - bnx2x_netif_stop(bp, 1); - bnx2x_acquire_phy_lock(bp); + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); - if (res) { - DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); - rc |= BNX2X_PHY_LOOPBACK_FAILED; - } + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n", + ext_phy_type); - res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); - if (res) { - DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); - rc |= BNX2X_MAC_LOOPBACK_FAILED; - } + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - bnx2x_release_phy_lock(bp); - bnx2x_netif_start(bp); + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: + BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", + ext_phy_type); - return rc; -} + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; -#define CRC32_RESIDUAL 0xdebb20e3 - -static int bnx2x_test_nvram(struct bnx2x *bp) -{ - static const struct { - int offset; - int size; - } nvram_tbl[] = { - { 0, 0x14 }, /* bootstrap */ - { 0x14, 0xec }, /* dir */ - { 0x100, 0x350 }, /* manuf_info */ - { 0x450, 0xf0 }, /* feature_info */ - { 0x640, 0x64 }, /* upgrade_key_info */ - { 0x6a4, 0x64 }, - { 0x708, 0x70 }, /* manuf_key_info */ - { 0x778, 0x70 }, - { 0, 0 } - }; - __be32 buf[0x350 / 4]; - u8 *data = (u8 *)buf; - int i, rc; - u32 magic, crc; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: + BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", + ext_phy_type); - if (BP_NOMCP(bp)) - return 0; + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - rc = bnx2x_nvram_read(bp, 0, data, 4); - if (rc) { - DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); - goto test_nvram_exit; - } + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n", + ext_phy_type); - magic = be32_to_cpu(buf[0]); - if (magic != 0x669955aa) { 
- DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); - rc = -ENODEV; - goto test_nvram_exit; - } + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - for (i = 0; nvram_tbl[i].size; i++) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n", + ext_phy_type); - rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, - nvram_tbl[i].size); - if (rc) { - DP(NETIF_MSG_PROBE, - "nvram_tbl[%d] read data (rc %d)\n", i, rc); - goto test_nvram_exit; - } + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - crc = ether_crc_le(nvram_tbl[i].size, data); - if (crc != CRC32_RESIDUAL) { - DP(NETIF_MSG_PROBE, - "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); - rc = -ENODEV; - goto test_nvram_exit; - } - } + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", + ext_phy_type); -test_nvram_exit: - return rc; -} + bp->port.supported |= (SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; -static int bnx2x_test_intr(struct bnx2x *bp) -{ - struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); - int i, rc; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: + BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n", + ext_phy_type); - if (!netif_running(bp->dev)) - return -ENODEV; + bp->port.supported |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; - config->hdr.length = 0; - if (CHIP_IS_E1(bp)) - /* use last unicast entries */ - config->hdr.offset = (BP_PORT(bp) ? 63 : 31); - else - config->hdr.offset = BP_FUNC(bp); - config->hdr.client_id = bp->fp->cl_id; - config->hdr.reserved1 = 0; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + BNX2X_ERR("XGXS PHY Failure detected 0x%x\n", + bp->link_params.ext_phy_config); + break; - bp->set_mac_pending++; - smp_wmb(); - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mac_config)), - U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); - if (rc == 0) { - for (i = 0; i < 10; i++) { - if (!bp->set_mac_pending) - break; - smp_rmb(); - msleep_interruptible(10); + default: + BNX2X_ERR("NVRAM config error. " + "BAD XGXS ext_phy_config 0x%x\n", + bp->link_params.ext_phy_config); + return; } - if (i == 10) - rc = -ENODEV; - } - return rc; -} + bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + + port*0x18); + BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); -static void bnx2x_self_test(struct net_device *dev, - struct ethtool_test *etest, u64 *buf) -{ - struct bnx2x *bp = netdev_priv(dev); + break; - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); - etest->flags |= ETH_TEST_FL_FAILED; + default: + BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", + bp->port.link_config); return; } + bp->link_params.phy_addr = bp->port.phy_addr; - memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); - - if (!netif_running(dev)) - return; - - /* offline tests are not supported in MF mode */ - if (IS_E1HMF(bp)) - etest->flags &= ~ETH_TEST_FL_OFFLINE; - - if (etest->flags & ETH_TEST_FL_OFFLINE) { - int port = BP_PORT(bp); - u32 val; - u8 link_up; + /* mask what we support according to speed_cap_mask */ + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) + bp->port.supported &= ~SUPPORTED_10baseT_Half; - /* save current value of input enable for TX port IF */ - val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); - /* disable input for TX port IF */ - REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) + bp->port.supported &= ~SUPPORTED_10baseT_Full; - link_up = (bnx2x_link_test(bp) == 0); - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - bnx2x_nic_load(bp, LOAD_DIAG); - /* wait until link state is restored */ - bnx2x_wait_for_link(bp, link_up); + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) + bp->port.supported &= ~SUPPORTED_100baseT_Half; - if (bnx2x_test_registers(bp) != 0) { - buf[0] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bnx2x_test_memory(bp) != 0) { - buf[1] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - buf[2] = bnx2x_test_loopback(bp, link_up); - if (buf[2] != 0) - etest->flags |= ETH_TEST_FL_FAILED; + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) + bp->port.supported &= ~SUPPORTED_100baseT_Full; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) + bp->port.supported &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); - /* restore input for TX port IF */ - REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + bp->port.supported &= ~SUPPORTED_2500baseX_Full; - bnx2x_nic_load(bp, LOAD_NORMAL); - /* wait until link state is restored */ - bnx2x_wait_for_link(bp, link_up); - } - if (bnx2x_test_nvram(bp) != 0) { - buf[3] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bnx2x_test_intr(bp) != 0) { - buf[4] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bp->port.pmf) - if (bnx2x_link_test(bp) != 0) { - buf[5] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } + if (!(bp->link_params.speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) + bp->port.supported &= ~SUPPORTED_10000baseT_Full; -#ifdef BNX2X_EXTRA_DEBUG - bnx2x_panic_dump(bp); -#endif + BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported); } -static const struct { - long offset; - int size; - u8 string[ETH_GSTRING_LEN]; -} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = { -/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" }, - { Q_STATS_OFFSET32(error_bytes_received_hi), - 8, "[%d]: rx_error_bytes" }, - { Q_STATS_OFFSET32(total_unicast_packets_received_hi), - 8, "[%d]: rx_ucast_packets" }, - { Q_STATS_OFFSET32(total_multicast_packets_received_hi), - 8, "[%d]: rx_mcast_packets" }, - { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, "[%d]: rx_bcast_packets" }, - { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" }, - { Q_STATS_OFFSET32(rx_err_discard_pkt), - 4, "[%d]: 
rx_phy_ip_err_discards"}, - { Q_STATS_OFFSET32(rx_skb_alloc_failed), - 4, "[%d]: rx_skb_alloc_discard" }, - { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" }, - -/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, - { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, "[%d]: tx_ucast_packets" }, - { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, "[%d]: tx_mcast_packets" }, - { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, "[%d]: tx_bcast_packets" } -}; - -static const struct { - long offset; - int size; - u32 flags; -#define STATS_FLAGS_PORT 1 -#define STATS_FLAGS_FUNC 2 -#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) - u8 string[ETH_GSTRING_LEN]; -} bnx2x_stats_arr[BNX2X_NUM_STATS] = { -/* 1 */ { STATS_OFFSET32(total_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bytes" }, - { STATS_OFFSET32(error_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, - { STATS_OFFSET32(total_unicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, - { STATS_OFFSET32(total_multicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, - { STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, - { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), - 8, STATS_FLAGS_PORT, "rx_crc_errors" }, - { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), - 8, STATS_FLAGS_PORT, "rx_align_errors" }, - { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, - { STATS_OFFSET32(etherstatsoverrsizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, -/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), - 8, STATS_FLAGS_PORT, "rx_fragments" }, - { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), - 8, STATS_FLAGS_PORT, "rx_jabbers" }, - { STATS_OFFSET32(no_buff_discard_hi), - 8, STATS_FLAGS_BOTH, "rx_discards" }, - { STATS_OFFSET32(mac_filter_discard), - 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, - { STATS_OFFSET32(xxoverflow_discard), - 4, STATS_FLAGS_PORT, "rx_fw_discards" }, - { STATS_OFFSET32(brb_drop_hi), - 8, STATS_FLAGS_PORT, "rx_brb_discard" }, - { STATS_OFFSET32(brb_truncate_hi), - 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, - { STATS_OFFSET32(pause_frames_received_hi), - 8, STATS_FLAGS_PORT, "rx_pause_frames" }, - { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, - { STATS_OFFSET32(nig_timer_max), - 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, -/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), - 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, - { STATS_OFFSET32(rx_skb_alloc_failed), - 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, - { STATS_OFFSET32(hw_csum_err), - 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, - - { STATS_OFFSET32(total_bytes_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bytes" }, - { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), - 8, STATS_FLAGS_PORT, "tx_error_bytes" }, - { STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, - { STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, - { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, - { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), - 8, STATS_FLAGS_PORT, "tx_mac_errors" }, - { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), - 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, -/* 30 */{ 
STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_single_collisions" }, - { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, - { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), - 8, STATS_FLAGS_PORT, "tx_deferred" }, - { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, - { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_late_collisions" }, - { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), - 8, STATS_FLAGS_PORT, "tx_total_collisions" }, - { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), - 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, - { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), - 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, - { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), - 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, - { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), - 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, -/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), - 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, - { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, - { STATS_OFFSET32(etherstatspktsover1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, - { STATS_OFFSET32(pause_frames_sent_hi), - 8, STATS_FLAGS_PORT, "tx_pause_frames" } -}; - -#define IS_PORT_STAT(i) \ - ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) -#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define IS_E1HMF_MODE_STAT(bp) \ - (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) - -static int bnx2x_get_sset_count(struct net_device *dev, int stringset) +static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) { - struct bnx2x *bp = netdev_priv(dev); - int i, num_stats; - - switch (stringset) { - case ETH_SS_STATS: - if (is_multi(bp)) { - num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; - if (!IS_E1HMF_MODE_STAT(bp)) - num_stats += BNX2X_NUM_STATS; - } else { - if (IS_E1HMF_MODE_STAT(bp)) { - num_stats = 0; - for (i = 0; i < BNX2X_NUM_STATS; i++) - if (IS_FUNC_STAT(i)) - num_stats++; - } else - num_stats = BNX2X_NUM_STATS; - } - return num_stats; - - case ETH_SS_TEST: - return BNX2X_NUM_TESTS; + bp->link_params.req_duplex = DUPLEX_FULL; - default: - return -EINVAL; - } -} + switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_AUTO: + if (bp->port.supported & SUPPORTED_Autoneg) { + bp->link_params.req_line_speed = SPEED_AUTO_NEG; + bp->port.advertising = bp->port.supported; + } else { + u32 ext_phy_type = + XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); -static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) -{ - struct bnx2x *bp = netdev_priv(dev); - int i, j, k; - - switch (stringset) { - case ETH_SS_STATS: - if (is_multi(bp)) { - k = 0; - for_each_queue(bp, i) { - for (j = 0; j < BNX2X_NUM_Q_STATS; j++) - sprintf(buf + (k + j)*ETH_GSTRING_LEN, - bnx2x_q_stats_arr[j].string, i); - k += BNX2X_NUM_Q_STATS; - } - if (IS_E1HMF_MODE_STAT(bp)) + if ((ext_phy_type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || + (ext_phy_type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { + /* force 10G, no AN */ + bp->link_params.req_line_speed = SPEED_10000; + bp->port.advertising = + (ADVERTISED_10000baseT_Full | + 
ADVERTISED_FIBRE); break; - for (j = 0; j < BNX2X_NUM_STATS; j++) - strcpy(buf + (k + j)*ETH_GSTRING_LEN, - bnx2x_stats_arr[j].string); - } else { - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) - continue; - strcpy(buf + j*ETH_GSTRING_LEN, - bnx2x_stats_arr[i].string); - j++; } + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " Autoneg not supported\n", + bp->port.link_config); + return; } break; - case ETH_SS_TEST: - memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); + case PORT_FEATURE_LINK_SPEED_10M_FULL: + if (bp->port.supported & SUPPORTED_10baseT_Full) { + bp->link_params.req_line_speed = SPEED_10; + bp->port.advertising = (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERROR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); + return; + } break; - } -} - -static void bnx2x_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *buf) -{ - struct bnx2x *bp = netdev_priv(dev); - u32 *hw_stats, *offset; - int i, j, k; - if (is_multi(bp)) { - k = 0; - for_each_queue(bp, i) { - hw_stats = (u32 *)&bp->fp[i].eth_q_stats; - for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { - if (bnx2x_q_stats_arr[j].size == 0) { - /* skip this counter */ - buf[k + j] = 0; - continue; - } - offset = (hw_stats + - bnx2x_q_stats_arr[j].offset); - if (bnx2x_q_stats_arr[j].size == 4) { - /* 4-byte counter */ - buf[k + j] = (u64) *offset; - continue; - } - /* 8-byte counter */ - buf[k + j] = HILO_U64(*offset, *(offset + 1)); - } - k += BNX2X_NUM_Q_STATS; - } - if (IS_E1HMF_MODE_STAT(bp)) + case PORT_FEATURE_LINK_SPEED_10M_HALF: + if (bp->port.supported & SUPPORTED_10baseT_Half) { + bp->link_params.req_line_speed = SPEED_10; + bp->link_params.req_duplex = DUPLEX_HALF; + bp->port.advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERROR("NVRAM config error. 
" + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); return; - hw_stats = (u32 *)&bp->eth_stats; - for (j = 0; j < BNX2X_NUM_STATS; j++) { - if (bnx2x_stats_arr[j].size == 0) { - /* skip this counter */ - buf[k + j] = 0; - continue; - } - offset = (hw_stats + bnx2x_stats_arr[j].offset); - if (bnx2x_stats_arr[j].size == 4) { - /* 4-byte counter */ - buf[k + j] = (u64) *offset; - continue; - } - /* 8-byte counter */ - buf[k + j] = HILO_U64(*offset, *(offset + 1)); - } - } else { - hw_stats = (u32 *)&bp->eth_stats; - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) - continue; - if (bnx2x_stats_arr[i].size == 0) { - /* skip this counter */ - buf[j] = 0; - j++; - continue; - } - offset = (hw_stats + bnx2x_stats_arr[i].offset); - if (bnx2x_stats_arr[i].size == 4) { - /* 4-byte counter */ - buf[j] = (u64) *offset; - j++; - continue; - } - /* 8-byte counter */ - buf[j] = HILO_U64(*offset, *(offset + 1)); - j++; } - } -} - -static int bnx2x_phys_id(struct net_device *dev, u32 data) -{ - struct bnx2x *bp = netdev_priv(dev); - int i; - - if (!netif_running(dev)) - return 0; - - if (!bp->port.pmf) - return 0; - - if (data == 0) - data = 2; - - for (i = 0; i < (data * 2); i++) { - if ((i % 2) == 0) - bnx2x_set_led(&bp->link_params, LED_MODE_OPER, - SPEED_1000); - else - bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); - - msleep_interruptible(500); - if (signal_pending(current)) - break; - } - - if (bp->link_vars.link_up) - bnx2x_set_led(&bp->link_params, LED_MODE_OPER, - bp->link_vars.line_speed); - - return 0; -} - -static const struct ethtool_ops bnx2x_ethtool_ops = { - .get_settings = bnx2x_get_settings, - .set_settings = bnx2x_set_settings, - .get_drvinfo = bnx2x_get_drvinfo, - .get_regs_len = bnx2x_get_regs_len, - .get_regs = bnx2x_get_regs, - .get_wol = bnx2x_get_wol, - .set_wol = bnx2x_set_wol, - .get_msglevel = bnx2x_get_msglevel, - .set_msglevel = bnx2x_set_msglevel, - .nway_reset = bnx2x_nway_reset, - .get_link = bnx2x_get_link, - .get_eeprom_len = bnx2x_get_eeprom_len, - .get_eeprom = bnx2x_get_eeprom, - .set_eeprom = bnx2x_set_eeprom, - .get_coalesce = bnx2x_get_coalesce, - .set_coalesce = bnx2x_set_coalesce, - .get_ringparam = bnx2x_get_ringparam, - .set_ringparam = bnx2x_set_ringparam, - .get_pauseparam = bnx2x_get_pauseparam, - .set_pauseparam = bnx2x_set_pauseparam, - .get_rx_csum = bnx2x_get_rx_csum, - .set_rx_csum = bnx2x_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_hw_csum, - .set_flags = bnx2x_set_flags, - .get_flags = ethtool_op_get_flags, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, - .set_tso = bnx2x_set_tso, - .self_test = bnx2x_self_test, - .get_sset_count = bnx2x_get_sset_count, - .get_strings = bnx2x_get_strings, - .phys_id = bnx2x_phys_id, - .get_ethtool_stats = bnx2x_get_ethtool_stats, -}; - -/* end of ethtool_ops */ - -/**************************************************************************** -* General service functions -****************************************************************************/ - -static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) -{ - u16 pmcsr; + break; - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + case PORT_FEATURE_LINK_SPEED_100M_FULL: + if (bp->port.supported & SUPPORTED_100baseT_Full) { + bp->link_params.req_line_speed = SPEED_100; + bp->port.advertising = (ADVERTISED_100baseT_Full | + 
ADVERTISED_TP); + } else { + BNX2X_ERROR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); + return; + } + break; - switch (state) { - case PCI_D0: - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | - PCI_PM_CTRL_PME_STATUS)); + case PORT_FEATURE_LINK_SPEED_100M_HALF: + if (bp->port.supported & SUPPORTED_100baseT_Half) { + bp->link_params.req_line_speed = SPEED_100; + bp->link_params.req_duplex = DUPLEX_HALF; + bp->port.advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERROR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); + return; + } + break; - if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ - msleep(20); + case PORT_FEATURE_LINK_SPEED_1G: + if (bp->port.supported & SUPPORTED_1000baseT_Full) { + bp->link_params.req_line_speed = SPEED_1000; + bp->port.advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERROR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); + return; + } break; - case PCI_D3hot: - /* If there are other clients above don't - shut down the power */ - if (atomic_read(&bp->pdev->enable_cnt) != 1) - return 0; - /* Don't shut down the power for emulation and FPGA */ - if (CHIP_REV_IS_SLOW(bp)) - return 0; - - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= 3; - - if (bp->wol) - pmcsr |= PCI_PM_CTRL_PME_ENABLE; - - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - pmcsr); + case PORT_FEATURE_LINK_SPEED_2_5G: + if (bp->port.supported & SUPPORTED_2500baseX_Full) { + bp->link_params.req_line_speed = SPEED_2500; + bp->port.advertising = (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + } else { + BNX2X_ERROR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); + return; + } + break; - /* No more memory access after this point until - * device is brought back to D0. - */ + case PORT_FEATURE_LINK_SPEED_10G_CX4: + case PORT_FEATURE_LINK_SPEED_10G_KX4: + case PORT_FEATURE_LINK_SPEED_10G_KR: + if (bp->port.supported & SUPPORTED_10000baseT_Full) { + bp->link_params.req_line_speed = SPEED_10000; + bp->port.advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else { + BNX2X_ERROR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + bp->port.link_config, + bp->link_params.speed_cap_mask); + return; + } break; default: - return -EINVAL; + BNX2X_ERROR("NVRAM config error. 
" + "BAD link speed link_config 0x%x\n", + bp->port.link_config); + bp->link_params.req_line_speed = SPEED_AUTO_NEG; + bp->port.advertising = bp->port.supported; + break; } - return 0; -} -static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) -{ - u16 rx_cons_sb; + bp->link_params.req_flow_ctrl = (bp->port.link_config & + PORT_FEATURE_FLOW_CONTROL_MASK); + if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && + !(bp->port.supported & SUPPORTED_Autoneg)) + bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; - /* Tell compiler that status block fields can change */ - barrier(); - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - rx_cons_sb++; - return (fp->rx_comp_cons != rx_cons_sb); + BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" + " advertising 0x%x\n", + bp->link_params.req_line_speed, + bp->link_params.req_duplex, + bp->link_params.req_flow_ctrl, bp->port.advertising); } -/* - * net_device service functions - */ - -static int bnx2x_poll(struct napi_struct *napi, int budget) +static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) { - int work_done = 0; - struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, - napi); - struct bnx2x *bp = fp->bp; - - while (1) { -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - napi_complete(napi); - return 0; - } -#endif - - if (bnx2x_has_tx_work(fp)) - bnx2x_tx_int(fp); + mac_hi = cpu_to_be16(mac_hi); + mac_lo = cpu_to_be32(mac_lo); + memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); + memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); +} - if (bnx2x_has_rx_work(fp)) { - work_done += bnx2x_rx_int(fp, budget - work_done); +static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val, val2; + u32 config; + u16 i; + u32 ext_phy_type; - /* must not complete if we consumed full budget */ - if (work_done >= budget) - break; - } + bp->link_params.bp = bp; + bp->link_params.port = port; - /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - bnx2x_update_fpsb_idx(fp); - /* bnx2x_has_rx_work() reads the status block, thus we need - * to ensure that status block indices have been actually read - * (bnx2x_update_fpsb_idx) prior to this check - * (bnx2x_has_rx_work) so that we won't write the "newer" - * value of the status block to IGU (if there was a DMA right - * after bnx2x_has_rx_work and if there is no rmb, the memory - * reading (bnx2x_update_fpsb_idx) may be postponed to right - * before bnx2x_ack_sb). In this case there will never be - * another interrupt until there is another update of the - * status block, while there is still unhandled work. 
- */ - rmb(); - - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - napi_complete(napi); - /* Re-enable interrupts */ - bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, - le16_to_cpu(fp->fp_c_idx), - IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, - le16_to_cpu(fp->fp_u_idx), - IGU_INT_ENABLE, 1); - break; - } - } + bp->link_params.lane_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); + bp->link_params.ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + /* BCM8727_NOC => BCM8727 no over current */ + if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) { + bp->link_params.ext_phy_config &= + ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + bp->link_params.ext_phy_config |= + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727; + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_BCM8727_NOC; } - return work_done; -} - - -/* we split the first BD into headers and data BDs - * to ease the pain of our fellow microcode engineers - * we use one mapping for both BDs - * So far this has only been observed to happen - * in Other Operating Systems(TM) - */ -static noinline u16 bnx2x_tx_split(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - struct sw_tx_bd *tx_buf, - struct eth_tx_start_bd **tx_bd, u16 hlen, - u16 bd_prod, int nbd) -{ - struct eth_tx_start_bd *h_tx_bd = *tx_bd; - struct eth_tx_bd *d_tx_bd; - dma_addr_t mapping; - int old_len = le16_to_cpu(h_tx_bd->nbytes); - - /* first fix first BD */ - h_tx_bd->nbd = cpu_to_le16(nbd); - h_tx_bd->nbytes = cpu_to_le16(hlen); + bp->link_params.speed_cap_mask = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask); - DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " - "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, - h_tx_bd->addr_lo, h_tx_bd->nbd); + bp->port.link_config = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); - /* now get a new data BD - * (after the pbd) and fill it */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; + /* Get the 4 lanes xgxs config rx and tx */ + for (i = 0; i < 2; i++) { + val = SHMEM_RD(bp, + dev_info.port_hw_config[port].xgxs_config_rx[i<<1]); + bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff); + bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff); - mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), - le32_to_cpu(h_tx_bd->addr_lo)) + hlen; + val = SHMEM_RD(bp, + dev_info.port_hw_config[port].xgxs_config_tx[i<<1]); + bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff); + bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff); + } - d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); + /* If the device is capable of WoL, set the default state according + * to the HW + */ + config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); + bp->wol = (!(bp->flags & NO_WOL_FLAG) && + (config & PORT_FEATURE_WOL_ENABLED)); - /* this marks the BD as one that has no individual mapping */ - tx_buf->flags |= BNX2X_TSO_SPLIT_BD; + BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x" + " speed_cap_mask 0x%08x link_config 0x%08x\n", + bp->link_params.lane_config, + bp->link_params.ext_phy_config, + bp->link_params.speed_cap_mask, bp->port.link_config); - DP(NETIF_MSG_TX_QUEUED, - "TSO split data size is %d (%x:%x)\n", - d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); + bp->link_params.switch_cfg |= 
(bp->port.link_config & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); - /* update tx_bd */ - *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; + bnx2x_link_settings_requested(bp); - return bd_prod; -} + /* + * If connected directly, work with the internal PHY, otherwise, work + * with the external PHY + */ + ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); + if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bp->mdio.prtad = bp->link_params.phy_addr; -static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) -{ - if (fix > 0) - csum = (u16) ~csum_fold(csum_sub(csum, - csum_partial(t_header - fix, fix, 0))); + else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && + (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + bp->mdio.prtad = + XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); - else if (fix < 0) - csum = (u16) ~csum_fold(csum_add(csum, - csum_partial(t_header, -fix, 0))); + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); - return swab16(csum); +#ifdef BCM_CNIC + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); + bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); +#endif } -static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) { - u32 rc; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - rc = XMIT_PLAIN; - - else { - if (skb->protocol == htons(ETH_P_IPV6)) { - rc = XMIT_CSUM_V6; - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; - - } else { - rc = XMIT_CSUM_V4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; - } - } - - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) - rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); - - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); - - return rc; -} + int func = BP_FUNC(bp); + u32 val, val2; + int rc = 0; -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) -/* check if packet requires linearization (packet is too fragmented) - no need to check fragmentation if page size > 8K (there will be no - violation to FW restrictions) */ -static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, - u32 xmit_type) -{ - int to_copy = 0; - int hlen = 0; - int first_bd_sz = 0; - - /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ - if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { - - if (xmit_type & XMIT_GSO) { - unsigned short lso_mss = skb_shinfo(skb)->gso_size; - /* Check if LSO packet needs to be copied: - 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ - int wnd_size = MAX_FETCH_BD - 3; - /* Number of windows to check */ - int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; - int wnd_idx = 0; - int frag_idx = 0; - u32 wnd_sum = 0; - - /* Headers length */ - hlen = (int)(skb_transport_header(skb) - skb->data) + - tcp_hdrlen(skb); - - /* Amount of data (w/o headers) on linear part of SKB*/ - first_bd_sz = skb_headlen(skb) - hlen; - - wnd_sum = first_bd_sz; - - /* Calculate the first sum - it's special */ - for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) - wnd_sum += - skb_shinfo(skb)->frags[frag_idx].size; - - /* If there 
was data on linear skb data - check it */ - if (first_bd_sz > 0) { - if (unlikely(wnd_sum < lso_mss)) { - to_copy = 1; - goto exit_lbl; - } + bnx2x_get_common_hwinfo(bp); - wnd_sum -= first_bd_sz; - } + bp->e1hov = 0; + bp->e1hmf = 0; + if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { + bp->mf_config = + SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); - /* Others are easier: run through the frag list and - check all windows */ - for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { - wnd_sum += - skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; + val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK); + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) + bp->e1hmf = 1; + BNX2X_DEV_INFO("%s function mode\n", + IS_E1HMF(bp) ? "multi" : "single"); - if (unlikely(wnd_sum < lso_mss)) { - to_copy = 1; - break; - } - wnd_sum -= - skb_shinfo(skb)->frags[wnd_idx].size; + if (IS_E1HMF(bp)) { + val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func]. + e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK); + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->e1hov = val; + BNX2X_DEV_INFO("E1HOV for func %d is %d " + "(0x%04x)\n", + func, bp->e1hov, bp->e1hov); + } else { + BNX2X_ERROR("No valid E1HOV for func %d," + " aborting\n", func); + rc = -EPERM; } } else { - /* in non-LSO too fragmented packet should always - be linearized */ - to_copy = 1; + if (BP_E1HVN(bp)) { + BNX2X_ERROR("VN %d in single function mode," + " aborting\n", BP_E1HVN(bp)); + rc = -EPERM; + } } } -exit_lbl: - if (unlikely(to_copy)) - DP(NETIF_MSG_TX_QUEUED, - "Linearization IS REQUIRED for %s packet. " - "num_frags %d hlen %d first_bd_sz %d\n", - (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", - skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); - - return to_copy; -} -#endif - -/* called with netif_tx_lock - * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call - * netif_wake_queue() - */ -static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - struct bnx2x_fastpath *fp; - struct netdev_queue *txq; - struct sw_tx_bd *tx_buf; - struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; - struct eth_tx_parse_bd *pbd = NULL; - u16 pkt_prod, bd_prod; - int nbd, fp_index; - dma_addr_t mapping; - u32 xmit_type = bnx2x_xmit_type(bp, skb); - int i; - u8 hlen = 0; - __le16 pkt_size = 0; - struct ethhdr *eth; - u8 mac_type = UNICAST_ADDRESS; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return NETDEV_TX_BUSY; -#endif - - fp_index = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, fp_index); - - fp = &bp->fp[fp_index]; + if (!BP_NOMCP(bp)) { + bnx2x_get_port_hwinfo(bp); - if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { - fp->eth_q_stats.driver_xoff++; - netif_tx_stop_queue(txq); - BNX2X_ERR("BUG! 
Tx ring full when queue awake!\n"); - return NETDEV_TX_BUSY; + bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); } - DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" - " gso type %x xmit_type %x\n", - skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, - ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + if (IS_E1HMF(bp)) { + val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); + val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); + if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { + bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); + bp->dev->dev_addr[1] = (u8)(val2 & 0xff); + bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff); + bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff); + bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff); + bp->dev->dev_addr[5] = (u8)(val & 0xff); + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, + ETH_ALEN); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, + ETH_ALEN); + } - eth = (struct ethhdr *)skb->data; + return rc; + } - /* set flag according to packet type (UNICAST_ADDRESS is default)*/ - if (unlikely(is_multicast_ether_addr(eth->h_dest))) { - if (is_broadcast_ether_addr(eth->h_dest)) - mac_type = BROADCAST_ADDRESS; - else - mac_type = MULTICAST_ADDRESS; - } - -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) - /* First, check if we need to linearize the skb (due to FW - restrictions). No need to check fragmentation if page size > 8K - (there will be no violation to FW restrictions) */ - if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { - /* Statistics of linearization */ - bp->lin_cnt++; - if (skb_linearize(skb) != 0) { - DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " - "silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (BP_NOMCP(bp)) { + /* only supposed to happen on emulation/FPGA */ + BNX2X_ERROR("warning: random MAC workaround active\n"); + random_ether_addr(bp->dev->dev_addr); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); } -#endif - /* - Please read carefully. First we use one BD which we mark as start, - then we have a parsing info BD (used for TSO or xsum), - and only then we have the rest of the TSO BDs. - (don't forget to mark the last one as last, - and to unmap only AFTER you write to the BD ...) - And above all, all pdb sizes are in words - NOT DWORDS! 
- */ - - pkt_prod = fp->tx_pkt_prod++; - bd_prod = TX_BD(fp->tx_bd_prod); - - /* get a tx_buf and first BD */ - tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; - tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; - - tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - tx_start_bd->general_data = (mac_type << - ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); - /* header nbd */ - tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); - - /* remember the first BD of the packet */ - tx_buf->first_bd = fp->tx_bd_prod; - tx_buf->skb = skb; - tx_buf->flags = 0; - - DP(NETIF_MSG_TX_QUEUED, - "sending pkt %u @%p next_idx %u bd %u @%p\n", - pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); + return rc; +} -#ifdef BCM_VLAN - if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && - (bp->flags & HW_VLAN_TX_FLAG)) { - tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; - } else -#endif - tx_start_bd->vlan = cpu_to_le16(pkt_prod); +static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) +{ + int cnt, i, block_end, rodi; + char vpd_data[BNX2X_VPD_LEN+1]; + char str_id_reg[VENDOR_ID_LEN+1]; + char str_id_cap[VENDOR_ID_LEN+1]; + u8 len; - /* turn on parsing and get a BD */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - pbd = &fp->tx_desc_ring[bd_prod].parse_bd; + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); + memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); - memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); + if (cnt < BNX2X_VPD_LEN) + goto out_not_found; - if (xmit_type & XMIT_CSUM) { - hlen = (skb_network_header(skb) - skb->data) / 2; + i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, + PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; - /* for now NS flag is not used in Linux */ - pbd->global_data = - (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << - ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); - pbd->ip_hlen = (skb_transport_header(skb) - - skb_network_header(skb)) / 2; + block_end = i + PCI_VPD_LRDT_TAG_SIZE + + pci_vpd_lrdt_size(&vpd_data[i]); - hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; + i += PCI_VPD_LRDT_TAG_SIZE; - pbd->total_hlen = cpu_to_le16(hlen); - hlen = hlen*2; + if (block_end > BNX2X_VPD_LEN) + goto out_not_found; - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (rodi < 0) + goto out_not_found; - if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; + len = pci_vpd_info_field_size(&vpd_data[rodi]); - if (xmit_type & XMIT_CSUM_TCP) { - pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + if (len != VENDOR_ID_LEN) + goto out_not_found; - } else { - s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG; + /* vendor specific info */ + snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); + snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); + if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || + !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { - DP(NETIF_MSG_TX_QUEUED, - "hlen %d fix %d csum before fix %x\n", - le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (rodi >= 0) { + len = pci_vpd_info_field_size(&vpd_data[rodi]); - /* HW bug: fixup the CSUM */ - pbd->tcp_pseudo_csum = - bnx2x_csum_fix(skb_transport_header(skb), - SKB_CS(skb), fix); + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", - pbd->tcp_pseudo_csum); + if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { + memcpy(bp->fw_ver, &vpd_data[rodi], len); + bp->fw_ver[len] = ' '; + } } + return; } +out_not_found: + return; +} - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - - tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ - tx_start_bd->nbd = cpu_to_le16(nbd); - tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); - pkt_size = tx_start_bd->nbytes; - - DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" - " nbytes %d flags %x vlan %x\n", - tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, - le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), - tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); +static int __devinit bnx2x_init_bp(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + int timer_interval; + int rc; - if (xmit_type & XMIT_GSO) { + /* Disable interrupt handling until HW is initialized */ + atomic_set(&bp->intr_sem, 1); + smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ - DP(NETIF_MSG_TX_QUEUED, - "TSO packet len %d hlen %d total len %d tso size %d\n", - skb->len, hlen, skb_headlen(skb), - skb_shinfo(skb)->gso_size); + mutex_init(&bp->port.phy_mutex); + mutex_init(&bp->fw_mb_mutex); + spin_lock_init(&bp->stats_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_mutex); +#endif - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; + INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); + INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); - if (unlikely(skb_headlen(skb) > hlen)) - bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, - hlen, bd_prod, ++nbd); + rc = bnx2x_get_hwinfo(bp); - pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); - pbd->tcp_flags = pbd_tcp_flags(skb); + bnx2x_read_fwinfo(bp); + /* need to reset chip if undi was active */ + if (!BP_NOMCP(bp)) + bnx2x_undi_unload(bp); - if (xmit_type & XMIT_GSO_V4) { - pbd->ip_id = swab16(ip_hdr(skb)->id); - pbd->tcp_pseudo_csum = - swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + if (CHIP_REV_IS_FPGA(bp)) + dev_err(&bp->pdev->dev, "FPGA detected\n"); - } else - pbd->tcp_pseudo_csum = - swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); + if (BP_NOMCP(bp) && (func == 0)) + dev_err(&bp->pdev->dev, "MCP disabled, " + "must load devices in order!\n"); - pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; + /* Set multi queue mode */ + if 
((multi_mode != ETH_RSS_MODE_DISABLED) && + ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { + dev_err(&bp->pdev->dev, "Multi disabled since int_mode " + "requested is not MSI-X\n"); + multi_mode = ETH_RSS_MODE_DISABLED; } - tx_data_bd = (struct eth_tx_bd *)tx_start_bd; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; - if (total_pkt_bd == NULL) - total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; - - mapping = dma_map_page(&bp->pdev->dev, frag->page, - frag->page_offset, - frag->size, DMA_TO_DEVICE); + bp->multi_mode = multi_mode; + bp->int_mode = int_mode; - tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - tx_data_bd->nbytes = cpu_to_le16(frag->size); - le16_add_cpu(&pkt_size, frag->size); + bp->dev->features |= NETIF_F_GRO; - DP(NETIF_MSG_TX_QUEUED, - "frag %d bd @%p addr (%x:%x) nbytes %d\n", - i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, - le16_to_cpu(tx_data_bd->nbytes)); + /* Set TPA flags */ + if (disable_tpa) { + bp->flags &= ~TPA_ENABLE_FLAG; + bp->dev->features &= ~NETIF_F_LRO; + } else { + bp->flags |= TPA_ENABLE_FLAG; + bp->dev->features |= NETIF_F_LRO; } + bp->disable_tpa = disable_tpa; - DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); - - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - - /* now send a tx doorbell, counting the next BD - * if the packet contains or ends with it - */ - if (TX_BD_POFF(bd_prod) < nbd) - nbd++; - - if (total_pkt_bd != NULL) - total_pkt_bd->total_pkt_bytes = pkt_size; - - if (pbd) - DP(NETIF_MSG_TX_QUEUED, - "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" - " tcp_flags %x xsum %x seq %u hlen %u\n", - pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, - pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, - pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); + if (CHIP_IS_E1(bp)) + bp->dropless_fc = 0; + else + bp->dropless_fc = dropless_fc; - DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + bp->mrrs = mrrs; - /* - * Make sure that the BD data is updated before updating the producer - * since FW might read the BD right after the producer is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since FW will - * assumes packets must have BDs. - */ - wmb(); + bp->tx_ring_size = MAX_TX_AVAIL; + bp->rx_ring_size = MAX_RX_AVAIL; - fp->tx_db.data.prod += nbd; - barrier(); - DOORBELL(bp, fp->index, fp->tx_db.raw); + bp->rx_csum = 1; - mmiowb(); + /* make sure that the numbers are in the right granularity */ + bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); + bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); - fp->tx_bd_prod += nbd; + timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); + bp->current_interval = (poll ? 
poll : timer_interval); - if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { - netif_tx_stop_queue(txq); + init_timer(&bp->timer); + bp->timer.expires = jiffies + bp->current_interval; + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2x_timer; - /* paired memory barrier is in bnx2x_tx_int(), we have to keep - * ordering of set_bit() in netif_tx_stop_queue() and read of - * fp->bd_tx_cons */ - smp_mb(); + return rc; +} - fp->eth_q_stats.driver_xoff++; - if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) - netif_tx_wake_queue(txq); - } - fp->tx_pkt++; - return NETDEV_TX_OK; -} +/**************************************************************************** +* General service functions +****************************************************************************/ /* called with rtnl_lock */ static int bnx2x_open(struct net_device *dev) @@ -12586,7 +6849,7 @@ static int bnx2x_close(struct net_device *dev) } /* called with netif_tx_lock from dev_mcast.c */ -static void bnx2x_set_rx_mode(struct net_device *dev) +void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); u32 rx_mode = BNX2X_RX_MODE_NORMAL; @@ -12706,25 +6969,6 @@ static void bnx2x_set_rx_mode(struct net_device *dev) bnx2x_set_storm_rx_mode(bp); } -/* called with rtnl_lock */ -static int bnx2x_change_mac_addr(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - struct bnx2x *bp = netdev_priv(dev); - - if (!is_valid_ether_addr((u8 *)(addr->sa_data))) - return -EINVAL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - if (netif_running(dev)) { - if (CHIP_IS_E1(bp)) - bnx2x_set_eth_mac_addr_e1(bp, 1); - else - bnx2x_set_eth_mac_addr_e1h(bp, 1); - } - - return 0; -} /* called with rtnl_lock */ static int bnx2x_mdio_read(struct net_device *netdev, int prtad, @@ -12800,71 +7044,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return mdio_mii_ioctl(&bp->mdio, mdio, cmd); } -/* called with rtnl_lock */ -static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) -{ - struct bnx2x *bp = netdev_priv(dev); - int rc = 0; - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); - return -EAGAIN; - } - - if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) - return -EINVAL; - - /* This does not race with packet allocation - * because the actual alloc size is - * only updated as part of load - */ - dev->mtu = new_mtu; - - if (netif_running(dev)) { - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); - } - - return rc; -} - -static void bnx2x_tx_timeout(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - -#ifdef BNX2X_STOP_ON_ERROR - if (!bp->panic) - bnx2x_panic(); -#endif - /* This allows the netif to be shutdown gracefully before resetting */ - schedule_delayed_work(&bp->reset_task, 0); -} - -#ifdef BCM_VLAN -/* called with rtnl_lock */ -static void bnx2x_vlan_rx_register(struct net_device *dev, - struct vlan_group *vlgrp) -{ - struct bnx2x *bp = netdev_priv(dev); - - bp->vlgrp = vlgrp; - - /* Set flags according to the required capabilities */ - bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); - - if (dev->features & NETIF_F_HW_VLAN_TX) - bp->flags |= HW_VLAN_TX_FLAG; - - if (dev->features & NETIF_F_HW_VLAN_RX) - bp->flags |= HW_VLAN_RX_FLAG; - - if (netif_running(dev)) - bnx2x_set_client_config(bp); -} - -#endif - #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_bnx2x(struct net_device *dev) { @@ -13013,7 +7192,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; - dev->ethtool_ops = &bnx2x_ethtool_ops; + bnx2x_set_ethtool_ops(dev); dev->features |= NETIF_F_SG; dev->features |= NETIF_F_HW_CSUM; if (bp->flags & USING_DAC_FLAG) @@ -13366,73 +7545,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return -ENODEV; - } - bp = netdev_priv(dev); - - rtnl_lock(); - - pci_save_state(pdev); - - if (!netif_running(dev)) { - rtnl_unlock(); - return 0; - } - - netif_device_detach(dev); - - bnx2x_nic_unload(bp, UNLOAD_CLOSE); - - bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); - - rtnl_unlock(); - - return 0; -} - -static int bnx2x_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - int rc; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return -ENODEV; - } - bp = netdev_priv(dev); - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); - return -EAGAIN; - } - - rtnl_lock(); - - pci_restore_state(pdev); - - if (!netif_running(dev)) { - rtnl_unlock(); - return 0; - } - - bnx2x_set_power_state(bp, PCI_D0); - netif_device_attach(dev); - - rc = bnx2x_nic_load(bp, LOAD_OPEN); - - rtnl_unlock(); - - return rc; -} - static int bnx2x_eeh_nic_unload(struct bnx2x *bp) { int i; @@ -13754,7 +7866,7 @@ static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) /* * for commands that have no data */ -static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) { struct cnic_ctl_info ctl = {0}; @@ -13822,7 +7934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) return rc; } -static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) { struct cnic_eth_dev *cp = &bp->cnic_eth_dev; diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h similarity index 100% rename from drivers/net/bnx2x_reg.h rename to drivers/net/bnx2x/bnx2x_reg.h diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..c74724461020aa9ba60265a43907eabca6a40454 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -0,0 +1,1411 @@ +/* bnx2x_stats.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + #include "bnx2x_cmn.h" + #include "bnx2x_stats.h" + +/* Statistics */ + +/**************************************************************************** +* Macros +****************************************************************************/ + +/* sum[hi:lo] += add[hi:lo] */ +#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ + do { \ + s_lo += a_lo; \ + s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ + } while (0) + +/* difference = minuend - subtrahend */ +#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ + do { \ + if (m_lo < s_lo) { \ + /* underflow */ \ + d_hi = m_hi - s_hi; \ + if (d_hi > 0) { \ + /* we can 'loan' 1 */ \ + d_hi--; \ + d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ + } else { \ + /* m_hi <= s_hi */ \ + d_hi = 0; \ + d_lo = 0; \ + } \ + } else { \ + /* m_lo >= s_lo */ \ + if (m_hi < s_hi) { \ + d_hi = 0; \ + d_lo = 0; \ + } else { \ + /* m_hi >= s_hi */ \ + d_hi = m_hi - s_hi; \ + d_lo = m_lo - s_lo; \ + } \ + } \ + } while (0) + +#define UPDATE_STAT64(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ + diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ + pstats->mac_stx[0].t##_hi = new->s##_hi; \ + pstats->mac_stx[0].t##_lo = new->s##_lo; \ + ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ + pstats->mac_stx[1].t##_lo, diff.lo); \ + } while (0) + +#define UPDATE_STAT64_NIG(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ + diff.lo, new->s##_lo, old->s##_lo); \ + ADD_64(estats->t##_hi, diff.hi, \ + estats->t##_lo, diff.lo); \ + } while (0) + +/* sum[hi:lo] += add */ +#define ADD_EXTEND_64(s_hi, s_lo, a) \ + do { \ + s_lo += a; \ + s_hi += (s_lo < a) ? 
1 : 0; \ + } while (0) + +#define UPDATE_EXTEND_STAT(s) \ + do { \ + ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ + pstats->mac_stx[1].s##_lo, \ + new->s); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT(s, t) \ + do { \ + diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ + old_tclient->s = tclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + old_uclient->s = uclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_XSTAT(s, t) \ + do { \ + diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ + old_xclient->s = xclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +/* minuend -= subtrahend */ +#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ + do { \ + DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ + } while (0) + +/* minuend[hi:lo] -= subtrahend */ +#define SUB_EXTEND_64(m_hi, m_lo, s) \ + do { \ + SUB_64(m_hi, 0, m_lo, s); \ + } while (0) + +#define SUB_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +/* + * General service functions + */ + +static inline long bnx2x_hilo(u32 *hiref) +{ + u32 lo = *(hiref + 1); +#if (BITS_PER_LONG == 64) + u32 hi = *hiref; + + return HILO_U64(hi, lo); +#else + return lo; +#endif +} + +/* + * Init service functions + */ + + +static void bnx2x_storm_stats_post(struct bnx2x *bp) +{ + if (!bp->stats_pending) { + struct eth_query_ramrod_data ramrod_data = {0}; + int i, rc; + + spin_lock_bh(&bp->stats_lock); + + ramrod_data.drv_counter = bp->stats_counter++; + ramrod_data.collect_port = bp->port.pmf ? 1 : 0; + for_each_queue(bp, i) + ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); + + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, + ((u32 *)&ramrod_data)[1], + ((u32 *)&ramrod_data)[0], 0); + if (rc == 0) { + /* stats ramrod has it's own slot on the spq */ + bp->spq_left++; + bp->stats_pending = 1; + } + + spin_unlock_bh(&bp->stats_lock); + } +} + +static void bnx2x_hw_stats_post(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + *stats_comp = DMAE_COMP_VAL; + if (CHIP_REV_IS_SLOW(bp)) + return; + + /* loader */ + if (bp->executer_idx) { + int loader_idx = PMF_DMAE_C(bp); + + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | + DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : + DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); + dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + + sizeof(struct dmae_command) * + (loader_idx + 1)) >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct dmae_command) >> 2; + if (CHIP_IS_E1(bp)) + dmae->len--; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, loader_idx); + + } else if (bp->func_stx) { + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + } +} + +static int bnx2x_stats_comp(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + int cnt = 10; + + might_sleep(); + while (*stats_comp != DMAE_COMP_VAL) { + if (!cnt) { + BNX2X_ERR("timeout waiting for stats finished\n"); + break; + } + cnt--; + msleep(1); + } + return 1; +} + +/* + * Statistics service functions + */ + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); + dmae->src_addr_lo = bp->port.port_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->len = DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); + dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_port_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + int port = BP_PORT(bp); + int vn = BP_E1HVN(bp); + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 mac_addr; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->link_vars.link_up || !bp->port.pmf) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + /* MCP */ + opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (port ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (vn << DMAE_CMD_E1HVN_SHIFT)); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* MAC */ + opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (vn << DMAE_CMD_E1HVN_SHIFT)); + + if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { + + mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM); + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* BIGMAC_REGISTER_RX_STAT_GR64 .. + BIGMAC_REGISTER_RX_STAT_GRIPJ */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct bmac_stats, rx_stat_gr64_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct bmac_stats, rx_stat_gr64_lo)); + dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { + + mac_addr = (port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0); + + /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_RX_STAT_AC_28 */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->len = 1; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_TX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* NIG */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : + NIG_REG_STAT0_BRB_DISCARD) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); + dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (vn << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_func_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (bp->port.pmf) + bnx2x_port_stats_init(bp); + + else if (bp->func_stx) + bnx2x_func_stats_init(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_stats_pmf_start(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_pmf_update(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_stats_restart(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_bmac_stats_update(struct bnx2x *bp) +{ + struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct { + u32 lo; + u32 hi; + } diff; + + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); + 
UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_bmac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_bmac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; +} + +static void bnx2x_emac_stats_update(struct bnx2x *bp) +{ + struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); + UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); + UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); + UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); + UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); + UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); + UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); + UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); + UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); + UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); + UPDATE_EXTEND_STAT(tx_stat_outxonsent); + UPDATE_EXTEND_STAT(tx_stat_outxoffsent); + UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); + UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); + UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); + UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; + ADD_64(estats->pause_frames_received_hi, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, + estats->pause_frames_received_lo, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxonsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxonsent_lo; + ADD_64(estats->pause_frames_sent_hi, + pstats->mac_stx[1].tx_stat_outxoffsent_hi, + estats->pause_frames_sent_lo, + pstats->mac_stx[1].tx_stat_outxoffsent_lo); +} + +static int bnx2x_hw_stats_update(struct bnx2x *bp) +{ + struct nig_stats *new = bnx2x_sp(bp, nig_stats); + struct nig_stats *old = &(bp->port.old_nig_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + 
struct { + u32 lo; + u32 hi; + } diff; + + if (bp->link_vars.mac_type == MAC_TYPE_BMAC) + bnx2x_bmac_stats_update(bp); + + else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) + bnx2x_emac_stats_update(bp); + + else { /* unreached */ + BNX2X_ERR("stats updated by DMAE but no MAC active\n"); + return -1; + } + + ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, + new->brb_discard - old->brb_discard); + ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, + new->brb_truncate - old->brb_truncate); + + UPDATE_STAT64_NIG(egress_mac_pkt0, + etherstatspkts1024octetsto1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); + + memcpy(old, new, sizeof(struct nig_stats)); + + memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + + pstats->host_port_stats_start = ++pstats->host_port_stats_end; + + if (!BP_NOMCP(bp)) { + u32 nig_timer_max = + SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); + if (nig_timer_max != estats->nig_timer_max) { + estats->nig_timer_max = nig_timer_max; + BNX2X_ERR("NIG timer max (%u)\n", + estats->nig_timer_max); + } + } + + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); + struct tstorm_per_port_stats *tport = + &stats->tstorm_common.port_statistics; + struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; + u16 cur_stats_counter; + + /* Make sure we use the value of the counter + * used for sending the last stats ramrod. + */ + spin_lock_bh(&bp->stats_lock); + cur_stats_counter = bp->stats_counter - 1; + spin_unlock_bh(&bp->stats_lock); + + memcpy(&(fstats->total_bytes_received_hi), + &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), + sizeof(struct host_func_stats) - 2*sizeof(u32)); + estats->error_bytes_received_hi = 0; + estats->error_bytes_received_lo = 0; + estats->etherstatsoverrsizepkts_hi = 0; + estats->etherstatsoverrsizepkts_lo = 0; + estats->no_buff_discard_hi = 0; + estats->no_buff_discard_lo = 0; + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int cl_id = fp->cl_id; + struct tstorm_per_client_stats *tclient = + &stats->tstorm_common.client_statistics[cl_id]; + struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; + struct ustorm_per_client_stats *uclient = + &stats->ustorm_common.client_statistics[cl_id]; + struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; + struct xstorm_per_client_stats *xclient = + &stats->xstorm_common.client_statistics[cl_id]; + struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + u32 diff; + + /* are storm stats valid? 
*/ + if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" + " xstorm counter (0x%x) != stats_counter (0x%x)\n", + i, xclient->stats_counter, cur_stats_counter + 1); + return -1; + } + if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" + " tstorm counter (0x%x) != stats_counter (0x%x)\n", + i, tclient->stats_counter, cur_stats_counter + 1); + return -2; + } + if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" + " ustorm counter (0x%x) != stats_counter (0x%x)\n", + i, uclient->stats_counter, cur_stats_counter + 1); + return -4; + } + + qstats->total_bytes_received_hi = + le32_to_cpu(tclient->rcv_broadcast_bytes.hi); + qstats->total_bytes_received_lo = + le32_to_cpu(tclient->rcv_broadcast_bytes.lo); + + ADD_64(qstats->total_bytes_received_hi, + le32_to_cpu(tclient->rcv_multicast_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(tclient->rcv_multicast_bytes.lo)); + + ADD_64(qstats->total_bytes_received_hi, + le32_to_cpu(tclient->rcv_unicast_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(tclient->rcv_unicast_bytes.lo)); + + SUB_64(qstats->total_bytes_received_hi, + le32_to_cpu(uclient->bcast_no_buff_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); + + SUB_64(qstats->total_bytes_received_hi, + le32_to_cpu(uclient->mcast_no_buff_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); + + SUB_64(qstats->total_bytes_received_hi, + le32_to_cpu(uclient->ucast_no_buff_bytes.hi), + qstats->total_bytes_received_lo, + le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); + + qstats->valid_bytes_received_hi = + qstats->total_bytes_received_hi; + qstats->valid_bytes_received_lo = + qstats->total_bytes_received_lo; + + qstats->error_bytes_received_hi = + le32_to_cpu(tclient->rcv_error_bytes.hi); + qstats->error_bytes_received_lo = + le32_to_cpu(tclient->rcv_error_bytes.lo); + + ADD_64(qstats->total_bytes_received_hi, + qstats->error_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->error_bytes_received_lo); + + UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, + total_unicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, + total_multicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_TSTAT(packets_too_big_discard, + etherstatsoverrsizepkts); + UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); + + SUB_EXTEND_USTAT(ucast_no_buff_pkts, + total_unicast_packets_received); + SUB_EXTEND_USTAT(mcast_no_buff_pkts, + total_multicast_packets_received); + SUB_EXTEND_USTAT(bcast_no_buff_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); + + qstats->total_bytes_transmitted_hi = + le32_to_cpu(xclient->unicast_bytes_sent.hi); + qstats->total_bytes_transmitted_lo = + le32_to_cpu(xclient->unicast_bytes_sent.lo); + + ADD_64(qstats->total_bytes_transmitted_hi, + le32_to_cpu(xclient->multicast_bytes_sent.hi), + qstats->total_bytes_transmitted_lo, + le32_to_cpu(xclient->multicast_bytes_sent.lo)); + + ADD_64(qstats->total_bytes_transmitted_hi, + le32_to_cpu(xclient->broadcast_bytes_sent.hi), + qstats->total_bytes_transmitted_lo, + 
le32_to_cpu(xclient->broadcast_bytes_sent.lo)); + + UPDATE_EXTEND_XSTAT(unicast_pkts_sent, + total_unicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(multicast_pkts_sent, + total_multicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, + total_broadcast_packets_transmitted); + + old_tclient->checksum_discard = tclient->checksum_discard; + old_tclient->ttl0_discard = tclient->ttl0_discard; + + ADD_64(fstats->total_bytes_received_hi, + qstats->total_bytes_received_hi, + fstats->total_bytes_received_lo, + qstats->total_bytes_received_lo); + ADD_64(fstats->total_bytes_transmitted_hi, + qstats->total_bytes_transmitted_hi, + fstats->total_bytes_transmitted_lo, + qstats->total_bytes_transmitted_lo); + ADD_64(fstats->total_unicast_packets_received_hi, + qstats->total_unicast_packets_received_hi, + fstats->total_unicast_packets_received_lo, + qstats->total_unicast_packets_received_lo); + ADD_64(fstats->total_multicast_packets_received_hi, + qstats->total_multicast_packets_received_hi, + fstats->total_multicast_packets_received_lo, + qstats->total_multicast_packets_received_lo); + ADD_64(fstats->total_broadcast_packets_received_hi, + qstats->total_broadcast_packets_received_hi, + fstats->total_broadcast_packets_received_lo, + qstats->total_broadcast_packets_received_lo); + ADD_64(fstats->total_unicast_packets_transmitted_hi, + qstats->total_unicast_packets_transmitted_hi, + fstats->total_unicast_packets_transmitted_lo, + qstats->total_unicast_packets_transmitted_lo); + ADD_64(fstats->total_multicast_packets_transmitted_hi, + qstats->total_multicast_packets_transmitted_hi, + fstats->total_multicast_packets_transmitted_lo, + qstats->total_multicast_packets_transmitted_lo); + ADD_64(fstats->total_broadcast_packets_transmitted_hi, + qstats->total_broadcast_packets_transmitted_hi, + fstats->total_broadcast_packets_transmitted_lo, + qstats->total_broadcast_packets_transmitted_lo); + ADD_64(fstats->valid_bytes_received_hi, + qstats->valid_bytes_received_hi, + fstats->valid_bytes_received_lo, + qstats->valid_bytes_received_lo); + + ADD_64(estats->error_bytes_received_hi, + qstats->error_bytes_received_hi, + estats->error_bytes_received_lo, + qstats->error_bytes_received_lo); + ADD_64(estats->etherstatsoverrsizepkts_hi, + qstats->etherstatsoverrsizepkts_hi, + estats->etherstatsoverrsizepkts_lo, + qstats->etherstatsoverrsizepkts_lo); + ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, + estats->no_buff_discard_lo, qstats->no_buff_discard_lo); + } + + ADD_64(fstats->total_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + fstats->total_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + memcpy(estats, &(fstats->total_bytes_received_hi), + sizeof(struct host_func_stats) - 2*sizeof(u32)); + + ADD_64(estats->etherstatsoverrsizepkts_hi, + estats->rx_stat_dot3statsframestoolong_hi, + estats->etherstatsoverrsizepkts_lo, + estats->rx_stat_dot3statsframestoolong_lo); + ADD_64(estats->error_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->error_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + if (bp->port.pmf) { + estats->mac_filter_discard = + le32_to_cpu(tport->mac_filter_discard); + estats->xxoverflow_discard = + le32_to_cpu(tport->xxoverflow_discard); + estats->brb_truncate_discard = + le32_to_cpu(tport->brb_truncate_discard); + estats->mac_discard = le32_to_cpu(tport->mac_discard); + } + + fstats->host_func_stats_start = ++fstats->host_func_stats_end; + + bp->stats_pending = 0; + + return 0; +} + +static void 
bnx2x_net_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct net_device_stats *nstats = &bp->dev->stats; + int i; + + nstats->rx_packets = + bnx2x_hilo(&estats->total_unicast_packets_received_hi) + + bnx2x_hilo(&estats->total_multicast_packets_received_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_received_hi); + + nstats->tx_packets = + bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); + + nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); + + nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); + + nstats->rx_dropped = estats->mac_discard; + for_each_queue(bp, i) + nstats->rx_dropped += + le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); + + nstats->tx_dropped = 0; + + nstats->multicast = + bnx2x_hilo(&estats->total_multicast_packets_received_hi); + + nstats->collisions = + bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); + + nstats->rx_length_errors = + bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + + bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); + nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + + bnx2x_hilo(&estats->brb_truncate_hi); + nstats->rx_crc_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); + nstats->rx_frame_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); + nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); + nstats->rx_missed_errors = estats->xxoverflow_discard; + + nstats->rx_errors = nstats->rx_length_errors + + nstats->rx_over_errors + + nstats->rx_crc_errors + + nstats->rx_frame_errors + + nstats->rx_fifo_errors + + nstats->rx_missed_errors; + + nstats->tx_aborted_errors = + bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + + bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); + nstats->tx_carrier_errors = + bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); + nstats->tx_fifo_errors = 0; + nstats->tx_heartbeat_errors = 0; + nstats->tx_window_errors = 0; + + nstats->tx_errors = nstats->tx_aborted_errors + + nstats->tx_carrier_errors + + bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); +} + +static void bnx2x_drv_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; + + estats->driver_xoff = 0; + estats->rx_err_discard_pkt = 0; + estats->rx_skb_alloc_failed = 0; + estats->hw_csum_err = 0; + for_each_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; + + estats->driver_xoff += qstats->driver_xoff; + estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; + estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; + estats->hw_csum_err += qstats->hw_csum_err; + } +} + +static void bnx2x_stats_update(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + if (*stats_comp != DMAE_COMP_VAL) + return; + + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); + + if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + return; + } + + bnx2x_net_stats_update(bp); + bnx2x_drv_stats_update(bp); + + if (netif_msg_timer(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; + + printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", + bp->dev->name, + estats->brb_drop_lo, estats->brb_truncate_lo); + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + 
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + + printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" + " rx pkt(%lu) rx calls(%lu %lu)\n", + fp->name, (le16_to_cpu(*fp->rx_cons_sb) - + fp->rx_comp_cons), + le16_to_cpu(*fp->rx_cons_sb), + bnx2x_hilo(&qstats-> + total_unicast_packets_received_hi), + fp->rx_calls, fp->rx_pkt); + } + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct netdev_queue *txq = + netdev_get_tx_queue(bp->dev, i); + + printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" + " tx pkt(%lu) tx calls (%lu)" + " %s (Xoff events %u)\n", + fp->name, bnx2x_tx_avail(fp), + le16_to_cpu(*fp->tx_cons_sb), + bnx2x_hilo(&qstats-> + total_unicast_packets_transmitted_hi), + fp->tx_pkt, + (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"), + qstats->driver_xoff); + } + } + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_port_stats_stop(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + bp->executer_idx = 0; + + opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + if (bp->func_stx) + dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); + else + dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + if (bp->func_stx) { + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + dmae->comp_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } +} + +static void bnx2x_stats_stop(struct bnx2x *bp) +{ + int update = 0; + + bnx2x_stats_comp(bp); + + if (bp->port.pmf) + update = (bnx2x_hw_stats_update(bp) == 0); + + update |= (bnx2x_storm_stats_update(bp) == 0); + + if (update) { + bnx2x_net_stats_update(bp); + + if (bp->port.pmf) + bnx2x_port_stats_stop(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } +} + +static void bnx2x_stats_do_nothing(struct bnx2x *bp) +{ +} + +static const struct { + void (*action)(struct bnx2x *bp); + enum bnx2x_stats_state next_state; +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { +/* state event */ +{ +/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, +/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, +/* UPDATE */ 
{bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, +/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} +}, +{ +/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, +/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, +/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} +} +}; + +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) +{ + enum bnx2x_stats_state state; + + if (unlikely(bp->panic)) + return; + + /* Protect a state change flow */ + spin_lock_bh(&bp->stats_lock); + state = bp->stats_state; + bp->stats_state = bnx2x_stats_stm[state][event].next_state; + spin_unlock_bh(&bp->stats_lock); + + bnx2x_stats_stm[state][event].action(bp); + + if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) + DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", + state, event, bp->stats_state); +} + +static void bnx2x_port_stats_base_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_func_stats_base_init(struct bnx2x *bp) +{ + int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX; + int port = BP_PORT(bp); + int func; + u32 func_stx; + + /* sanity */ + if (!bp->port.pmf || !bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + /* save our func_stx */ + func_stx = bp->func_stx; + + for (vn = VN_0; vn < vn_max; vn++) { + func = 2*vn + port; + + bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); + bnx2x_func_stats_init(bp); + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } + + /* restore our func_stx */ + bp->func_stx = func_stx; +} + +static void bnx2x_func_stats_base_update(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | + DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | + DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET | +#ifdef __BIG_ENDIAN + DMAE_CMD_ENDIANITY_B_DW_SWAP | +#else + DMAE_CMD_ENDIANITY_DW_SWAP | +#endif + (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | + (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); + dmae->src_addr_lo = bp->func_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +void bnx2x_stats_init(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int i; + + bp->stats_pending = 0; + bp->executer_idx = 0; + bp->stats_counter = 0; + + /* port and func stats for management */ + if (!BP_NOMCP(bp)) { + bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); + bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); + + } else { + bp->port.port_stx = 0; + bp->func_stx = 0; + } + DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", + bp->port.port_stx, bp->func_stx); + + /* port stats */ + memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); + bp->port.old_nig_stats.brb_discard = + REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); + bp->port.old_nig_stats.brb_truncate = + REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + memset(&fp->old_tclient, 0, + sizeof(struct tstorm_per_client_stats)); + memset(&fp->old_uclient, 0, + sizeof(struct ustorm_per_client_stats)); + memset(&fp->old_xclient, 0, + sizeof(struct xstorm_per_client_stats)); + memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); + } + + memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); + memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf) { + if (bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + if (bp->func_stx) + bnx2x_func_stats_base_init(bp); + + } else if (bp->func_stx) + bnx2x_func_stats_base_update(bp); +} diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..38a4e908f4fbe141be6ee1149a06727036a9e0d3 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_stats.h @@ -0,0 +1,239 @@ +/* bnx2x_stats.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + */ + +#ifndef BNX2X_STATS_H +#define BNX2X_STATS_H + +#include + +struct bnx2x_eth_q_stats { + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 error_bytes_received_hi; + u32 error_bytes_received_lo; + u32 etherstatsoverrsizepkts_hi; + u32 etherstatsoverrsizepkts_lo; + u32 no_buff_discard_hi; + u32 no_buff_discard_lo; + + u32 driver_xoff; + u32 rx_err_discard_pkt; + u32 rx_skb_alloc_failed; + u32 hw_csum_err; +}; + +#define BNX2X_NUM_Q_STATS 13 +#define Q_STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) + +struct nig_stats { + u32 brb_discard; + u32 brb_packet; + u32 brb_truncate; + u32 flow_ctrl_discard; + u32 flow_ctrl_octets; + u32 flow_ctrl_packet; + u32 mng_discard; + u32 mng_octet_inp; + u32 mng_octet_out; + u32 mng_packet_inp; + u32 mng_packet_out; + u32 pbf_octets; + u32 pbf_packet; + u32 safc_inp; + u32 egress_mac_pkt0_lo; + u32 egress_mac_pkt0_hi; + u32 egress_mac_pkt1_lo; + u32 egress_mac_pkt1_hi; +}; + + +enum bnx2x_stats_event { + STATS_EVENT_PMF = 0, + STATS_EVENT_LINK_UP, + STATS_EVENT_UPDATE, + STATS_EVENT_STOP, + STATS_EVENT_MAX +}; + +enum bnx2x_stats_state { + STATS_STATE_DISABLED = 0, + STATS_STATE_ENABLED, + STATS_STATE_MAX +}; + +struct bnx2x_eth_stats { + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 error_bytes_received_hi; + u32 error_bytes_received_lo; + u32 etherstatsoverrsizepkts_hi; + u32 etherstatsoverrsizepkts_lo; + u32 no_buff_discard_hi; + u32 no_buff_discard_lo; + + u32 rx_stat_ifhcinbadoctets_hi; + u32 rx_stat_ifhcinbadoctets_lo; + u32 tx_stat_ifhcoutbadoctets_hi; + u32 tx_stat_ifhcoutbadoctets_lo; + u32 rx_stat_dot3statsfcserrors_hi; + u32 rx_stat_dot3statsfcserrors_lo; + u32 rx_stat_dot3statsalignmenterrors_hi; + u32 rx_stat_dot3statsalignmenterrors_lo; + u32 rx_stat_dot3statscarriersenseerrors_hi; + u32 rx_stat_dot3statscarriersenseerrors_lo; + u32 rx_stat_falsecarriererrors_hi; + u32 rx_stat_falsecarriererrors_lo; + u32 rx_stat_etherstatsundersizepkts_hi; + u32 rx_stat_etherstatsundersizepkts_lo; + u32 rx_stat_dot3statsframestoolong_hi; + u32 
rx_stat_dot3statsframestoolong_lo; + u32 rx_stat_etherstatsfragments_hi; + u32 rx_stat_etherstatsfragments_lo; + u32 rx_stat_etherstatsjabbers_hi; + u32 rx_stat_etherstatsjabbers_lo; + u32 rx_stat_maccontrolframesreceived_hi; + u32 rx_stat_maccontrolframesreceived_lo; + u32 rx_stat_bmac_xpf_hi; + u32 rx_stat_bmac_xpf_lo; + u32 rx_stat_bmac_xcf_hi; + u32 rx_stat_bmac_xcf_lo; + u32 rx_stat_xoffstateentered_hi; + u32 rx_stat_xoffstateentered_lo; + u32 rx_stat_xonpauseframesreceived_hi; + u32 rx_stat_xonpauseframesreceived_lo; + u32 rx_stat_xoffpauseframesreceived_hi; + u32 rx_stat_xoffpauseframesreceived_lo; + u32 tx_stat_outxonsent_hi; + u32 tx_stat_outxonsent_lo; + u32 tx_stat_outxoffsent_hi; + u32 tx_stat_outxoffsent_lo; + u32 tx_stat_flowcontroldone_hi; + u32 tx_stat_flowcontroldone_lo; + u32 tx_stat_etherstatscollisions_hi; + u32 tx_stat_etherstatscollisions_lo; + u32 tx_stat_dot3statssinglecollisionframes_hi; + u32 tx_stat_dot3statssinglecollisionframes_lo; + u32 tx_stat_dot3statsmultiplecollisionframes_hi; + u32 tx_stat_dot3statsmultiplecollisionframes_lo; + u32 tx_stat_dot3statsdeferredtransmissions_hi; + u32 tx_stat_dot3statsdeferredtransmissions_lo; + u32 tx_stat_dot3statsexcessivecollisions_hi; + u32 tx_stat_dot3statsexcessivecollisions_lo; + u32 tx_stat_dot3statslatecollisions_hi; + u32 tx_stat_dot3statslatecollisions_lo; + u32 tx_stat_etherstatspkts64octets_hi; + u32 tx_stat_etherstatspkts64octets_lo; + u32 tx_stat_etherstatspkts65octetsto127octets_hi; + u32 tx_stat_etherstatspkts65octetsto127octets_lo; + u32 tx_stat_etherstatspkts128octetsto255octets_hi; + u32 tx_stat_etherstatspkts128octetsto255octets_lo; + u32 tx_stat_etherstatspkts256octetsto511octets_hi; + u32 tx_stat_etherstatspkts256octetsto511octets_lo; + u32 tx_stat_etherstatspkts512octetsto1023octets_hi; + u32 tx_stat_etherstatspkts512octetsto1023octets_lo; + u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; + u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; + u32 tx_stat_etherstatspktsover1522octets_hi; + u32 tx_stat_etherstatspktsover1522octets_lo; + u32 tx_stat_bmac_2047_hi; + u32 tx_stat_bmac_2047_lo; + u32 tx_stat_bmac_4095_hi; + u32 tx_stat_bmac_4095_lo; + u32 tx_stat_bmac_9216_hi; + u32 tx_stat_bmac_9216_lo; + u32 tx_stat_bmac_16383_hi; + u32 tx_stat_bmac_16383_lo; + u32 tx_stat_dot3statsinternalmactransmiterrors_hi; + u32 tx_stat_dot3statsinternalmactransmiterrors_lo; + u32 tx_stat_bmac_ufl_hi; + u32 tx_stat_bmac_ufl_lo; + + u32 pause_frames_received_hi; + u32 pause_frames_received_lo; + u32 pause_frames_sent_hi; + u32 pause_frames_sent_lo; + + u32 etherstatspkts1024octetsto1522octets_hi; + u32 etherstatspkts1024octetsto1522octets_lo; + u32 etherstatspktsover1522octets_hi; + u32 etherstatspktsover1522octets_lo; + + u32 brb_drop_hi; + u32 brb_drop_lo; + u32 brb_truncate_hi; + u32 brb_truncate_lo; + + u32 mac_filter_discard; + u32 xxoverflow_discard; + u32 brb_truncate_discard; + u32 mac_discard; + + u32 driver_xoff; + u32 rx_err_discard_pkt; + u32 rx_skb_alloc_failed; + u32 hw_csum_err; + + u32 nig_timer_max; +}; + +#define BNX2X_NUM_STATS 43 +#define STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_stats, stat_name) / 4) + +/* Forward declaration */ +struct bnx2x; + + +void bnx2x_stats_init(struct bnx2x *bp); + +extern const u32 dmae_reg_go_c[]; +extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int common); + + +#endif /* BNX2X_STATS_H */ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 
8d7dfd2f1e90cf1bd7d32fcc1a0aa1da87a61332..c746b331771d38f38771c89aa492ede20cbdf711 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -233,34 +233,27 @@ static void tlb_deinitialize(struct bonding *bond) _unlock_tx_hashtbl(bond); } +static long long compute_gap(struct slave *slave) +{ + return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */ + (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ +} + /* Caller must hold bond lock for read */ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) { struct slave *slave, *least_loaded; - s64 max_gap; - int i, found = 0; - - /* Find the first enabled slave */ - bond_for_each_slave(bond, slave, i) { - if (SLAVE_IS_OK(slave)) { - found = 1; - break; - } - } - - if (!found) { - return NULL; - } + long long max_gap; + int i; - least_loaded = slave; - max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */ - (s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ + least_loaded = NULL; + max_gap = LLONG_MIN; /* Find the slave with the largest gap */ - bond_for_each_slave_from(bond, slave, i, least_loaded) { + bond_for_each_slave(bond, slave, i) { if (SLAVE_IS_OK(slave)) { - s64 gap = (s64)(slave->speed << 20) - - (s64)(SLAVE_TLB_INFO(slave).load << 3); + long long gap = compute_gap(slave); + if (max_gap < gap) { least_loaded = slave; max_gap = gap; @@ -689,7 +682,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon client_info->ntt = 0; } - if (!list_empty(&bond->vlan_list)) { + if (bond->vlgrp) { if (!vlan_get_tag(skb, &client_info->vlan_id)) client_info->tag = 1; } @@ -911,7 +904,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; - if (!list_empty(&bond->vlan_list)) { + if (bond->vlgrp) { struct vlan_entry *vlan; vlan = bond_next_vlan(bond, diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c index 969ffed86b9f4e217581f61aa67a5cda815c21f9..121b073a6c3fae1985c164071f0a8f285dba0c14 100644 --- a/drivers/net/bonding/bond_ipv6.c +++ b/drivers/net/bonding/bond_ipv6.c @@ -178,6 +178,8 @@ static int bond_inet6addr_event(struct notifier_block *this, } list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { + if (!bond->vlgrp) + continue; vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); if (vlan_dev == event_dev) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c3d98dde2f86a5812067b2ef8ee52659b66dcda5..2cc4cfc31892cd85458dec20b6c46401fdb90d9b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -90,6 +90,7 @@ #define BOND_LINK_ARP_INTERV 0 static int max_bonds = BOND_DEFAULT_MAX_BONDS; +static int tx_queues = BOND_DEFAULT_TX_QUEUES; static int num_grat_arp = 1; static int num_unsol_na = 1; static int miimon = BOND_LINK_MON_INTERV; @@ -106,10 +107,13 @@ static int arp_interval = BOND_LINK_ARP_INTERV; static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; static char *arp_validate; static char *fail_over_mac; +static int all_slaves_active = 0; static struct bond_params bonding_defaults; module_param(max_bonds, int, 0); MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); +module_param(tx_queues, int, 0); +MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); module_param(num_grat_arp, int, 0644); MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); module_param(num_unsol_na, int, 
0644); @@ -155,6 +159,10 @@ module_param(arp_validate, charp, 0); MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); module_param(fail_over_mac, charp, 0); MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); +module_param(all_slaves_active, int, 0); +MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" + "by setting active flag for all slaves. " + "0 for never (default), 1 for always."); /*----------------------------- Global variables ----------------------------*/ @@ -168,7 +176,9 @@ static int arp_ip_count; static int bond_mode = BOND_MODE_ROUNDROBIN; static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; static int lacp_fast; +#ifdef CONFIG_NET_POLL_CONTROLLER static int disable_netpoll = 1; +#endif const struct bond_parm_tbl bond_lacp_tbl[] = { { "slow", AD_LACP_SLOW}, @@ -414,6 +424,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, { unsigned short uninitialized_var(vlan_id); + /* Test vlan_list not vlgrp to catch and handle 802.1p tags */ if (!list_empty(&bond->vlan_list) && !(slave_dev->features & NETIF_F_HW_VLAN_TX) && vlan_get_tag(skb, &vlan_id) == 0) { @@ -477,7 +488,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev, struct slave *slave; int i; + write_lock(&bond->lock); bond->vlgrp = grp; + write_unlock(&bond->lock); bond_for_each_slave(bond, slave, i) { struct net_device *slave_dev = slave->dev; @@ -557,10 +570,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla struct vlan_entry *vlan; const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - write_lock_bh(&bond->lock); - - if (list_empty(&bond->vlan_list)) - goto out; + if (!bond->vlgrp) + return; if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && slave_ops->ndo_vlan_rx_register) @@ -568,13 +579,10 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || !(slave_ops->ndo_vlan_rx_add_vid)) - goto out; + return; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id); - -out: - write_unlock_bh(&bond->lock); } static void bond_del_vlans_from_slave(struct bonding *bond, @@ -584,16 +592,16 @@ static void bond_del_vlans_from_slave(struct bonding *bond, struct vlan_entry *vlan; struct net_device *vlan_dev; - write_lock_bh(&bond->lock); - - if (list_empty(&bond->vlan_list)) - goto out; + if (!bond->vlgrp) + return; if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || !(slave_ops->ndo_vlan_rx_kill_vid)) goto unreg; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { + if (!vlan->vlan_id) + continue; /* Save and then restore vlan_dev in the grp array, * since the slave's driver might clear it. 
*/ @@ -606,9 +614,6 @@ static void bond_del_vlans_from_slave(struct bonding *bond, if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && slave_ops->ndo_vlan_rx_register) slave_ops->ndo_vlan_rx_register(slave_dev, NULL); - -out: - write_unlock_bh(&bond->lock); } /*------------------------------- Link status -------------------------------*/ @@ -1433,7 +1438,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* no need to lock since we're protected by rtnl_lock */ if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); - if (!list_empty(&bond->vlan_list)) { + if (bond->vlgrp) { pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n", bond_dev->name, slave_dev->name, bond_dev->name); return -EPERM; @@ -1522,16 +1527,32 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } } + /* If this is the first slave, then we need to set the master's hardware + * address to be the same as the slave's. */ + if (bond->slave_cnt == 0) + memcpy(bond->dev->dev_addr, slave_dev->dev_addr, + slave_dev->addr_len); + + new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); if (!new_slave) { res = -ENOMEM; goto err_undo_flags; } - /* save slave's original flags before calling - * netdev_set_master and dev_open + /* + * Set the new_slave's queue_id to be zero. Queue ID mapping + * is set via sysfs or module option if desired. */ - new_slave->original_flags = slave_dev->flags; + new_slave->queue_id = 0; + + /* Save slave's original mtu and then set it to match the bond */ + new_slave->original_mtu = slave_dev->mtu; + res = dev_set_mtu(slave_dev, bond->dev->mtu); + if (res) { + pr_debug("Error %d calling dev_set_mtu\n", res); + goto err_free; + } /* * Save slave's original ("permanent") mac address for modes @@ -1550,7 +1571,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = dev_set_mac_address(slave_dev, &addr); if (res) { pr_debug("Error %d calling set_mac_address\n", res); - goto err_free; + goto err_restore_mtu; } } @@ -1793,6 +1814,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) dev_set_mac_address(slave_dev, &addr); } +err_restore_mtu: + dev_set_mtu(slave_dev, new_slave->original_mtu); + err_free: kfree(new_slave); @@ -1913,7 +1937,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) */ memset(bond_dev->dev_addr, 0, bond_dev->addr_len); - if (list_empty(&bond->vlan_list)) { + if (!bond->vlgrp) { bond_dev->features |= NETIF_F_VLAN_CHALLENGED; } else { pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", @@ -1980,6 +2004,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) dev_set_mac_address(slave_dev, &addr); } + dev_set_mtu(slave_dev, slave->original_mtu); + slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | IFF_SLAVE_INACTIVE | IFF_BONDING | IFF_SLAVE_NEEDARP); @@ -2103,9 +2129,9 @@ static int bond_release_all(struct net_device *bond_dev) */ memset(bond_dev->dev_addr, 0, bond_dev->addr_len); - if (list_empty(&bond->vlan_list)) + if (!bond->vlgrp) { bond_dev->features |= NETIF_F_VLAN_CHALLENGED; - else { + } else { pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", bond_dev->name, bond_dev->name); pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", @@ -2538,7 +2564,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave 
*slave) if (!targets[i]) break; pr_debug("basa: target %x\n", targets[i]); - if (list_empty(&bond->vlan_list)) { + if (!bond->vlgrp) { pr_debug("basa: empty vlan: arp_send\n"); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], bond->master_ip, 0); @@ -2566,7 +2592,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) /* * This target is not on a VLAN */ - if (rt->u.dst.dev == bond->dev) { + if (rt->dst.dev == bond->dev) { ip_rt_put(rt); pr_debug("basa: rtdev == bond->dev: arp_send\n"); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], @@ -2577,7 +2603,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) vlan_id = 0; list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); - if (vlan_dev == rt->u.dst.dev) { + if (vlan_dev == rt->dst.dev) { vlan_id = vlan->vlan_id; pr_debug("basa: vlan match on %s %d\n", vlan_dev->name, vlan_id); @@ -2595,7 +2621,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) if (net_ratelimit()) { pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &fl.fl4_dst, - rt->u.dst.dev ? rt->u.dst.dev->name : "NULL"); + rt->dst.dev ? rt->dst.dev->name : "NULL"); } ip_rt_put(rt); } @@ -2627,6 +2653,9 @@ static void bond_send_gratuitous_arp(struct bonding *bond) bond->master_ip, 0); } + if (!bond->vlgrp) + return; + list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); if (vlan->vlan_ip) { @@ -3276,6 +3305,7 @@ static void bond_info_show_slave(struct seq_file *seq, else seq_puts(seq, "Aggregator ID: N/A\n"); } + seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); } static int bond_info_seq_show(struct seq_file *seq, void *v) @@ -3558,6 +3588,8 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, } list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { + if (!bond->vlgrp) + continue; vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); if (vlan_dev == event_dev) { switch (event) { @@ -3785,50 +3817,49 @@ static int bond_close(struct net_device *bond_dev) return 0; } -static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) +static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats) { struct bonding *bond = netdev_priv(bond_dev); - struct net_device_stats *stats = &bond_dev->stats; - struct net_device_stats local_stats; + struct rtnl_link_stats64 temp; struct slave *slave; int i; - memset(&local_stats, 0, sizeof(struct net_device_stats)); + memset(stats, 0, sizeof(*stats)); read_lock_bh(&bond->lock); bond_for_each_slave(bond, slave, i) { - const struct net_device_stats *sstats = dev_get_stats(slave->dev); + const struct rtnl_link_stats64 *sstats = + dev_get_stats(slave->dev, &temp); - local_stats.rx_packets += sstats->rx_packets; - local_stats.rx_bytes += sstats->rx_bytes; - local_stats.rx_errors += sstats->rx_errors; - local_stats.rx_dropped += sstats->rx_dropped; + stats->rx_packets += sstats->rx_packets; + stats->rx_bytes += sstats->rx_bytes; + stats->rx_errors += sstats->rx_errors; + stats->rx_dropped += sstats->rx_dropped; - local_stats.tx_packets += sstats->tx_packets; - local_stats.tx_bytes += sstats->tx_bytes; - local_stats.tx_errors += sstats->tx_errors; - local_stats.tx_dropped += sstats->tx_dropped; + stats->tx_packets += sstats->tx_packets; + stats->tx_bytes += sstats->tx_bytes; + stats->tx_errors += sstats->tx_errors; 
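The conversion in progress around this hunk moves bonding from ndo_get_stats to ndo_get_stats64: instead of accumulating into a driver-local struct net_device_stats and copying it out at the end, the 64-bit counters are summed directly into the caller-supplied struct rtnl_link_stats64. A stripped-down sketch of that aggregation shape (simplified stand-in types, only a few counters shown):

    #include <string.h>

    struct stats64 {
        unsigned long long rx_packets, rx_bytes;
        unsigned long long tx_packets, tx_bytes;
    };

    struct member {
        struct stats64 stats;
        struct member *next;
    };

    static struct stats64 *aggregate(struct stats64 *out, const struct member *m)
    {
        memset(out, 0, sizeof(*out));   /* caller-provided, so clear it first */
        for (; m; m = m->next) {
            out->rx_packets += m->stats.rx_packets;
            out->rx_bytes   += m->stats.rx_bytes;
            out->tx_packets += m->stats.tx_packets;
            out->tx_bytes   += m->stats.tx_bytes;
        }
        return out;                     /* mirrors returning 'stats' */
    }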
+ stats->tx_dropped += sstats->tx_dropped; - local_stats.multicast += sstats->multicast; - local_stats.collisions += sstats->collisions; + stats->multicast += sstats->multicast; + stats->collisions += sstats->collisions; - local_stats.rx_length_errors += sstats->rx_length_errors; - local_stats.rx_over_errors += sstats->rx_over_errors; - local_stats.rx_crc_errors += sstats->rx_crc_errors; - local_stats.rx_frame_errors += sstats->rx_frame_errors; - local_stats.rx_fifo_errors += sstats->rx_fifo_errors; - local_stats.rx_missed_errors += sstats->rx_missed_errors; + stats->rx_length_errors += sstats->rx_length_errors; + stats->rx_over_errors += sstats->rx_over_errors; + stats->rx_crc_errors += sstats->rx_crc_errors; + stats->rx_frame_errors += sstats->rx_frame_errors; + stats->rx_fifo_errors += sstats->rx_fifo_errors; + stats->rx_missed_errors += sstats->rx_missed_errors; - local_stats.tx_aborted_errors += sstats->tx_aborted_errors; - local_stats.tx_carrier_errors += sstats->tx_carrier_errors; - local_stats.tx_fifo_errors += sstats->tx_fifo_errors; - local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors; - local_stats.tx_window_errors += sstats->tx_window_errors; + stats->tx_aborted_errors += sstats->tx_aborted_errors; + stats->tx_carrier_errors += sstats->tx_carrier_errors; + stats->tx_fifo_errors += sstats->tx_fifo_errors; + stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; + stats->tx_window_errors += sstats->tx_window_errors; } - memcpy(stats, &local_stats, sizeof(struct net_device_stats)); - read_unlock_bh(&bond->lock); return stats; @@ -4412,9 +4443,59 @@ static void bond_set_xmit_hash_policy(struct bonding *bond) } } +/* + * Lookup the slave that corresponds to a qid + */ +static inline int bond_slave_override(struct bonding *bond, + struct sk_buff *skb) +{ + int i, res = 1; + struct slave *slave = NULL; + struct slave *check_slave; + + read_lock(&bond->lock); + + if (!BOND_IS_OK(bond) || !skb->queue_mapping) + goto out; + + /* Find out if any slaves have the same mapping as this skb. */ + bond_for_each_slave(bond, check_slave, i) { + if (check_slave->queue_id == skb->queue_mapping) { + slave = check_slave; + break; + } + } + + /* If the slave isn't UP, use default transmit policy. */ + if (slave && slave->queue_id && IS_UP(slave->dev) && + (slave->link == BOND_LINK_UP)) { + res = bond_dev_queue_xmit(bond, skb, slave->dev); + } + +out: + read_unlock(&bond->lock); + return res; +} + +static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + /* + * This helper function exists to help dev_pick_tx get the correct + * destination queue. Using a helper function skips the a call to + * skb_tx_hash and will put the skbs in the queue we expect on their + * way down to the bonding driver. 
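bond_slave_override() above only honours skb->queue_mapping when it is non-zero and matches a slave that is up; in every other case it reports "not handled" (res = 1) so that bond_start_xmit() falls back to the mode's normal transmit policy. A standalone sketch of that lookup rule (struct member and the table are illustrative stand-ins, not driver structures):

    struct member {
        unsigned short queue_id;        /* 0 = no explicit mapping */
        int up;
    };

    static struct member *pick_by_queue(struct member *tbl, int n,
                                        unsigned short skb_queue_mapping)
    {
        int i;

        if (!skb_queue_mapping)
            return NULL;                /* no mapping requested */

        for (i = 0; i < n; i++)
            if (tbl[i].queue_id == skb_queue_mapping && tbl[i].up)
                return &tbl[i];

        return NULL;                    /* caller uses the default policy */
    }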
+ */ + return skb->queue_mapping; +} + static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) { - const struct bonding *bond = netdev_priv(dev); + struct bonding *bond = netdev_priv(dev); + + if (TX_QUEUE_OVERRIDE(bond->params.mode)) { + if (!bond_slave_override(bond, skb)) + return NETDEV_TX_OK; + } switch (bond->params.mode) { case BOND_MODE_ROUNDROBIN: @@ -4499,7 +4580,8 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_open = bond_open, .ndo_stop = bond_close, .ndo_start_xmit = bond_start_xmit, - .ndo_get_stats = bond_get_stats, + .ndo_select_queue = bond_select_queue, + .ndo_get_stats64 = bond_get_stats, .ndo_do_ioctl = bond_do_ioctl, .ndo_set_multicast_list = bond_set_multicast_list, .ndo_change_mtu = bond_change_mtu, @@ -4604,6 +4686,7 @@ static void bond_work_cancel_all(struct bonding *bond) static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct vlan_entry *vlan, *tmp; bond_netpoll_cleanup(bond_dev); @@ -4617,6 +4700,11 @@ static void bond_uninit(struct net_device *bond_dev) bond_remove_proc_entry(bond); __hw_addr_flush(&bond->mc_list); + + list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { + list_del(&vlan->vlan_list); + kfree(vlan); + } } /*------------------------- Module initialization ---------------------------*/ @@ -4767,6 +4855,20 @@ static int bond_check_params(struct bond_params *params) } } + if (tx_queues < 1 || tx_queues > 255) { + pr_warning("Warning: tx_queues (%d) should be between " + "1 and 255, resetting to %d\n", + tx_queues, BOND_DEFAULT_TX_QUEUES); + tx_queues = BOND_DEFAULT_TX_QUEUES; + } + + if ((all_slaves_active != 0) && (all_slaves_active != 1)) { + pr_warning("Warning: all_slaves_active module parameter (%d), " + "not of valid value (0/1), so it was set to " + "0\n", all_slaves_active); + all_slaves_active = 0; + } + /* reset values for TLB/ALB */ if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { @@ -4937,6 +5039,8 @@ static int bond_check_params(struct bond_params *params) params->primary[0] = 0; params->primary_reselect = primary_reselect_value; params->fail_over_mac = fail_over_mac_value; + params->tx_queues = tx_queues; + params->all_slaves_active = all_slaves_active; if (primary) { strncpy(params->primary, primary, IFNAMSIZ); @@ -5023,8 +5127,8 @@ int bond_create(struct net *net, const char *name) rtnl_lock(); - bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", - bond_setup); + bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "", + bond_setup, tx_queues); if (!bond_dev) { pr_err("%s: eek! can't alloc netdev!\n", name); rtnl_unlock(); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index b8bec086daa155b2c6f36884a059f4749f5636fd..c311aed9bd022c2870b33fdc8c88a953d6cceb93 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -211,7 +211,8 @@ static ssize_t bonding_show_slaves(struct device *d, /* * Set the slaves in the current bond. The bond interface must be * up for this to succeed. - * This function is largely the same flow as bonding_update_bonds(). + * This is supposed to be only thin wrapper for bond_enslave and bond_release. + * All hard work should be done there. 
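bond_check_params() validates the new module options at load time and falls back to safe defaults rather than refusing to load: tx_queues must lie in 1..255 and all_slaves_active must be 0 or 1. A hedged userspace sketch of the same clamping behaviour (the default constant and messages are illustrative only):

    #include <stdio.h>

    #define DEFAULT_TX_QUEUES 16

    static int sanitize_tx_queues(int requested)
    {
        if (requested < 1 || requested > 255) {
            fprintf(stderr, "tx_queues (%d) out of range 1..255, using %d\n",
                    requested, DEFAULT_TX_QUEUES);
            return DEFAULT_TX_QUEUES;
        }
        return requested;
    }

    static int sanitize_bool(int requested, const char *name)
    {
        if (requested != 0 && requested != 1) {
            fprintf(stderr, "%s (%d) is not 0/1, using 0\n", name, requested);
            return 0;
        }
        return requested;
    }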
*/ static ssize_t bonding_store_slaves(struct device *d, struct device_attribute *attr, @@ -219,10 +220,8 @@ static ssize_t bonding_store_slaves(struct device *d, { char command[IFNAMSIZ + 1] = { 0, }; char *ifname; - int i, res, found, ret = count; - u32 original_mtu; - struct slave *slave; - struct net_device *dev = NULL; + int res, ret = count; + struct net_device *dev; struct bonding *bond = to_bond(d); /* Quick sanity check -- is the bond interface up? */ @@ -231,8 +230,6 @@ static ssize_t bonding_store_slaves(struct device *d, bond->dev->name); } - /* Note: We can't hold bond->lock here, as bond_create grabs it. */ - if (!rtnl_trylock()) return restart_syscall(); @@ -242,91 +239,33 @@ static ssize_t bonding_store_slaves(struct device *d, !dev_valid_name(ifname)) goto err_no_cmd; - if (command[0] == '+') { - - /* Got a slave name in ifname. Is it already in the list? */ - found = 0; - - dev = __dev_get_by_name(dev_net(bond->dev), ifname); - if (!dev) { - pr_info("%s: Interface %s does not exist!\n", - bond->dev->name, ifname); - ret = -ENODEV; - goto out; - } - - if (dev->flags & IFF_UP) { - pr_err("%s: Error: Unable to enslave %s because it is already up.\n", - bond->dev->name, dev->name); - ret = -EPERM; - goto out; - } - - read_lock(&bond->lock); - bond_for_each_slave(bond, slave, i) - if (slave->dev == dev) { - pr_err("%s: Interface %s is already enslaved!\n", - bond->dev->name, ifname); - ret = -EPERM; - read_unlock(&bond->lock); - goto out; - } - read_unlock(&bond->lock); - - pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname); - - /* If this is the first slave, then we need to set - the master's hardware address to be the same as the - slave's. */ - if (is_zero_ether_addr(bond->dev->dev_addr)) - memcpy(bond->dev->dev_addr, dev->dev_addr, - dev->addr_len); - - /* Set the slave's MTU to match the bond */ - original_mtu = dev->mtu; - res = dev_set_mtu(dev, bond->dev->mtu); - if (res) { - ret = res; - goto out; - } + dev = __dev_get_by_name(dev_net(bond->dev), ifname); + if (!dev) { + pr_info("%s: Interface %s does not exist!\n", + bond->dev->name, ifname); + ret = -ENODEV; + goto out; + } + switch (command[0]) { + case '+': + pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name); res = bond_enslave(bond->dev, dev); - bond_for_each_slave(bond, slave, i) - if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) - slave->original_mtu = original_mtu; - if (res) - ret = res; + break; - goto out; - } + case '-': + pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name); + res = bond_release(bond->dev, dev); + break; - if (command[0] == '-') { - dev = NULL; - original_mtu = 0; - bond_for_each_slave(bond, slave, i) - if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { - dev = slave->dev; - original_mtu = slave->original_mtu; - break; - } - if (dev) { - pr_info("%s: Removing slave %s\n", - bond->dev->name, dev->name); - res = bond_release(bond->dev, dev); - if (res) { - ret = res; - goto out; - } - /* set the slave MTU to the default */ - dev_set_mtu(dev, original_mtu); - } else { - pr_err("unable to remove non-existent slave %s for bond %s.\n", - ifname, bond->dev->name); - ret = -ENODEV; - } - goto out; + default: + goto err_no_cmd; } + if (res) + ret = res; + goto out; + err_no_cmd: pr_err("no command found in slaves file for bond %s. 
Use +ifname or -ifname.\n", bond->dev->name); @@ -374,19 +313,26 @@ static ssize_t bonding_store_mode(struct device *d, bond->dev->name, (int)strlen(buf) - 1, buf); ret = -EINVAL; goto out; - } else { - if (bond->params.mode == BOND_MODE_8023AD) - bond_unset_master_3ad_flags(bond); + } + if ((new_value == BOND_MODE_ALB || + new_value == BOND_MODE_TLB) && + bond->params.arp_interval) { + pr_err("%s: %s mode is incompatible with arp monitoring.\n", + bond->dev->name, bond_mode_tbl[new_value].modename); + ret = -EINVAL; + goto out; + } + if (bond->params.mode == BOND_MODE_8023AD) + bond_unset_master_3ad_flags(bond); - if (bond->params.mode == BOND_MODE_ALB) - bond_unset_master_alb_flags(bond); + if (bond->params.mode == BOND_MODE_ALB) + bond_unset_master_alb_flags(bond); - bond->params.mode = new_value; - bond_set_mode_ops(bond, bond->params.mode); - pr_info("%s: setting mode to %s (%d).\n", - bond->dev->name, bond_mode_tbl[new_value].modename, - new_value); - } + bond->params.mode = new_value; + bond_set_mode_ops(bond, bond->params.mode); + pr_info("%s: setting mode to %s (%d).\n", + bond->dev->name, bond_mode_tbl[new_value].modename, + new_value); out: return ret; } @@ -571,7 +517,13 @@ static ssize_t bonding_store_arp_interval(struct device *d, ret = -EINVAL; goto out; } - + if (bond->params.mode == BOND_MODE_ALB || + bond->params.mode == BOND_MODE_TLB) { + pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n", + bond->dev->name, bond->dev->name); + ret = -EINVAL; + goto out; + } pr_info("%s: Setting ARP monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.arp_interval = new_value; @@ -1472,7 +1424,173 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d, } static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); +/* + * Show the queue_ids of the slaves in the current bond. + */ +static ssize_t bonding_show_queue_id(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct slave *slave; + int i, res = 0; + struct bonding *bond = to_bond(d); + + if (!rtnl_trylock()) + return restart_syscall(); + + read_lock(&bond->lock); + bond_for_each_slave(bond, slave, i) { + if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { + /* not enough space for another interface_name:queue_id pair */ + if ((PAGE_SIZE - res) > 10) + res = PAGE_SIZE - 10; + res += sprintf(buf + res, "++more++ "); + break; + } + res += sprintf(buf + res, "%s:%d ", + slave->dev->name, slave->queue_id); + } + read_unlock(&bond->lock); + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ + rtnl_unlock(); + return res; +} + +/* + * Set the queue_ids of the slaves in the current bond. The bond + * interface must be enslaved for this to work. + */ +static ssize_t bonding_store_queue_id(struct device *d, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + struct slave *slave, *update_slave; + struct bonding *bond = to_bond(d); + u16 qid; + int i, ret = count; + char *delim; + struct net_device *sdev = NULL; + if (!rtnl_trylock()) + return restart_syscall(); + + /* delim will point to queue id if successful */ + delim = strchr(buffer, ':'); + if (!delim) + goto err_no_cmd; + + /* + * Terminate string that points to device name and bump it + * up one, so we can read the queue id there. 
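The queue_id store routine above splits its input at the first ':' (strchr), NUL-terminates the interface name in place, and parses the queue id from the remainder. A standalone sketch of the same split, writing into a caller-provided name buffer (the buffer handling and the %hu conversion are assumptions of this example, not the driver's exact code):

    #include <stdio.h>
    #include <string.h>

    static int parse_queue_spec(const char *in, char *name, size_t name_len,
                                unsigned short *qid)
    {
        const char *delim = strchr(in, ':');

        if (!delim || (size_t)(delim - in) >= name_len)
            return -1;                  /* no ':' or name too long */

        memcpy(name, in, delim - in);
        name[delim - in] = '\0';

        if (sscanf(delim + 1, "%hu", qid) != 1)
            return -1;                  /* queue id is not a number */

        return 0;
    }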
+ */ + *delim = '\0'; + if (sscanf(++delim, "%hd\n", &qid) != 1) + goto err_no_cmd; + + /* Check buffer length, valid ifname and queue id */ + if (strlen(buffer) > IFNAMSIZ || + !dev_valid_name(buffer) || + qid > bond->params.tx_queues) + goto err_no_cmd; + + /* Get the pointer to that interface if it exists */ + sdev = __dev_get_by_name(dev_net(bond->dev), buffer); + if (!sdev) + goto err_no_cmd; + + read_lock(&bond->lock); + + /* Search for thes slave and check for duplicate qids */ + update_slave = NULL; + bond_for_each_slave(bond, slave, i) { + if (sdev == slave->dev) + /* + * We don't need to check the matching + * slave for dups, since we're overwriting it + */ + update_slave = slave; + else if (qid && qid == slave->queue_id) { + goto err_no_cmd_unlock; + } + } + + if (!update_slave) + goto err_no_cmd_unlock; + + /* Actually set the qids for the slave */ + update_slave->queue_id = qid; + + read_unlock(&bond->lock); +out: + rtnl_unlock(); + return ret; + +err_no_cmd_unlock: + read_unlock(&bond->lock); +err_no_cmd: + pr_info("invalid input for queue_id set for %s.\n", + bond->dev->name); + ret = -EPERM; + goto out; +} + +static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id, + bonding_store_queue_id); + + +/* + * Show and set the all_slaves_active flag. + */ +static ssize_t bonding_show_slaves_active(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", bond->params.all_slaves_active); +} + +static ssize_t bonding_store_slaves_active(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int i, new_value, ret = count; + struct bonding *bond = to_bond(d); + struct slave *slave; + + if (sscanf(buf, "%d", &new_value) != 1) { + pr_err("%s: no all_slaves_active value specified.\n", + bond->dev->name); + ret = -EINVAL; + goto out; + } + + if (new_value == bond->params.all_slaves_active) + goto out; + + if ((new_value == 0) || (new_value == 1)) { + bond->params.all_slaves_active = new_value; + } else { + pr_info("%s: Ignoring invalid all_slaves_active value %d.\n", + bond->dev->name, new_value); + ret = -EINVAL; + goto out; + } + + bond_for_each_slave(bond, slave, i) { + if (slave->state == BOND_STATE_BACKUP) { + if (new_value) + slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; + else + slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; + } + } +out: + return count; +} +static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, + bonding_show_slaves_active, bonding_store_slaves_active); static struct attribute *per_bond_attrs[] = { &dev_attr_slaves.attr, @@ -1499,6 +1617,8 @@ static struct attribute *per_bond_attrs[] = { &dev_attr_ad_actor_key.attr, &dev_attr_ad_partner_key.attr, &dev_attr_ad_partner_mac.attr, + &dev_attr_queue_id.attr, + &dev_attr_all_slaves_active.attr, NULL, }; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 2aa336720591066734e9ce2ed036c81049fb1caa..c6fdd851579a77c953d259de7611c4298fec824f 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -23,8 +23,8 @@ #include "bond_3ad.h" #include "bond_alb.h" -#define DRV_VERSION "3.6.0" -#define DRV_RELDATE "September 26, 2009" +#define DRV_VERSION "3.7.0" +#define DRV_RELDATE "June 2, 2010" #define DRV_NAME "bonding" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" @@ -60,6 +60,9 @@ ((mode) == BOND_MODE_TLB) || \ ((mode) == BOND_MODE_ALB)) +#define TX_QUEUE_OVERRIDE(mode) \ + (((mode) == BOND_MODE_ACTIVEBACKUP) || \ + ((mode) == 
BOND_MODE_ROUNDROBIN)) /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. @@ -131,6 +134,8 @@ struct bond_params { char primary[IFNAMSIZ]; int primary_reselect; __be32 arp_targets[BOND_MAX_ARP_TARGETS]; + int tx_queues; + int all_slaves_active; }; struct bond_parm_tbl { @@ -159,12 +164,12 @@ struct slave { s8 link; /* one of BOND_LINK_XXXX */ s8 new_link; s8 state; /* one of BOND_STATE_XXXX */ - u32 original_flags; u32 original_mtu; u32 link_failure_count; u8 perm_hwaddr[ETH_ALEN]; u16 speed; u8 duplex; + u16 queue_id; struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ struct tlb_slave_info tlb_info; }; @@ -291,7 +296,8 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave) struct bonding *bond = netdev_priv(slave->dev->master); if (!bond_is_lb(bond)) slave->state = BOND_STATE_BACKUP; - slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; + if (!bond->params.all_slaves_active) + slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; if (slave_do_arp_validate(bond, slave)) slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; } diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 0b28e0107697e58349d66695f6c5e24fbcc53497..631a6242b0112aec566b9df0916eb9b1d1552be5 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -2,16 +2,32 @@ # CAIF physical drivers # -if CAIF - comment "CAIF transport drivers" config CAIF_TTY tristate "CAIF TTY transport driver" + depends on CAIF default n ---help--- The CAIF TTY transport driver is a Line Discipline (ldisc) identified as N_CAIF. When this ldisc is opened from user space it will redirect the TTY's traffic into the CAIF stack. -endif # CAIF +config CAIF_SPI_SLAVE + tristate "CAIF SPI transport driver for slave interface" + depends on CAIF + default n + ---help--- + The CAIF Link layer SPI Protocol driver for Slave SPI interface. + This driver implements a platform driver to accommodate for a + platform specific SPI device. A sample CAIF SPI Platform device is + provided in Documentation/networking/caif/spi_porting.txt + +config CAIF_SPI_SYNC + bool "Next command and length in start of frame" + depends on CAIF_SPI_SLAVE + default n + ---help--- + Putting the next command and length in the start of the frame can + help to synchronize to the next transfer in case of over or under-runs. + This option also needs to be enabled on the modem. 
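With the new all_slaves_active option, bond_set_slave_inactive_flags() in bonding.h above only marks a backup slave IFF_SLAVE_INACTIVE when the option is off, and the sysfs store routine walks the existing slaves so a change takes effect retroactively. A simplified sketch of that walk (types and the flag value are stand-ins for illustration):

    #define FLAG_INACTIVE 0x1

    struct member {
        int is_backup;
        unsigned int flags;
        struct member *next;
    };

    static void apply_all_active(struct member *head, int all_active)
    {
        struct member *m;

        for (m = head; m; m = m->next) {
            if (!m->is_backup)
                continue;               /* active member: left untouched */
            if (all_active)
                m->flags &= ~FLAG_INACTIVE;  /* deliver duplicate frames */
            else
                m->flags |= FLAG_INACTIVE;   /* drop them as before */
        }
    }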
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index 52b6d1f826f893e3d0a3b0582bad15b6ff26b23c..3a11d619452bd1e64bf2dc22d3def54695667a43 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -1,12 +1,10 @@ -ifeq ($(CONFIG_CAIF_DEBUG),1) -CAIF_DBG_FLAGS := -DDEBUG +ifeq ($(CONFIG_CAIF_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG endif -KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers - -ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS) -clean-dirs:= .tmp_versions -clean-files:= Module.symvers modules.order *.cmd *~ \ - # Serial interface obj-$(CONFIG_CAIF_TTY) += caif_serial.o + +# SPI slave physical interfaces module +cfspi_slave-objs := caif_spi.o caif_spi_slave.o +obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 09257ca8f563b5239a219b7b8c811d8d29fe735b..3df0c0f8b8bf93ca98a8829e7c09162a70f384c4 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -174,6 +174,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data, struct ser_device *ser; int ret; u8 *p; + ser = tty->disc_data; /* @@ -221,6 +222,7 @@ static int handle_tx(struct ser_device *ser) struct tty_struct *tty; struct sk_buff *skb; int tty_wr, len, room; + tty = ser->tty; ser->tx_started = true; @@ -281,6 +283,7 @@ static int handle_tx(struct ser_device *ser) static int caif_xmit(struct sk_buff *skb, struct net_device *dev) { struct ser_device *ser; + BUG_ON(dev == NULL); ser = netdev_priv(dev); @@ -299,6 +302,7 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev) static void ldisc_tx_wakeup(struct tty_struct *tty) { struct ser_device *ser; + ser = tty->disc_data; BUG_ON(ser == NULL); BUG_ON(ser->tty != tty); @@ -348,6 +352,7 @@ static void ldisc_close(struct tty_struct *tty) struct ser_device *ser = tty->disc_data; /* Remove may be called inside or outside of rtnl_lock */ int islocked = rtnl_is_locked(); + if (!islocked) rtnl_lock(); /* device is freed automagically by net-sysfs */ @@ -374,6 +379,7 @@ static struct tty_ldisc_ops caif_ldisc = { static int register_ldisc(void) { int result; + result = tty_register_ldisc(N_CAIF, &caif_ldisc); if (result < 0) { pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, @@ -391,12 +397,12 @@ static const struct net_device_ops netdev_ops = { static void caifdev_setup(struct net_device *dev) { struct ser_device *serdev = netdev_priv(dev); + dev->features = 0; dev->netdev_ops = &netdev_ops; dev->type = ARPHRD_CAIF; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CAIF_MAX_MTU; - dev->hard_header_len = CAIF_NEEDED_HEADROOM; dev->tx_queue_len = 0; dev->destructor = free_netdev; skb_queue_head_init(&serdev->head); @@ -410,8 +416,6 @@ static void caifdev_setup(struct net_device *dev) static int caif_net_open(struct net_device *dev) { - struct ser_device *ser; - ser = netdev_priv(dev); netif_wake_queue(dev); return 0; } @@ -425,6 +429,7 @@ static int caif_net_close(struct net_device *dev) static int __init caif_ser_init(void) { int ret; + ret = register_ldisc(); debugfsdir = debugfs_create_dir("caif_serial", NULL); return ret; @@ -435,6 +440,7 @@ static void __exit caif_ser_exit(void) struct ser_device *ser = NULL; struct list_head *node; struct list_head *_tmp; + list_for_each_safe(node, _tmp, &ser_list) { ser = list_entry(node, struct ser_device, node); dev_close(ser->dev); diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c new file mode 100644 index 
0000000000000000000000000000000000000000..f5058ff2b210da07dd1411bdd8a12f1dafe9ddb2 --- /dev/null +++ b/drivers/net/caif/caif_spi.c @@ -0,0 +1,850 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com + * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * License terms: GNU General Public License (GPL) version 2. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_CAIF_SPI_SYNC +#define FLAVOR "Flavour: Vanilla.\n" +#else +#define FLAVOR "Flavour: Master CMD&LEN at start.\n" +#endif /* CONFIG_CAIF_SPI_SYNC */ + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Daniel Martensson"); +MODULE_DESCRIPTION("CAIF SPI driver"); + +static int spi_loop; +module_param(spi_loop, bool, S_IRUGO); +MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); + +/* SPI frame alignment. */ +module_param(spi_frm_align, int, S_IRUGO); +MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment."); + +/* SPI padding options. */ +module_param(spi_up_head_align, int, S_IRUGO); +MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment."); + +module_param(spi_up_tail_align, int, S_IRUGO); +MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment."); + +module_param(spi_down_head_align, int, S_IRUGO); +MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment."); + +module_param(spi_down_tail_align, int, S_IRUGO); +MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment."); + +#ifdef CONFIG_ARM +#define BYTE_HEX_FMT "%02X" +#else +#define BYTE_HEX_FMT "%02hhX" +#endif + +#define SPI_MAX_PAYLOAD_SIZE 4096 +/* + * Threshold values for the SPI packet queue. Flowcontrol will be asserted + * when the number of packets exceeds HIGH_WATER_MARK. It will not be + * deasserted before the number of packets drops below LOW_WATER_MARK. + */ +#define LOW_WATER_MARK 100 +#define HIGH_WATER_MARK (LOW_WATER_MARK*5) + +#ifdef CONFIG_UML + +/* + * We sometimes use UML for debugging, but it cannot handle + * dma_alloc_coherent so we have to wrap it. 
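The LOW_WATER_MARK/HIGH_WATER_MARK pair defined above implements hysteresis: flow is asserted off once the queue grows past the high mark and only released again after it drains below the (lower) low mark, which avoids rapid on/off toggling around a single threshold. A standalone sketch of that scheme (the callback plumbing here is illustrative):

    #define LOW_WATER_MARK   100
    #define HIGH_WATER_MARK  (LOW_WATER_MARK * 5)

    struct flow_ctl {
        int qlen;                       /* current queue depth */
        int flow_off_sent;              /* 1 while flow-off is asserted */
        void (*flowctrl)(void *ctx, int on);
        void *ctx;
    };

    static void flow_on_enqueue(struct flow_ctl *fc)
    {
        fc->qlen++;
        if (!fc->flow_off_sent && fc->qlen > HIGH_WATER_MARK) {
            fc->flow_off_sent = 1;
            fc->flowctrl(fc->ctx, 0);   /* ask the peer to stop sending */
        }
    }

    static void flow_on_dequeue(struct flow_ctl *fc)
    {
        fc->qlen--;
        if (fc->flow_off_sent && fc->qlen < LOW_WATER_MARK) {
            fc->flow_off_sent = 0;
            fc->flowctrl(fc->ctx, 1);   /* resume */
        }
    }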
+ */ +static inline void *dma_alloc(dma_addr_t *daddr) +{ + return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL); +} + +static inline void dma_free(void *cpu_addr, dma_addr_t handle) +{ + kfree(cpu_addr); +} + +#else + +static inline void *dma_alloc(dma_addr_t *daddr) +{ + return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr, + GFP_KERNEL); +} + +static inline void dma_free(void *cpu_addr, dma_addr_t handle) +{ + dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle); +} +#endif /* CONFIG_UML */ + +#ifdef CONFIG_DEBUG_FS + +#define DEBUGFS_BUF_SIZE 4096 + +static struct dentry *dbgfs_root; + +static inline void driver_debugfs_create(void) +{ + dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL); +} + +static inline void driver_debugfs_remove(void) +{ + debugfs_remove(dbgfs_root); +} + +static inline void dev_debugfs_rem(struct cfspi *cfspi) +{ + debugfs_remove(cfspi->dbgfs_frame); + debugfs_remove(cfspi->dbgfs_state); + debugfs_remove(cfspi->dbgfs_dir); +} + +static int dbgfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t dbgfs_state(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buf; + int len = 0; + ssize_t size; + struct cfspi *cfspi = file->private_data; + + buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL); + if (!buf) + return 0; + + /* Print out debug information. */ + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "CAIF SPI debug information:\n"); + + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR); + + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "STATE: %d\n", cfspi->dbg_state); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous CMD: 0x%x\n", cfspi->pcmd); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current CMD: 0x%x\n", cfspi->cmd); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous TX len: %d\n", cfspi->tx_ppck_len); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Previous RX len: %d\n", cfspi->rx_ppck_len); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current TX len: %d\n", cfspi->tx_cpck_len); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current RX len: %d\n", cfspi->rx_cpck_len); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Next TX len: %d\n", cfspi->tx_npck_len); + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Next RX len: %d\n", cfspi->rx_npck_len); + + if (len > DEBUGFS_BUF_SIZE) + len = DEBUGFS_BUF_SIZE; + + size = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return size; +} + +static ssize_t print_frame(char *buf, size_t size, char *frm, + size_t count, size_t cut) +{ + int len = 0; + int i; + for (i = 0; i < count; i++) { + len += snprintf((buf + len), (size - len), + "[0x" BYTE_HEX_FMT "]", + frm[i]); + if ((i == cut) && (count > (cut * 2))) { + /* Fast forward. 
*/ + i = count - cut; + len += snprintf((buf + len), (size - len), + "--- %u bytes skipped ---\n", + (int)(count - (cut * 2))); + } + + if ((!(i % 10)) && i) { + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "\n"); + } + } + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n"); + return len; +} + +static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buf; + int len = 0; + ssize_t size; + struct cfspi *cfspi; + + cfspi = file->private_data; + buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL); + if (!buf) + return 0; + + /* Print out debug information. */ + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Current frame:\n"); + + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Tx data (Len: %d):\n", cfspi->tx_cpck_len); + + len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), + cfspi->xfer.va_tx, + (cfspi->tx_cpck_len + SPI_CMD_SZ), 100); + + len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), + "Rx data (Len: %d):\n", cfspi->rx_cpck_len); + + len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len), + cfspi->xfer.va_rx, + (cfspi->rx_cpck_len + SPI_CMD_SZ), 100); + + size = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return size; +} + +static const struct file_operations dbgfs_state_fops = { + .open = dbgfs_open, + .read = dbgfs_state, + .owner = THIS_MODULE +}; + +static const struct file_operations dbgfs_frame_fops = { + .open = dbgfs_open, + .read = dbgfs_frame, + .owner = THIS_MODULE +}; + +static inline void dev_debugfs_add(struct cfspi *cfspi) +{ + cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root); + cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO, + cfspi->dbgfs_dir, cfspi, + &dbgfs_state_fops); + cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO, + cfspi->dbgfs_dir, cfspi, + &dbgfs_frame_fops); +} + +inline void cfspi_dbg_state(struct cfspi *cfspi, int state) +{ + cfspi->dbg_state = state; +}; +#else + +static inline void driver_debugfs_create(void) +{ +} + +static inline void driver_debugfs_remove(void) +{ +} + +static inline void dev_debugfs_add(struct cfspi *cfspi) +{ +} + +static inline void dev_debugfs_rem(struct cfspi *cfspi) +{ +} + +inline void cfspi_dbg_state(struct cfspi *cfspi, int state) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +static LIST_HEAD(cfspi_list); +static spinlock_t cfspi_list_lock; + +/* SPI uplink head alignment. */ +static ssize_t show_up_head_align(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", spi_up_head_align); +} + +static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL); + +/* SPI uplink tail alignment. */ +static ssize_t show_up_tail_align(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", spi_up_tail_align); +} + +static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL); + +/* SPI downlink head alignment. */ +static ssize_t show_down_head_align(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", spi_down_head_align); +} + +static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL); + +/* SPI downlink tail alignment. */ +static ssize_t show_down_tail_align(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", spi_down_tail_align); +} + +static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL); + +/* SPI frame alignment. 
*/ +static ssize_t show_frame_align(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", spi_frm_align); +} + +static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL); + +int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) +{ + u8 *dst = buf; + caif_assert(buf); + + do { + struct sk_buff *skb; + struct caif_payload_info *info; + int spad = 0; + int epad; + + skb = skb_dequeue(&cfspi->chead); + if (!skb) + break; + + /* + * Calculate length of frame including SPI padding. + * The payload position is found in the control buffer. + */ + info = (struct caif_payload_info *)&skb->cb; + + /* + * Compute head offset i.e. number of bytes to add to + * get the start of the payload aligned. + */ + if (spi_up_head_align) { + spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); + *dst = (u8)(spad - 1); + dst += spad; + } + + /* Copy in CAIF frame. */ + skb_copy_bits(skb, 0, dst, skb->len); + dst += skb->len; + cfspi->ndev->stats.tx_packets++; + cfspi->ndev->stats.tx_bytes += skb->len; + + /* + * Compute tail offset i.e. number of bytes to add to + * get the complete CAIF frame aligned. + */ + epad = (skb->len + spad) & spi_up_tail_align; + dst += epad; + + dev_kfree_skb(skb); + + } while ((dst - buf) < len); + + return dst - buf; +} + +int cfspi_xmitlen(struct cfspi *cfspi) +{ + struct sk_buff *skb = NULL; + int frm_len = 0; + int pkts = 0; + + /* + * Decommit previously commited frames. + * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead) + */ + while (skb_peek(&cfspi->chead)) { + skb = skb_dequeue_tail(&cfspi->chead); + skb_queue_head(&cfspi->qhead, skb); + } + + do { + struct caif_payload_info *info = NULL; + int spad = 0; + int epad = 0; + + skb = skb_dequeue(&cfspi->qhead); + if (!skb) + break; + + /* + * Calculate length of frame including SPI padding. + * The payload position is found in the control buffer. + */ + info = (struct caif_payload_info *)&skb->cb; + + /* + * Compute head offset i.e. number of bytes to add to + * get the start of the payload aligned. + */ + if (spi_up_head_align) + spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); + + /* + * Compute tail offset i.e. number of bytes to add to + * get the complete CAIF frame aligned. + */ + epad = (skb->len + spad) & spi_up_tail_align; + + if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) { + skb_queue_tail(&cfspi->chead, skb); + pkts++; + frm_len += skb->len + spad + epad; + } else { + /* Put back packet. */ + skb_queue_head(&cfspi->qhead, skb); + } + } while (pkts <= CAIF_MAX_SPI_PKTS); + + /* + * Send flow on if previously sent flow off + * and now go below the low water mark + */ + if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark && + cfspi->cfdev.flowctrl) { + cfspi->flow_off_sent = 0; + cfspi->cfdev.flowctrl(cfspi->ndev, 1); + } + + return frm_len; +} + +static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc) +{ + struct cfspi *cfspi = (struct cfspi *)ifc->priv; + + if (!in_interrupt()) + spin_lock(&cfspi->lock); + if (assert) { + set_bit(SPI_SS_ON, &cfspi->state); + set_bit(SPI_XFER, &cfspi->state); + } else { + set_bit(SPI_SS_OFF, &cfspi->state); + } + if (!in_interrupt()) + spin_unlock(&cfspi->lock); + + /* Wake up the xfer thread. 
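Note that the spi_up_head_align/spi_up_tail_align parameters are applied as bit masks in cfspi_xmitfrm()/cfspi_xmitlen() below (so a value of 1 pads relative to a 2-byte boundary, 3 relative to a 4-byte boundary, and so on), and that the head padding reserves one extra byte in which the pad length is recorded. The same arithmetic, restated as standalone helpers for clarity (names are illustrative):

    static int head_pad(int hdr_len, int head_align_mask)
    {
        if (!head_align_mask)
            return 0;
        /* one byte to record the pad length, plus the pad itself */
        return 1 + ((hdr_len + 1) & head_align_mask);
    }

    static int tail_pad(int payload_len, int head_pad_len, int tail_align_mask)
    {
        return (payload_len + head_pad_len) & tail_align_mask;
    }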
*/ + wake_up_interruptible(&cfspi->wait); +} + +static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) +{ + struct cfspi *cfspi = (struct cfspi *)ifc->priv; + + /* Transfer done, complete work queue */ + complete(&cfspi->comp); +} + +static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cfspi *cfspi = NULL; + unsigned long flags; + if (!dev) + return -EINVAL; + + cfspi = netdev_priv(dev); + + skb_queue_tail(&cfspi->qhead, skb); + + spin_lock_irqsave(&cfspi->lock, flags); + if (!test_and_set_bit(SPI_XFER, &cfspi->state)) { + /* Wake up xfer thread. */ + wake_up_interruptible(&cfspi->wait); + } + spin_unlock_irqrestore(&cfspi->lock, flags); + + /* Send flow off if number of bytes is above high water mark */ + if (!cfspi->flow_off_sent && + cfspi->qhead.qlen > cfspi->qd_high_mark && + cfspi->cfdev.flowctrl) { + cfspi->flow_off_sent = 1; + cfspi->cfdev.flowctrl(cfspi->ndev, 0); + } + + return 0; +} + +int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) +{ + u8 *src = buf; + + caif_assert(buf != NULL); + + do { + int res; + struct sk_buff *skb = NULL; + int spad = 0; + int epad = 0; + u8 *dst = NULL; + int pkt_len = 0; + + /* + * Compute head offset i.e. number of bytes added to + * get the start of the payload aligned. + */ + if (spi_down_head_align) { + spad = 1 + *src; + src += spad; + } + + /* Read length of CAIF frame (little endian). */ + pkt_len = *src; + pkt_len |= ((*(src+1)) << 8) & 0xFF00; + pkt_len += 2; /* Add FCS fields. */ + + /* Get a suitable caif packet and copy in data. */ + + skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1); + caif_assert(skb != NULL); + + dst = skb_put(skb, pkt_len); + memcpy(dst, src, pkt_len); + src += pkt_len; + + skb->protocol = htons(ETH_P_CAIF); + skb_reset_mac_header(skb); + skb->dev = cfspi->ndev; + + /* + * Push received packet up the stack. + */ + if (!spi_loop) + res = netif_rx_ni(skb); + else + res = cfspi_xmit(skb, cfspi->ndev); + + if (!res) { + cfspi->ndev->stats.rx_packets++; + cfspi->ndev->stats.rx_bytes += pkt_len; + } else + cfspi->ndev->stats.rx_dropped++; + + /* + * Compute tail offset i.e. number of bytes added to + * get the complete CAIF frame aligned. 
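The receive path in cfspi_rxfrm() reads the CAIF frame length as a little-endian 16-bit value byte by byte and then adds two bytes for the FCS fields. The byte-wise parse, written as a standalone helper (the kernel's get_unaligned_le16() expresses the same operation):

    #include <stdint.h>

    static uint16_t read_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }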
+ */ + epad = (pkt_len + spad) & spi_down_tail_align; + src += epad; + } while ((src - buf) < len); + + return src - buf; +} + +static int cfspi_open(struct net_device *dev) +{ + netif_wake_queue(dev); + return 0; +} + +static int cfspi_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} +static const struct net_device_ops cfspi_ops = { + .ndo_open = cfspi_open, + .ndo_stop = cfspi_close, + .ndo_start_xmit = cfspi_xmit +}; + +static void cfspi_setup(struct net_device *dev) +{ + struct cfspi *cfspi = netdev_priv(dev); + dev->features = 0; + dev->netdev_ops = &cfspi_ops; + dev->type = ARPHRD_CAIF; + dev->flags = IFF_NOARP | IFF_POINTOPOINT; + dev->tx_queue_len = 0; + dev->mtu = SPI_MAX_PAYLOAD_SIZE; + dev->destructor = free_netdev; + skb_queue_head_init(&cfspi->qhead); + skb_queue_head_init(&cfspi->chead); + cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; + cfspi->cfdev.use_frag = false; + cfspi->cfdev.use_stx = false; + cfspi->cfdev.use_fcs = false; + cfspi->ndev = dev; +} + +int cfspi_spi_probe(struct platform_device *pdev) +{ + struct cfspi *cfspi = NULL; + struct net_device *ndev; + struct cfspi_dev *dev; + int res; + dev = (struct cfspi_dev *)pdev->dev.platform_data; + + ndev = alloc_netdev(sizeof(struct cfspi), + "cfspi%d", cfspi_setup); + if (!dev) + return -ENODEV; + + cfspi = netdev_priv(ndev); + netif_stop_queue(ndev); + cfspi->ndev = ndev; + cfspi->pdev = pdev; + + /* Set flow info */ + cfspi->flow_off_sent = 0; + cfspi->qd_low_mark = LOW_WATER_MARK; + cfspi->qd_high_mark = HIGH_WATER_MARK; + + /* Assign the SPI device. */ + cfspi->dev = dev; + /* Assign the device ifc to this SPI interface. */ + dev->ifc = &cfspi->ifc; + + /* Allocate DMA buffers. */ + cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx); + if (!cfspi->xfer.va_tx) { + printk(KERN_WARNING + "CFSPI: failed to allocate dma TX buffer.\n"); + res = -ENODEV; + goto err_dma_alloc_tx; + } + + cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx); + + if (!cfspi->xfer.va_rx) { + printk(KERN_WARNING + "CFSPI: failed to allocate dma TX buffer.\n"); + res = -ENODEV; + goto err_dma_alloc_rx; + } + + /* Initialize the work queue. */ + INIT_WORK(&cfspi->work, cfspi_xfer); + + /* Initialize spin locks. */ + spin_lock_init(&cfspi->lock); + + /* Initialize flow control state. */ + cfspi->flow_stop = false; + + /* Initialize wait queue. */ + init_waitqueue_head(&cfspi->wait); + + /* Create work thread. */ + cfspi->wq = create_singlethread_workqueue(dev->name); + if (!cfspi->wq) { + printk(KERN_WARNING "CFSPI: failed to create work queue.\n"); + res = -ENODEV; + goto err_create_wq; + } + + /* Initialize work queue. */ + init_completion(&cfspi->comp); + + /* Create debugfs entries. */ + dev_debugfs_add(cfspi); + + /* Set up the ifc. */ + cfspi->ifc.ss_cb = cfspi_ss_cb; + cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb; + cfspi->ifc.priv = cfspi; + + /* Add CAIF SPI device to list. */ + spin_lock(&cfspi_list_lock); + list_add_tail(&cfspi->list, &cfspi_list); + spin_unlock(&cfspi_list_lock); + + /* Schedule the work queue. */ + queue_work(cfspi->wq, &cfspi->work); + + /* Register network device. */ + res = register_netdev(ndev); + if (res) { + printk(KERN_ERR "CFSPI: Reg. 
error: %d.\n", res); + goto err_net_reg; + } + return res; + + err_net_reg: + dev_debugfs_rem(cfspi); + set_bit(SPI_TERMINATE, &cfspi->state); + wake_up_interruptible(&cfspi->wait); + destroy_workqueue(cfspi->wq); + err_create_wq: + dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + err_dma_alloc_rx: + dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx); + err_dma_alloc_tx: + free_netdev(ndev); + + return res; +} + +int cfspi_spi_remove(struct platform_device *pdev) +{ + struct list_head *list_node; + struct list_head *n; + struct cfspi *cfspi = NULL; + struct cfspi_dev *dev; + + dev = (struct cfspi_dev *)pdev->dev.platform_data; + spin_lock(&cfspi_list_lock); + list_for_each_safe(list_node, n, &cfspi_list) { + cfspi = list_entry(list_node, struct cfspi, list); + /* Find the corresponding device. */ + if (cfspi->dev == dev) { + /* Remove from list. */ + list_del(list_node); + /* Free DMA buffers. */ + dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx); + set_bit(SPI_TERMINATE, &cfspi->state); + wake_up_interruptible(&cfspi->wait); + destroy_workqueue(cfspi->wq); + /* Destroy debugfs directory and files. */ + dev_debugfs_rem(cfspi); + unregister_netdev(cfspi->ndev); + spin_unlock(&cfspi_list_lock); + return 0; + } + } + spin_unlock(&cfspi_list_lock); + return -ENODEV; +} + +static void __exit cfspi_exit_module(void) +{ + struct list_head *list_node; + struct list_head *n; + struct cfspi *cfspi = NULL; + + list_for_each_safe(list_node, n, &cfspi_list) { + cfspi = list_entry(list_node, struct cfspi, list); + platform_device_unregister(cfspi->pdev); + } + + /* Destroy sysfs files. */ + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_up_head_align); + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_up_tail_align); + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_down_head_align); + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_down_tail_align); + driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align); + /* Unregister platform driver. */ + platform_driver_unregister(&cfspi_spi_driver); + /* Destroy debugfs root directory. */ + driver_debugfs_remove(); +} + +static int __init cfspi_init_module(void) +{ + int result; + + /* Initialize spin lock. */ + spin_lock_init(&cfspi_list_lock); + + /* Register platform driver. */ + result = platform_driver_register(&cfspi_spi_driver); + if (result) { + printk(KERN_ERR "Could not register platform SPI driver.\n"); + goto err_dev_register; + } + + /* Create sysfs files. 
*/ + result = + driver_create_file(&cfspi_spi_driver.driver, + &driver_attr_up_head_align); + if (result) { + printk(KERN_ERR "Sysfs creation failed 1.\n"); + goto err_create_up_head_align; + } + + result = + driver_create_file(&cfspi_spi_driver.driver, + &driver_attr_up_tail_align); + if (result) { + printk(KERN_ERR "Sysfs creation failed 2.\n"); + goto err_create_up_tail_align; + } + + result = + driver_create_file(&cfspi_spi_driver.driver, + &driver_attr_down_head_align); + if (result) { + printk(KERN_ERR "Sysfs creation failed 3.\n"); + goto err_create_down_head_align; + } + + result = + driver_create_file(&cfspi_spi_driver.driver, + &driver_attr_down_tail_align); + if (result) { + printk(KERN_ERR "Sysfs creation failed 4.\n"); + goto err_create_down_tail_align; + } + + result = + driver_create_file(&cfspi_spi_driver.driver, + &driver_attr_frame_align); + if (result) { + printk(KERN_ERR "Sysfs creation failed 5.\n"); + goto err_create_frame_align; + } + driver_debugfs_create(); + return result; + + err_create_frame_align: + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_down_tail_align); + err_create_down_tail_align: + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_down_head_align); + err_create_down_head_align: + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_up_tail_align); + err_create_up_tail_align: + driver_remove_file(&cfspi_spi_driver.driver, + &driver_attr_up_head_align); + err_create_up_head_align: + err_dev_register: + return result; +} + +module_init(cfspi_init_module); +module_exit(cfspi_exit_module); diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c new file mode 100644 index 0000000000000000000000000000000000000000..077ccf840edf24dc46107334c39c5758de248465 --- /dev/null +++ b/drivers/net/caif/caif_spi_slave.c @@ -0,0 +1,252 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com + * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * License terms: GNU General Public License (GPL) version 2. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_CAIF_SPI_SYNC +#define SPI_DATA_POS SPI_CMD_SZ +static inline int forward_to_spi_cmd(struct cfspi *cfspi) +{ + return cfspi->rx_cpck_len; +} +#else +#define SPI_DATA_POS 0 +static inline int forward_to_spi_cmd(struct cfspi *cfspi) +{ + return 0; +} +#endif + +int spi_frm_align = 2; +int spi_up_head_align = 1; +int spi_up_tail_align; +int spi_down_head_align = 3; +int spi_down_tail_align = 1; + +#ifdef CONFIG_DEBUG_FS +static inline void debugfs_store_prev(struct cfspi *cfspi) +{ + /* Store previous command for debugging reasons.*/ + cfspi->pcmd = cfspi->cmd; + /* Store previous transfer. */ + cfspi->tx_ppck_len = cfspi->tx_cpck_len; + cfspi->rx_ppck_len = cfspi->rx_cpck_len; +} +#else +static inline void debugfs_store_prev(struct cfspi *cfspi) +{ +} +#endif + +void cfspi_xfer(struct work_struct *work) +{ + struct cfspi *cfspi; + u8 *ptr = NULL; + unsigned long flags; + int ret; + cfspi = container_of(work, struct cfspi, work); + + /* Initialize state. */ + cfspi->cmd = SPI_CMD_EOT; + + for (;;) { + + cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING); + + /* Wait for master talk or transmit event. 
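cfspi_init_module() creates its five driver attributes one by one, which is why it needs the matching chain of err_create_* labels unwinding them in reverse order. Where such a list grows, a table-driven loop with rollback keeps the same behaviour in less code; a generic sketch (create_one()/remove_one() stand in for driver_create_file()/driver_remove_file(), the loop itself is the point):

    static int create_all(void *owner,
                          int (*create_one)(void *owner, int idx),
                          void (*remove_one)(void *owner, int idx),
                          int count)
    {
        int i, err;

        for (i = 0; i < count; i++) {
            err = create_one(owner, i);
            if (err)
                goto rollback;
        }
        return 0;

    rollback:
        while (--i >= 0)                /* undo only what was created */
            remove_one(owner, i);
        return err;
    }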
*/ + wait_event_interruptible(cfspi->wait, + test_bit(SPI_XFER, &cfspi->state) || + test_bit(SPI_TERMINATE, &cfspi->state)); + + if (test_bit(SPI_TERMINATE, &cfspi->state)) + return; + +#if CFSPI_DBG_PREFILL + /* Prefill buffers for easier debugging. */ + memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN); + memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN); +#endif /* CFSPI_DBG_PREFILL */ + + cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE); + + /* Check whether we have a committed frame. */ + if (cfspi->tx_cpck_len) { + int len; + + cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT); + + /* Copy commited SPI frames after the SPI indication. */ + ptr = (u8 *) cfspi->xfer.va_tx; + ptr += SPI_IND_SZ; + len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len); + WARN_ON(len != cfspi->tx_cpck_len); + } + + cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT); + + /* Get length of next frame to commit. */ + cfspi->tx_npck_len = cfspi_xmitlen(cfspi); + + WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN); + + /* + * Add indication and length at the beginning of the frame, + * using little endian. + */ + ptr = (u8 *) cfspi->xfer.va_tx; + *ptr++ = SPI_CMD_IND; + *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8; + *ptr++ = cfspi->tx_npck_len & 0x00FF; + *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8; + + /* Calculate length of DMAs. */ + cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ; + cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ; + + /* Add SPI TX frame alignment padding, if necessary. */ + if (cfspi->tx_cpck_len && + (cfspi->xfer.tx_dma_len % spi_frm_align)) { + + cfspi->xfer.tx_dma_len += spi_frm_align - + (cfspi->xfer.tx_dma_len % spi_frm_align); + } + + /* Add SPI RX frame alignment padding, if necessary. */ + if (cfspi->rx_cpck_len && + (cfspi->xfer.rx_dma_len % spi_frm_align)) { + + cfspi->xfer.rx_dma_len += spi_frm_align - + (cfspi->xfer.rx_dma_len % spi_frm_align); + } + + cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER); + + /* Start transfer. */ + ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev); + WARN_ON(ret); + + cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE); + + /* + * TODO: We might be able to make an assumption if this is the + * first loop. Make sure that minimum toggle time is respected. + */ + udelay(MIN_TRANSITION_TIME_USEC); + + cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE); + + /* Signal that we are ready to recieve data. */ + cfspi->dev->sig_xfer(true, cfspi->dev); + + cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE); + + /* Wait for transfer completion. */ + wait_for_completion(&cfspi->comp); + + cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE); + + if (cfspi->cmd == SPI_CMD_EOT) { + /* + * Clear the master talk bit. A xfer is always at + * least two bursts. + */ + clear_bit(SPI_SS_ON, &cfspi->state); + } + + cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE); + + /* Make sure that the minimum toggle time is respected. */ + if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len, + cfspi->dev->clk_mhz) < + MIN_TRANSITION_TIME_USEC) { + + udelay(MIN_TRANSITION_TIME_USEC - + SPI_XFER_TIME_USEC + (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz)); + } + + cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE); + + /* De-assert transfer signal. */ + cfspi->dev->sig_xfer(false, cfspi->dev); + + /* Check whether we received a CAIF packet. */ + if (cfspi->rx_cpck_len) { + int len; + + cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT); + + /* Parse SPI frame. 
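The TX and RX DMA lengths in cfspi_xfer() are padded up to the next multiple of spi_frm_align using explicit modulo arithmetic. A standalone equivalent, valid for any positive alignment (the kernel's roundup() macro expresses the same thing):

    static unsigned int round_up_to(unsigned int len, unsigned int align)
    {
        unsigned int rem = len % align;

        return rem ? len + (align - rem) : len;
    }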
*/ + ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS)); + + len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len); + WARN_ON(len != cfspi->rx_cpck_len); + } + + /* Check the next SPI command and length. */ + ptr = (u8 *) cfspi->xfer.va_rx; + + ptr += forward_to_spi_cmd(cfspi); + + cfspi->cmd = *ptr++; + cfspi->cmd |= ((*ptr++) << 8) & 0xFF00; + cfspi->rx_npck_len = *ptr++; + cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00; + + WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN); + WARN_ON(cfspi->cmd > SPI_CMD_EOT); + + debugfs_store_prev(cfspi); + + /* Check whether the master issued an EOT command. */ + if (cfspi->cmd == SPI_CMD_EOT) { + /* Reset state. */ + cfspi->tx_cpck_len = 0; + cfspi->rx_cpck_len = 0; + } else { + /* Update state. */ + cfspi->tx_cpck_len = cfspi->tx_npck_len; + cfspi->rx_cpck_len = cfspi->rx_npck_len; + } + + /* + * Check whether we need to clear the xfer bit. + * Spin lock needed for packet insertion. + * Test and clear of different bits + * are not supported. + */ + spin_lock_irqsave(&cfspi->lock, flags); + if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi) + && !test_bit(SPI_SS_ON, &cfspi->state)) + clear_bit(SPI_XFER, &cfspi->state); + + spin_unlock_irqrestore(&cfspi->lock, flags); + } +} + +struct platform_driver cfspi_spi_driver = { + .probe = cfspi_spi_probe, + .remove = cfspi_spi_remove, + .driver = { + .name = "cfspi_sspi", + .owner = THIS_MODULE, + }, +}; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 2c5227c02fa02c2ec00fee49ec550abf042d93f0..9d9e45394433f134ef8a344e69fe1d42039e7fcc 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -73,6 +73,15 @@ config CAN_JANZ_ICAN3 This driver can also be built as a module. If so, the module will be called janz-ican3.ko. +config HAVE_CAN_FLEXCAN + bool + +config CAN_FLEXCAN + tristate "Support for Freescale FLEXCAN based chips" + depends on CAN_DEV && HAVE_CAN_FLEXCAN + ---help--- + Say Y here if you want to support for Freescale FlexCAN. + source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/sja1000/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 9047cd066fea2d36b06d624765dc25b9e3bc6f98..00575373bbd0eed976566419be2ed9c9e34c8cf2 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -16,5 +16,6 @@ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o obj-$(CONFIG_CAN_BFIN) += bfin_can.o obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c new file mode 100644 index 0000000000000000000000000000000000000000..ef443a090ba7cc53a8db4989095bfd888712890c --- /dev/null +++ b/drivers/net/can/flexcan.c @@ -0,0 +1,1030 @@ +/* + * flexcan.c - FLEXCAN CAN controller driver + * + * Copyright (c) 2005-2006 Varma Electronics Oy + * Copyright (c) 2009 Sascha Hauer, Pengutronix + * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix + * + * Based on code originally by Andrey Volkov + * + * LICENCE: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRV_NAME "flexcan" + +/* 8 for RX fifo and 2 error handling */ +#define FLEXCAN_NAPI_WEIGHT (8 + 2) + +/* FLEXCAN module configuration register (CANMCR) bits */ +#define FLEXCAN_MCR_MDIS BIT(31) +#define FLEXCAN_MCR_FRZ BIT(30) +#define FLEXCAN_MCR_FEN BIT(29) +#define FLEXCAN_MCR_HALT BIT(28) +#define FLEXCAN_MCR_NOT_RDY BIT(27) +#define FLEXCAN_MCR_WAK_MSK BIT(26) +#define FLEXCAN_MCR_SOFTRST BIT(25) +#define FLEXCAN_MCR_FRZ_ACK BIT(24) +#define FLEXCAN_MCR_SUPV BIT(23) +#define FLEXCAN_MCR_SLF_WAK BIT(22) +#define FLEXCAN_MCR_WRN_EN BIT(21) +#define FLEXCAN_MCR_LPM_ACK BIT(20) +#define FLEXCAN_MCR_WAK_SRC BIT(19) +#define FLEXCAN_MCR_DOZE BIT(18) +#define FLEXCAN_MCR_SRX_DIS BIT(17) +#define FLEXCAN_MCR_BCC BIT(16) +#define FLEXCAN_MCR_LPRIO_EN BIT(13) +#define FLEXCAN_MCR_AEN BIT(12) +#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) +#define FLEXCAN_MCR_IDAM_A (0 << 8) +#define FLEXCAN_MCR_IDAM_B (1 << 8) +#define FLEXCAN_MCR_IDAM_C (2 << 8) +#define FLEXCAN_MCR_IDAM_D (3 << 8) + +/* FLEXCAN control register (CANCTRL) bits */ +#define FLEXCAN_CTRL_PRESDIV(x) (((x) & 0xff) << 24) +#define FLEXCAN_CTRL_RJW(x) (((x) & 0x03) << 22) +#define FLEXCAN_CTRL_PSEG1(x) (((x) & 0x07) << 19) +#define FLEXCAN_CTRL_PSEG2(x) (((x) & 0x07) << 16) +#define FLEXCAN_CTRL_BOFF_MSK BIT(15) +#define FLEXCAN_CTRL_ERR_MSK BIT(14) +#define FLEXCAN_CTRL_CLK_SRC BIT(13) +#define FLEXCAN_CTRL_LPB BIT(12) +#define FLEXCAN_CTRL_TWRN_MSK BIT(11) +#define FLEXCAN_CTRL_RWRN_MSK BIT(10) +#define FLEXCAN_CTRL_SMP BIT(7) +#define FLEXCAN_CTRL_BOFF_REC BIT(6) +#define FLEXCAN_CTRL_TSYN BIT(5) +#define FLEXCAN_CTRL_LBUF BIT(4) +#define FLEXCAN_CTRL_LOM BIT(3) +#define FLEXCAN_CTRL_PROPSEG(x) ((x) & 0x07) +#define FLEXCAN_CTRL_ERR_BUS (FLEXCAN_CTRL_ERR_MSK) +#define FLEXCAN_CTRL_ERR_STATE \ + (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \ + FLEXCAN_CTRL_BOFF_MSK) +#define FLEXCAN_CTRL_ERR_ALL \ + (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) + +/* FLEXCAN error and status register (ESR) bits */ +#define FLEXCAN_ESR_TWRN_INT BIT(17) +#define FLEXCAN_ESR_RWRN_INT BIT(16) +#define FLEXCAN_ESR_BIT1_ERR BIT(15) +#define FLEXCAN_ESR_BIT0_ERR BIT(14) +#define FLEXCAN_ESR_ACK_ERR BIT(13) +#define FLEXCAN_ESR_CRC_ERR BIT(12) +#define FLEXCAN_ESR_FRM_ERR BIT(11) +#define FLEXCAN_ESR_STF_ERR BIT(10) +#define FLEXCAN_ESR_TX_WRN BIT(9) +#define FLEXCAN_ESR_RX_WRN BIT(8) +#define FLEXCAN_ESR_IDLE BIT(7) +#define FLEXCAN_ESR_TXRX BIT(6) +#define FLEXCAN_EST_FLT_CONF_SHIFT (4) +#define FLEXCAN_ESR_FLT_CONF_MASK (0x3 << FLEXCAN_EST_FLT_CONF_SHIFT) +#define FLEXCAN_ESR_FLT_CONF_ACTIVE (0x0 << FLEXCAN_EST_FLT_CONF_SHIFT) +#define FLEXCAN_ESR_FLT_CONF_PASSIVE (0x1 << FLEXCAN_EST_FLT_CONF_SHIFT) +#define FLEXCAN_ESR_BOFF_INT BIT(2) +#define FLEXCAN_ESR_ERR_INT BIT(1) +#define FLEXCAN_ESR_WAK_INT BIT(0) +#define FLEXCAN_ESR_ERR_BUS \ + (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \ + FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \ + FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR) +#define FLEXCAN_ESR_ERR_STATE \ + (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT) +#define FLEXCAN_ESR_ERR_ALL \ + (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) + +/* FLEXCAN interrupt flag register (IFLAG) bits */ +#define FLEXCAN_TX_BUF_ID 8 +#define FLEXCAN_IFLAG_BUF(x) BIT(x) +#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) +#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) 
+#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) +#define FLEXCAN_IFLAG_DEFAULT \ + (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \ + FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID)) + +/* FLEXCAN message buffers */ +#define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) +#define FLEXCAN_MB_CNT_SRR BIT(22) +#define FLEXCAN_MB_CNT_IDE BIT(21) +#define FLEXCAN_MB_CNT_RTR BIT(20) +#define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) +#define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) + +#define FLEXCAN_MB_CODE_MASK (0xf0ffffff) + +/* Structure of the message buffer */ +struct flexcan_mb { + u32 can_ctrl; + u32 can_id; + u32 data[2]; +}; + +/* Structure of the hardware registers */ +struct flexcan_regs { + u32 mcr; /* 0x00 */ + u32 ctrl; /* 0x04 */ + u32 timer; /* 0x08 */ + u32 _reserved1; /* 0x0c */ + u32 rxgmask; /* 0x10 */ + u32 rx14mask; /* 0x14 */ + u32 rx15mask; /* 0x18 */ + u32 ecr; /* 0x1c */ + u32 esr; /* 0x20 */ + u32 imask2; /* 0x24 */ + u32 imask1; /* 0x28 */ + u32 iflag2; /* 0x2c */ + u32 iflag1; /* 0x30 */ + u32 _reserved2[19]; + struct flexcan_mb cantxfg[64]; +}; + +struct flexcan_priv { + struct can_priv can; + struct net_device *dev; + struct napi_struct napi; + + void __iomem *base; + u32 reg_esr; + u32 reg_ctrl_default; + + struct clk *clk; + struct flexcan_platform_data *pdata; +}; + +static struct can_bittiming_const flexcan_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* + * Swtich transceiver on or off + */ +static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on) +{ + if (priv->pdata && priv->pdata->transceiver_switch) + priv->pdata->transceiver_switch(on); +} + +static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, + u32 reg_esr) +{ + return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + (reg_esr & FLEXCAN_ESR_ERR_BUS); +} + +static inline void flexcan_chip_enable(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->base; + u32 reg; + + reg = readl(®s->mcr); + reg &= ~FLEXCAN_MCR_MDIS; + writel(reg, ®s->mcr); + + udelay(10); +} + +static inline void flexcan_chip_disable(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->base; + u32 reg; + + reg = readl(®s->mcr); + reg |= FLEXCAN_MCR_MDIS; + writel(reg, ®s->mcr); +} + +static int flexcan_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + u32 reg = readl(®s->ecr); + + bec->txerr = (reg >> 0) & 0xff; + bec->rxerr = (reg >> 8) & 0xff; + + return 0; +} + +static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct flexcan_regs __iomem *regs = priv->base; + struct can_frame *cf = (struct can_frame *)skb->data; + u32 can_id; + u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16); + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + netif_stop_queue(dev); + + if (cf->can_id & CAN_EFF_FLAG) { + can_id = cf->can_id & CAN_EFF_MASK; + ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR; + } else { + can_id = (cf->can_id & CAN_SFF_MASK) << 18; + } + + if (cf->can_id & CAN_RTR_FLAG) + ctrl |= FLEXCAN_MB_CNT_RTR; + + if (cf->can_dlc > 0) { + u32 data = be32_to_cpup((__be32 *)&cf->data[0]); + writel(data, 
®s->cantxfg[FLEXCAN_TX_BUF_ID].data[0]); + } + if (cf->can_dlc > 3) { + u32 data = be32_to_cpup((__be32 *)&cf->data[4]); + writel(data, ®s->cantxfg[FLEXCAN_TX_BUF_ID].data[1]); + } + + writel(can_id, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_id); + writel(ctrl, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); + + kfree_skb(skb); + + /* tx_packets is incremented in flexcan_irq */ + stats->tx_bytes += cf->can_dlc; + + return NETDEV_TX_OK; +} + +static void do_bus_err(struct net_device *dev, + struct can_frame *cf, u32 reg_esr) +{ + struct flexcan_priv *priv = netdev_priv(dev); + int rx_errors = 0, tx_errors = 0; + + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { + dev_dbg(dev->dev.parent, "BIT1_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_BIT1; + tx_errors = 1; + } + if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { + dev_dbg(dev->dev.parent, "BIT0_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_BIT0; + tx_errors = 1; + } + if (reg_esr & FLEXCAN_ESR_ACK_ERR) { + dev_dbg(dev->dev.parent, "ACK_ERR irq\n"); + cf->can_id |= CAN_ERR_ACK; + cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + tx_errors = 1; + } + if (reg_esr & FLEXCAN_ESR_CRC_ERR) { + dev_dbg(dev->dev.parent, "CRC_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_BIT; + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + rx_errors = 1; + } + if (reg_esr & FLEXCAN_ESR_FRM_ERR) { + dev_dbg(dev->dev.parent, "FRM_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_FORM; + rx_errors = 1; + } + if (reg_esr & FLEXCAN_ESR_STF_ERR) { + dev_dbg(dev->dev.parent, "STF_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_STUFF; + rx_errors = 1; + } + + priv->can.can_stats.bus_error++; + if (rx_errors) + dev->stats.rx_errors++; + if (tx_errors) + dev->stats.tx_errors++; +} + +static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) +{ + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + do_bus_err(dev, cf, reg_esr); + netif_receive_skb(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += cf->can_dlc; + + return 1; +} + +static void do_state(struct net_device *dev, + struct can_frame *cf, enum can_state new_state) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct can_berr_counter bec; + + flexcan_get_berr_counter(dev, &bec); + + switch (priv->can.state) { + case CAN_STATE_ERROR_ACTIVE: + /* + * from: ERROR_ACTIVE + * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF + * => : there was a warning int + */ + if (new_state >= CAN_STATE_ERROR_WARNING && + new_state <= CAN_STATE_BUS_OFF) { + dev_dbg(dev->dev.parent, "Error Warning IRQ\n"); + priv->can.can_stats.error_warning++; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + } + case CAN_STATE_ERROR_WARNING: /* fallthrough */ + /* + * from: ERROR_ACTIVE, ERROR_WARNING + * to : ERROR_PASSIVE, BUS_OFF + * => : error passive int + */ + if (new_state >= CAN_STATE_ERROR_PASSIVE && + new_state <= CAN_STATE_BUS_OFF) { + dev_dbg(dev->dev.parent, "Error Passive IRQ\n"); + priv->can.can_stats.error_passive++; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + } + break; + case CAN_STATE_BUS_OFF: + dev_err(dev->dev.parent, + "BUG! 
hardware recovered automatically from BUS_OFF\n"); + break; + default: + break; + } + + /* process state changes depending on the new state */ + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + dev_dbg(dev->dev.parent, "Error Active\n"); + cf->can_id |= CAN_ERR_PROT; + cf->data[2] = CAN_ERR_PROT_ACTIVE; + break; + case CAN_STATE_BUS_OFF: + cf->can_id |= CAN_ERR_BUSOFF; + can_bus_off(dev); + break; + default: + break; + } +} + +static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct sk_buff *skb; + struct can_frame *cf; + enum can_state new_state; + int flt; + + flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; + if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { + if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN | + FLEXCAN_ESR_RX_WRN)))) + new_state = CAN_STATE_ERROR_ACTIVE; + else + new_state = CAN_STATE_ERROR_WARNING; + } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) + new_state = CAN_STATE_ERROR_PASSIVE; + else + new_state = CAN_STATE_BUS_OFF; + + /* state hasn't changed */ + if (likely(new_state == priv->can.state)) + return 0; + + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + do_state(dev, cf, new_state); + priv->can.state = new_state; + netif_receive_skb(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += cf->can_dlc; + + return 1; +} + +static void flexcan_read_fifo(const struct net_device *dev, + struct can_frame *cf) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + struct flexcan_mb __iomem *mb = ®s->cantxfg[0]; + u32 reg_ctrl, reg_id; + + reg_ctrl = readl(&mb->can_ctrl); + reg_id = readl(&mb->can_id); + if (reg_ctrl & FLEXCAN_MB_CNT_IDE) + cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (reg_id >> 18) & CAN_SFF_MASK; + + if (reg_ctrl & FLEXCAN_MB_CNT_RTR) + cf->can_id |= CAN_RTR_FLAG; + cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf); + + *(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0])); + *(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1])); + + /* mark as read */ + writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); + readl(®s->timer); +} + +static int flexcan_read_frame(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + skb = alloc_can_skb(dev, &cf); + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + flexcan_read_fifo(dev, cf); + netif_receive_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + return 1; +} + +static int flexcan_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + u32 reg_iflag1, reg_esr; + int work_done = 0; + + /* + * The error bits are cleared on read, + * use saved value from irq handler. 
+ */ + reg_esr = readl(®s->esr) | priv->reg_esr; + + /* handle state changes */ + work_done += flexcan_poll_state(dev, reg_esr); + + /* handle RX-FIFO */ + reg_iflag1 = readl(®s->iflag1); + while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE && + work_done < quota) { + work_done += flexcan_read_frame(dev); + reg_iflag1 = readl(®s->iflag1); + } + + /* report bus errors */ + if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota) + work_done += flexcan_poll_bus_err(dev, reg_esr); + + if (work_done < quota) { + napi_complete(napi); + /* enable IRQs */ + writel(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); + writel(priv->reg_ctrl_default, ®s->ctrl); + } + + return work_done; +} + +static irqreturn_t flexcan_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_device_stats *stats = &dev->stats; + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + u32 reg_iflag1, reg_esr; + + reg_iflag1 = readl(®s->iflag1); + reg_esr = readl(®s->esr); + writel(FLEXCAN_ESR_ERR_INT, ®s->esr); /* ACK err IRQ */ + + /* + * schedule NAPI in case of: + * - rx IRQ + * - state change IRQ + * - bus error IRQ and bus error reporting is activated + */ + if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) || + (reg_esr & FLEXCAN_ESR_ERR_STATE) || + flexcan_has_and_handle_berr(priv, reg_esr)) { + /* + * The error bits are cleared on read, + * save them for later use. + */ + priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS; + writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, + ®s->imask1); + writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + ®s->ctrl); + napi_schedule(&priv->napi); + } + + /* FIFO overflow */ + if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { + writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1); + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } + + /* transmission complete interrupt */ + if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) { + /* tx_bytes is incremented in flexcan_start_xmit */ + stats->tx_packets++; + writel((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); + netif_wake_queue(dev); + } + + return IRQ_HANDLED; +} + +static void flexcan_set_bittiming(struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + const struct can_bittiming *bt = &priv->can.bittiming; + struct flexcan_regs __iomem *regs = priv->base; + u32 reg; + + reg = readl(®s->ctrl); + reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) | + FLEXCAN_CTRL_RJW(0x3) | + FLEXCAN_CTRL_PSEG1(0x7) | + FLEXCAN_CTRL_PSEG2(0x7) | + FLEXCAN_CTRL_PROPSEG(0x7) | + FLEXCAN_CTRL_LPB | + FLEXCAN_CTRL_SMP | + FLEXCAN_CTRL_LOM); + + reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) | + FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) | + FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) | + FLEXCAN_CTRL_RJW(bt->sjw - 1) | + FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1); + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + reg |= FLEXCAN_CTRL_LPB; + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + reg |= FLEXCAN_CTRL_LOM; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + reg |= FLEXCAN_CTRL_SMP; + + dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg); + writel(reg, ®s->ctrl); + + /* print chip status */ + dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, + readl(®s->mcr), readl(®s->ctrl)); +} + +/* + * flexcan_chip_start + * + * this functions is entered with clocks enabled + * + */ +static int flexcan_chip_start(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + unsigned int i; + int err; + 
u32 reg_mcr, reg_ctrl; + + /* enable module */ + flexcan_chip_enable(priv); + + /* soft reset */ + writel(FLEXCAN_MCR_SOFTRST, ®s->mcr); + udelay(10); + + reg_mcr = readl(®s->mcr); + if (reg_mcr & FLEXCAN_MCR_SOFTRST) { + dev_err(dev->dev.parent, + "Failed to softreset can module (mcr=0x%08x)\n", + reg_mcr); + err = -ENODEV; + goto out; + } + + flexcan_set_bittiming(dev); + + /* + * MCR + * + * enable freeze + * enable fifo + * halt now + * only supervisor access + * enable warning int + * choose format C + * + */ + reg_mcr = readl(®s->mcr); + reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | + FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | + FLEXCAN_MCR_IDAM_C; + dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr); + writel(reg_mcr, ®s->mcr); + + /* + * CTRL + * + * disable timer sync feature + * + * disable auto busoff recovery + * transmit lowest buffer first + * + * enable tx and rx warning interrupt + * enable bus off interrupt + * (== FLEXCAN_CTRL_ERR_STATE) + * + * _note_: we enable the "error interrupt" + * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any + * warning or bus passive interrupts. + */ + reg_ctrl = readl(®s->ctrl); + reg_ctrl &= ~FLEXCAN_CTRL_TSYN; + reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | + FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK; + + /* save for later use */ + priv->reg_ctrl_default = reg_ctrl; + dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); + writel(reg_ctrl, ®s->ctrl); + + for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) { + writel(0, ®s->cantxfg[i].can_ctrl); + writel(0, ®s->cantxfg[i].can_id); + writel(0, ®s->cantxfg[i].data[0]); + writel(0, ®s->cantxfg[i].data[1]); + + /* put MB into rx queue */ + writel(FLEXCAN_MB_CNT_CODE(0x4), ®s->cantxfg[i].can_ctrl); + } + + /* acceptance mask/acceptance code (accept everything) */ + writel(0x0, ®s->rxgmask); + writel(0x0, ®s->rx14mask); + writel(0x0, ®s->rx15mask); + + flexcan_transceiver_switch(priv, 1); + + /* synchronize with the can bus */ + reg_mcr = readl(®s->mcr); + reg_mcr &= ~FLEXCAN_MCR_HALT; + writel(reg_mcr, ®s->mcr); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + /* enable FIFO interrupts */ + writel(FLEXCAN_IFLAG_DEFAULT, ®s->imask1); + + /* print chip status */ + dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n", + __func__, readl(®s->mcr), readl(®s->ctrl)); + + return 0; + + out: + flexcan_chip_disable(priv); + return err; +} + +/* + * flexcan_chip_stop + * + * this functions is entered with clocks enabled + * + */ +static void flexcan_chip_stop(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + u32 reg; + + /* Disable all interrupts */ + writel(0, ®s->imask1); + + /* Disable + halt module */ + reg = readl(®s->mcr); + reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; + writel(reg, ®s->mcr); + + flexcan_transceiver_switch(priv, 0); + priv->can.state = CAN_STATE_STOPPED; + + return; +} + +static int flexcan_open(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + int err; + + clk_enable(priv->clk); + + err = open_candev(dev); + if (err) + goto out; + + err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_close; + + /* start chip and queuing */ + err = flexcan_chip_start(dev); + if (err) + goto out_close; + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + + out_close: + close_candev(dev); + out: + clk_disable(priv->clk); + + return err; +} + +static int 
flexcan_close(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + flexcan_chip_stop(dev); + + free_irq(dev->irq, dev); + clk_disable(priv->clk); + + close_candev(dev); + + return 0; +} + +static int flexcan_set_mode(struct net_device *dev, enum can_mode mode) +{ + int err; + + switch (mode) { + case CAN_MODE_START: + err = flexcan_chip_start(dev); + if (err) + return err; + + netif_wake_queue(dev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct net_device_ops flexcan_netdev_ops = { + .ndo_open = flexcan_open, + .ndo_stop = flexcan_close, + .ndo_start_xmit = flexcan_start_xmit, +}; + +static int __devinit register_flexcandev(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->base; + u32 reg, err; + + clk_enable(priv->clk); + + /* select "bus clock", chip must be disabled */ + flexcan_chip_disable(priv); + reg = readl(®s->ctrl); + reg |= FLEXCAN_CTRL_CLK_SRC; + writel(reg, ®s->ctrl); + + flexcan_chip_enable(priv); + + /* set freeze, halt and activate FIFO, restrict register access */ + reg = readl(®s->mcr); + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | + FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + writel(reg, ®s->mcr); + + /* + * Currently we only support newer versions of this core + * featuring a RX FIFO. Older cores found on some Coldfire + * derivates are not yet supported. + */ + reg = readl(®s->mcr); + if (!(reg & FLEXCAN_MCR_FEN)) { + dev_err(dev->dev.parent, + "Could not enable RX FIFO, unsupported core\n"); + err = -ENODEV; + goto out; + } + + err = register_candev(dev); + + out: + /* disable core and turn off clocks */ + flexcan_chip_disable(priv); + clk_disable(priv->clk); + + return err; +} + +static void __devexit unregister_flexcandev(struct net_device *dev) +{ + unregister_candev(dev); +} + +static int __devinit flexcan_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct flexcan_priv *priv; + struct resource *mem; + struct clk *clk; + void __iomem *base; + resource_size_t mem_size; + int err, irq; + + clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "no clock defined\n"); + err = PTR_ERR(clk); + goto failed_clock; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!mem || irq <= 0) { + err = -ENODEV; + goto failed_get; + } + + mem_size = resource_size(mem); + if (!request_mem_region(mem->start, mem_size, pdev->name)) { + err = -EBUSY; + goto failed_req; + } + + base = ioremap(mem->start, mem_size); + if (!base) { + err = -ENOMEM; + goto failed_map; + } + + dev = alloc_candev(sizeof(struct flexcan_priv), 0); + if (!dev) { + err = -ENOMEM; + goto failed_alloc; + } + + dev->netdev_ops = &flexcan_netdev_ops; + dev->irq = irq; + dev->flags |= IFF_ECHO; /* we support local echo in hardware */ + + priv = netdev_priv(dev); + priv->can.clock.freq = clk_get_rate(clk); + priv->can.bittiming_const = &flexcan_bittiming_const; + priv->can.do_set_mode = flexcan_set_mode; + priv->can.do_get_berr_counter = flexcan_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_BERR_REPORTING; + priv->base = base; + priv->dev = dev; + priv->clk = clk; + priv->pdata = pdev->dev.platform_data; + + netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); + + dev_set_drvdata(&pdev->dev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + 
+ err = register_flexcandev(dev); + if (err) { + dev_err(&pdev->dev, "registering netdev failed\n"); + goto failed_register; + } + + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", + priv->base, dev->irq); + + return 0; + + failed_register: + free_candev(dev); + failed_alloc: + iounmap(base); + failed_map: + release_mem_region(mem->start, mem_size); + failed_req: + clk_put(clk); + failed_get: + failed_clock: + return err; +} + +static int __devexit flexcan_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct flexcan_priv *priv = netdev_priv(dev); + struct resource *mem; + + unregister_flexcandev(dev); + platform_set_drvdata(pdev, NULL); + free_candev(dev); + iounmap(priv->base); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + + clk_put(priv->clk); + + return 0; +} + +static struct platform_driver flexcan_driver = { + .driver.name = DRV_NAME, + .probe = flexcan_probe, + .remove = __devexit_p(flexcan_remove), +}; + +static int __init flexcan_init(void) +{ + pr_info("%s netdevice driver\n", DRV_NAME); + return platform_driver_register(&flexcan_driver); +} + +static void __exit flexcan_exit(void) +{ + platform_driver_unregister(&flexcan_driver); + pr_info("%s: driver removed\n", DRV_NAME); +} + +module_init(flexcan_init); +module_exit(flexcan_exit); + +MODULE_AUTHOR("Sascha Hauer , " + "Marc Kleine-Budde "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN port driver for flexcan based chip"); diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h index 4ff966473bc9596477431637923a8fa5c2e8c2ab..b43e9f5d32683794508d88ea03098376abbb4d44 100644 --- a/drivers/net/can/mscan/mscan.h +++ b/drivers/net/can/mscan/mscan.h @@ -227,7 +227,7 @@ struct mscan_regs { u16 time; /* + 0x7c 0x3e */ } tx; _MSCAN_RESERVED_(32, 2); /* + 0x7e */ -} __attribute__ ((packed)); +} __packed; #undef _MSCAN_RESERVED_ #define MSCAN_REGION sizeof(struct mscan) diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 97ff6febad63d80f093403ae6be688da0ffba772..04525495b15bec744b2db5ceebe5b48dae650dfa 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -7,4 +7,10 @@ config CAN_EMS_USB This driver is for the one channel CPC-USB/ARM7 CAN/USB interface from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). +config CAN_ESD_USB2 + tristate "ESD USB/2 CAN/USB interface" + ---help--- + This driver supports the CAN-USB/2 interface + from esd electronic system design gmbh (http://www.esd.eu). 
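
Returning to the FlexCAN platform driver added earlier in this series: flexcan_probe() expects the platform to supply a memory-mapped register window, an interrupt, a clock, and optionally a struct flexcan_platform_data whose transceiver_switch() callback is dereferenced by flexcan_transceiver_switch(). The following hypothetical board-support sketch is not part of this patch; the register base address, the IRQ number and the header path are illustrative assumptions, and only the "flexcan" device name (matching DRV_NAME) and the callback signature are taken from the driver itself.

/*
 * Hypothetical board file fragment -- not part of this patch.
 * The platform must additionally provide a clock reachable via
 * clk_get(&pdev->dev, NULL); that part is not shown here.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>	/* assumed location of flexcan_platform_data */

static void board_can_transceiver_switch(int on)
{
	/* e.g. toggle a GPIO that enables the external CAN transceiver */
}

static struct flexcan_platform_data board_flexcan_pdata = {
	.transceiver_switch	= board_can_transceiver_switch,
};

static struct resource board_flexcan_resources[] = {
	{
		.start	= 0x53fc8000,			/* illustrative base address */
		.end	= 0x53fc8000 + 0x4000 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 43,				/* illustrative IRQ number */
		.end	= 43,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_flexcan_device = {
	.name		= "flexcan",			/* must match DRV_NAME */
	.id		= 0,
	.resource	= board_flexcan_resources,
	.num_resources	= ARRAY_SIZE(board_flexcan_resources),
	.dev		= {
		.platform_data	= &board_flexcan_pdata,
	},
};

/* registered from the machine init code:
 *	platform_device_register(&board_flexcan_device);
 */

With such a device registered, flexcan_probe() finds the register window via platform_get_resource(), the interrupt via platform_get_irq(), and the optional transceiver hook via pdev->dev.platform_data, as shown in the driver code above.
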
+ endmenu diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 0afd51d4c7a5632965855b650120bac72f34def4..fce3cf11719f972505a65c60d4947c3d4deb279f 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -3,5 +3,6 @@ # obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o +obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 1fc0871d2ef747254bae77b7a88162aad0ee0300..e75f1a87697266829a1f1c88e135823ff5d5c2f9 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -197,7 +197,7 @@ struct cpc_can_err_counter { }; /* Main message type used between library and application */ -struct __attribute__ ((packed)) ems_cpc_msg { +struct __packed ems_cpc_msg { u8 type; /* type of message */ u8 length; /* length of data within union 'msg' */ u8 msgid; /* confirmation handle */ diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c new file mode 100644 index 0000000000000000000000000000000000000000..05a52754f486a73a3efe3cba72b309a3fb9f72f5 --- /dev/null +++ b/drivers/net/can/usb/esd_usb2.c @@ -0,0 +1,1132 @@ +/* + * CAN driver for esd CAN-USB/2 + * + * Copyright (C) 2010 Matthias Fuchs , esd gmbh + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +MODULE_AUTHOR("Matthias Fuchs "); +MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces"); +MODULE_LICENSE("GPL v2"); + +/* Define these values to match your devices */ +#define USB_ESDGMBH_VENDOR_ID 0x0ab4 +#define USB_CANUSB2_PRODUCT_ID 0x0010 + +#define ESD_USB2_CAN_CLOCK 60000000 +#define ESD_USB2_MAX_NETS 2 + +/* USB2 commands */ +#define CMD_VERSION 1 /* also used for VERSION_REPLY */ +#define CMD_CAN_RX 2 /* device to host only */ +#define CMD_CAN_TX 3 /* also used for TX_DONE */ +#define CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */ +#define CMD_TS 5 /* also used for TS_REPLY */ +#define CMD_IDADD 6 /* also used for IDADD_REPLY */ + +/* esd CAN message flags - dlc field */ +#define ESD_RTR 0x10 + +/* esd CAN message flags - id field */ +#define ESD_EXTID 0x20000000 +#define ESD_EVENT 0x40000000 +#define ESD_IDMASK 0x1fffffff + +/* esd CAN event ids used by this driver */ +#define ESD_EV_CAN_ERROR_EXT 2 + +/* baudrate message flags */ +#define ESD_USB2_UBR 0x80000000 +#define ESD_USB2_LOM 0x40000000 +#define ESD_USB2_NO_BAUDRATE 0x7fffffff +#define ESD_USB2_TSEG1_MIN 1 +#define ESD_USB2_TSEG1_MAX 16 +#define ESD_USB2_TSEG1_SHIFT 16 +#define ESD_USB2_TSEG2_MIN 1 +#define ESD_USB2_TSEG2_MAX 8 +#define ESD_USB2_TSEG2_SHIFT 20 +#define ESD_USB2_SJW_MAX 4 +#define ESD_USB2_SJW_SHIFT 14 +#define ESD_USB2_BRP_MIN 1 +#define ESD_USB2_BRP_MAX 1024 +#define ESD_USB2_BRP_INC 1 +#define ESD_USB2_3_SAMPLES 0x00800000 + +/* esd IDADD message */ +#define ESD_ID_ENABLE 0x80 +#define ESD_MAX_ID_SEGMENT 64 + +/* SJA1000 ECC register (emulated by usb2 firmware) */ +#define SJA1000_ECC_SEG 0x1F +#define SJA1000_ECC_DIR 0x20 +#define SJA1000_ECC_ERR 0x06 +#define SJA1000_ECC_BIT 0x00 +#define SJA1000_ECC_FORM 0x40 +#define SJA1000_ECC_STUFF 0x80 +#define SJA1000_ECC_MASK 0xc0 + +/* esd bus state event codes */ +#define ESD_BUSSTATE_MASK 0xc0 +#define ESD_BUSSTATE_WARN 0x40 +#define ESD_BUSSTATE_ERRPASSIVE 0x80 +#define ESD_BUSSTATE_BUSOFF 0xc0 + +#define RX_BUFFER_SIZE 1024 +#define MAX_RX_URBS 4 +#define MAX_TX_URBS 16 /* must be power of 2 */ + +struct header_msg { + u8 len; /* len is always the total message length in 32bit words */ + u8 cmd; + u8 rsvd[2]; +}; + +struct version_msg { + u8 len; + u8 cmd; + u8 rsvd; + u8 flags; + __le32 drv_version; +}; + +struct version_reply_msg { + u8 len; + u8 cmd; + u8 nets; + u8 features; + __le32 version; + u8 name[16]; + __le32 rsvd; + __le32 ts; +}; + +struct rx_msg { + u8 len; + u8 cmd; + u8 net; + u8 dlc; + __le32 ts; + __le32 id; /* upper 3 bits contain flags */ + u8 data[8]; +}; + +struct tx_msg { + u8 len; + u8 cmd; + u8 net; + u8 dlc; + __le32 hnd; + __le32 id; /* upper 3 bits contain flags */ + u8 data[8]; +}; + +struct tx_done_msg { + u8 len; + u8 cmd; + u8 net; + u8 status; + __le32 hnd; + __le32 ts; +}; + +struct id_filter_msg { + u8 len; + u8 cmd; + u8 net; + u8 option; + __le32 mask[ESD_MAX_ID_SEGMENT + 1]; +}; + +struct set_baudrate_msg { + u8 len; + u8 cmd; + u8 net; + u8 rsvd; + __le32 baud; +}; + +/* Main message type used between library and application */ +struct __attribute__ ((packed)) esd_usb2_msg { + union { + struct header_msg hdr; + struct version_msg version; + struct version_reply_msg version_reply; + struct rx_msg rx; + struct tx_msg tx; + struct tx_done_msg txdone; + struct set_baudrate_msg setbaud; + struct id_filter_msg filter; + } msg; +}; + +static struct usb_device_id esd_usb2_table[] = { + 
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)}, + {} +}; +MODULE_DEVICE_TABLE(usb, esd_usb2_table); + +struct esd_usb2_net_priv; + +struct esd_tx_urb_context { + struct esd_usb2_net_priv *priv; + u32 echo_index; + int dlc; +}; + +struct esd_usb2 { + struct usb_device *udev; + struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS]; + + struct usb_anchor rx_submitted; + + int net_count; + u32 version; + int rxinitdone; +}; + +struct esd_usb2_net_priv { + struct can_priv can; /* must be the first member */ + + atomic_t active_tx_jobs; + struct usb_anchor tx_submitted; + struct esd_tx_urb_context tx_contexts[MAX_TX_URBS]; + + int open_time; + struct esd_usb2 *usb2; + struct net_device *netdev; + int index; + u8 old_state; + struct can_berr_counter bec; +}; + +static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, + struct esd_usb2_msg *msg) +{ + struct net_device_stats *stats = &priv->netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK; + + if (id == ESD_EV_CAN_ERROR_EXT) { + u8 state = msg->msg.rx.data[0]; + u8 ecc = msg->msg.rx.data[1]; + u8 txerr = msg->msg.rx.data[2]; + u8 rxerr = msg->msg.rx.data[3]; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (skb == NULL) { + stats->rx_dropped++; + return; + } + + if (state != priv->old_state) { + priv->old_state = state; + + switch (state & ESD_BUSSTATE_MASK) { + case ESD_BUSSTATE_BUSOFF: + priv->can.state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + can_bus_off(priv->netdev); + break; + case ESD_BUSSTATE_WARN: + priv->can.state = CAN_STATE_ERROR_WARNING; + priv->can.can_stats.error_warning++; + break; + case ESD_BUSSTATE_ERRPASSIVE: + priv->can.state = CAN_STATE_ERROR_PASSIVE; + priv->can.can_stats.error_passive++; + break; + default: + priv->can.state = CAN_STATE_ERROR_ACTIVE; + break; + } + } else { + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (ecc & SJA1000_ECC_MASK) { + case SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + cf->data[3] = ecc & SJA1000_ECC_SEG; + break; + } + + /* Error occured during transmission? */ + if (!(ecc & SJA1000_ECC_DIR)) + cf->data[2] |= CAN_ERR_PROT_TX; + + if (priv->can.state == CAN_STATE_ERROR_WARNING || + priv->can.state == CAN_STATE_ERROR_PASSIVE) { + cf->data[1] = (txerr > rxerr) ? 
+ CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + } + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + netif_rx(skb); + + priv->bec.txerr = txerr; + priv->bec.rxerr = rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + } +} + +static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, + struct esd_usb2_msg *msg) +{ + struct net_device_stats *stats = &priv->netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + int i; + u32 id; + + if (!netif_device_present(priv->netdev)) + return; + + id = le32_to_cpu(msg->msg.rx.id); + + if (id & ESD_EVENT) { + esd_usb2_rx_event(priv, msg); + } else { + skb = alloc_can_skb(priv->netdev, &cf); + if (skb == NULL) { + stats->rx_dropped++; + return; + } + + cf->can_id = id & ESD_IDMASK; + cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); + + if (id & ESD_EXTID) + cf->can_id |= CAN_EFF_FLAG; + + if (msg->msg.rx.dlc & ESD_RTR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + for (i = 0; i < cf->can_dlc; i++) + cf->data[i] = msg->msg.rx.data[i]; + } + + netif_rx(skb); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + } + + return; +} + +static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv, + struct esd_usb2_msg *msg) +{ + struct net_device_stats *stats = &priv->netdev->stats; + struct net_device *netdev = priv->netdev; + struct esd_tx_urb_context *context; + + if (!netif_device_present(netdev)) + return; + + context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)]; + + if (!msg->msg.txdone.status) { + stats->tx_packets++; + stats->tx_bytes += context->dlc; + can_get_echo_skb(netdev, context->echo_index); + } else { + stats->tx_errors++; + can_free_echo_skb(netdev, context->echo_index); + } + + /* Release context */ + context->echo_index = MAX_TX_URBS; + atomic_dec(&priv->active_tx_jobs); + + netif_wake_queue(netdev); +} + +static void esd_usb2_read_bulk_callback(struct urb *urb) +{ + struct esd_usb2 *dev = urb->context; + int retval; + int pos = 0; + int i; + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -ESHUTDOWN: + return; + + default: + dev_info(dev->udev->dev.parent, + "Rx URB aborted (%d)\n", urb->status); + goto resubmit_urb; + } + + while (pos < urb->actual_length) { + struct esd_usb2_msg *msg; + + msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos); + + switch (msg->msg.hdr.cmd) { + case CMD_CAN_RX: + esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg); + break; + + case CMD_CAN_TX: + esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net], + msg); + break; + } + + pos += msg->msg.hdr.len << 2; + + if (pos > urb->actual_length) { + dev_err(dev->udev->dev.parent, "format error\n"); + break; + } + } + +resubmit_urb: + usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), + urb->transfer_buffer, RX_BUFFER_SIZE, + esd_usb2_read_bulk_callback, dev); + + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval == -ENODEV) { + for (i = 0; i < dev->net_count; i++) { + if (dev->nets[i]) + netif_device_detach(dev->nets[i]->netdev); + } + } else if (retval) { + dev_err(dev->udev->dev.parent, + "failed resubmitting read bulk urb: %d\n", retval); + } + + return; +} + +/* + * callback for bulk IN urb + */ +static void esd_usb2_write_bulk_callback(struct urb *urb) +{ + struct esd_tx_urb_context *context = urb->context; + struct esd_usb2_net_priv *priv; + struct esd_usb2 *dev; + struct net_device *netdev; + size_t size = sizeof(struct esd_usb2_msg); + + WARN_ON(!context); + + priv = context->priv; + netdev = priv->netdev; + dev = priv->usb2; + + /* free up 
our allocated buffer */ + usb_free_coherent(urb->dev, size, + urb->transfer_buffer, urb->transfer_dma); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n", + urb->status); + + netdev->trans_start = jiffies; +} + +static ssize_t show_firmware(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(d); + struct esd_usb2 *dev = usb_get_intfdata(intf); + + return sprintf(buf, "%d.%d.%d\n", + (dev->version >> 12) & 0xf, + (dev->version >> 8) & 0xf, + dev->version & 0xff); +} +static DEVICE_ATTR(firmware, S_IRUGO, show_firmware, NULL); + +static ssize_t show_hardware(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(d); + struct esd_usb2 *dev = usb_get_intfdata(intf); + + return sprintf(buf, "%d.%d.%d\n", + (dev->version >> 28) & 0xf, + (dev->version >> 24) & 0xf, + (dev->version >> 16) & 0xff); +} +static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL); + +static ssize_t show_nets(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(d); + struct esd_usb2 *dev = usb_get_intfdata(intf); + + return sprintf(buf, "%d", dev->net_count); +} +static DEVICE_ATTR(nets, S_IRUGO, show_nets, NULL); + +static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) +{ + int actual_length; + + return usb_bulk_msg(dev->udev, + usb_sndbulkpipe(dev->udev, 2), + msg, + msg->msg.hdr.len << 2, + &actual_length, + 1000); +} + +static int esd_usb2_wait_msg(struct esd_usb2 *dev, + struct esd_usb2_msg *msg) +{ + int actual_length; + + return usb_bulk_msg(dev->udev, + usb_rcvbulkpipe(dev->udev, 1), + msg, + sizeof(*msg), + &actual_length, + 1000); +} + +static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) +{ + int i, err = 0; + + if (dev->rxinitdone) + return 0; + + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf = NULL; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + dev_warn(dev->udev->dev.parent, + "No memory left for URBs\n"); + err = -ENOMEM; + break; + } + + buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, + &urb->transfer_dma); + if (!buf) { + dev_warn(dev->udev->dev.parent, + "No memory left for USB buffer\n"); + err = -ENOMEM; + goto freeurb; + } + + usb_fill_bulk_urb(urb, dev->udev, + usb_rcvbulkpipe(dev->udev, 1), + buf, RX_BUFFER_SIZE, + esd_usb2_read_bulk_callback, dev); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &dev->rx_submitted); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, + urb->transfer_dma); + } + +freeurb: + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); + if (err) + break; + } + + /* Did we submit any URBs */ + if (i == 0) { + dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n"); + return err; + } + + /* Warn if we've couldn't transmit all the URBs */ + if (i < MAX_RX_URBS) { + dev_warn(dev->udev->dev.parent, + "rx performance may be slow\n"); + } + + dev->rxinitdone = 1; + return 0; +} + +/* + * Start interface + */ +static int esd_usb2_start(struct esd_usb2_net_priv *priv) +{ + struct esd_usb2 *dev = priv->usb2; + struct net_device *netdev = priv->netdev; + struct esd_usb2_msg msg; + int err, i; + + /* + * Enable all IDs + * The IDADD message takes up to 64 32 bit bitmasks (2048 
bits). + * Each bit represents one 11 bit CAN identifier. A set bit + * enables reception of the corresponding CAN identifier. A cleared + * bit disabled this identifier. An additional bitmask value + * following the CAN 2.0A bits is used to enable reception of + * extended CAN frames. Only the LSB of this final mask is checked + * for the complete 29 bit ID range. The IDADD message also allows + * filter configuration for an ID subset. In this case you can add + * the number of the starting bitmask (0..64) to the filter.option + * field followed by only some bitmasks. + */ + msg.msg.hdr.cmd = CMD_IDADD; + msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; + msg.msg.filter.net = priv->index; + msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ + for (i = 0; i < ESD_MAX_ID_SEGMENT; i++) + msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff); + /* enable 29bit extended IDs */ + msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001); + + err = esd_usb2_send_msg(dev, &msg); + if (err) + goto failed; + + err = esd_usb2_setup_rx_urbs(dev); + if (err) + goto failed; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + return 0; + +failed: + if (err == -ENODEV) + netif_device_detach(netdev); + + dev_err(netdev->dev.parent, "couldn't start device: %d\n", err); + + return err; +} + +static void unlink_all_urbs(struct esd_usb2 *dev) +{ + struct esd_usb2_net_priv *priv; + int i; + + usb_kill_anchored_urbs(&dev->rx_submitted); + for (i = 0; i < dev->net_count; i++) { + priv = dev->nets[i]; + if (priv) { + usb_kill_anchored_urbs(&priv->tx_submitted); + atomic_set(&priv->active_tx_jobs, 0); + + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; + } + } +} + +static int esd_usb2_open(struct net_device *netdev) +{ + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + int err; + + /* common open */ + err = open_candev(netdev); + if (err) + return err; + + /* finally start device */ + err = esd_usb2_start(priv); + if (err) { + dev_warn(netdev->dev.parent, + "couldn't start device: %d\n", err); + close_candev(netdev); + return err; + } + + priv->open_time = jiffies; + + netif_start_queue(netdev); + + return 0; +} + +static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + struct esd_usb2 *dev = priv->usb2; + struct esd_tx_urb_context *context = NULL; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf = (struct can_frame *)skb->data; + struct esd_usb2_msg *msg; + struct urb *urb; + u8 *buf; + int i, err; + int ret = NETDEV_TX_OK; + size_t size = sizeof(struct esd_usb2_msg); + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + /* create a URB, and a buffer for it, and copy the data to the URB */ + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + dev_err(netdev->dev.parent, "No memory left for URBs\n"); + stats->tx_dropped++; + dev_kfree_skb(skb); + goto nourbmem; + } + + buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, + &urb->transfer_dma); + if (!buf) { + dev_err(netdev->dev.parent, "No memory left for USB buffer\n"); + stats->tx_dropped++; + dev_kfree_skb(skb); + goto nobufmem; + } + + msg = (struct esd_usb2_msg *)buf; + + msg->msg.hdr.len = 3; /* minimal length */ + msg->msg.hdr.cmd = CMD_CAN_TX; + msg->msg.tx.net = priv->index; + msg->msg.tx.dlc = cf->can_dlc; + msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); + + if (cf->can_id & CAN_RTR_FLAG) + msg->msg.tx.dlc |= ESD_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + 
msg->msg.tx.id |= cpu_to_le32(ESD_EXTID); + + for (i = 0; i < cf->can_dlc; i++) + msg->msg.tx.data[i] = cf->data[i]; + + msg->msg.hdr.len += (cf->can_dlc + 3) >> 2; + + for (i = 0; i < MAX_TX_URBS; i++) { + if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { + context = &priv->tx_contexts[i]; + break; + } + } + + /* + * This may never happen. + */ + if (!context) { + dev_warn(netdev->dev.parent, "couldn't find free context\n"); + ret = NETDEV_TX_BUSY; + goto releasebuf; + } + + context->priv = priv; + context->echo_index = i; + context->dlc = cf->can_dlc; + + /* hnd must not be 0 - MSB is stripped in txdone handling */ + msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */ + + usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, + msg->msg.hdr.len << 2, + esd_usb2_write_bulk_callback, context); + + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_anchor_urb(urb, &priv->tx_submitted); + + can_put_echo_skb(skb, netdev, context->echo_index); + + atomic_inc(&priv->active_tx_jobs); + + /* Slow down tx path */ + if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS) + netif_stop_queue(netdev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + can_free_echo_skb(netdev, context->echo_index); + + atomic_dec(&priv->active_tx_jobs); + usb_unanchor_urb(urb); + + stats->tx_dropped++; + + if (err == -ENODEV) + netif_device_detach(netdev); + else + dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err); + + goto releasebuf; + } + + netdev->trans_start = jiffies; + + /* + * Release our reference to this URB, the USB core will eventually free + * it entirely. + */ + usb_free_urb(urb); + + return NETDEV_TX_OK; + +releasebuf: + usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); + +nobufmem: + usb_free_urb(urb); + +nourbmem: + return ret; +} + +static int esd_usb2_close(struct net_device *netdev) +{ + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + struct esd_usb2_msg msg; + int i; + + /* Disable all IDs (see esd_usb2_start()) */ + msg.msg.hdr.cmd = CMD_IDADD; + msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT; + msg.msg.filter.net = priv->index; + msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */ + for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++) + msg.msg.filter.mask[i] = 0; + if (esd_usb2_send_msg(priv->usb2, &msg) < 0) + dev_err(netdev->dev.parent, "sending idadd message failed\n"); + + /* set CAN controller to reset mode */ + msg.msg.hdr.len = 2; + msg.msg.hdr.cmd = CMD_SETBAUD; + msg.msg.setbaud.net = priv->index; + msg.msg.setbaud.rsvd = 0; + msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE); + if (esd_usb2_send_msg(priv->usb2, &msg) < 0) + dev_err(netdev->dev.parent, "sending setbaud message failed\n"); + + priv->can.state = CAN_STATE_STOPPED; + + netif_stop_queue(netdev); + + close_candev(netdev); + + priv->open_time = 0; + + return 0; +} + +static const struct net_device_ops esd_usb2_netdev_ops = { + .ndo_open = esd_usb2_open, + .ndo_stop = esd_usb2_close, + .ndo_start_xmit = esd_usb2_start_xmit, +}; + +static struct can_bittiming_const esd_usb2_bittiming_const = { + .name = "esd_usb2", + .tseg1_min = ESD_USB2_TSEG1_MIN, + .tseg1_max = ESD_USB2_TSEG1_MAX, + .tseg2_min = ESD_USB2_TSEG2_MIN, + .tseg2_max = ESD_USB2_TSEG2_MAX, + .sjw_max = ESD_USB2_SJW_MAX, + .brp_min = ESD_USB2_BRP_MIN, + .brp_max = ESD_USB2_BRP_MAX, + .brp_inc = ESD_USB2_BRP_INC, +}; + +static int esd_usb2_set_bittiming(struct net_device *netdev) +{ + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *bt = &priv->can.bittiming; + 
struct esd_usb2_msg msg; + u32 canbtr; + + canbtr = ESD_USB2_UBR; + canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1); + canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1)) + << ESD_USB2_SJW_SHIFT; + canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1) + & (ESD_USB2_TSEG1_MAX - 1)) + << ESD_USB2_TSEG1_SHIFT; + canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1)) + << ESD_USB2_TSEG2_SHIFT; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + canbtr |= ESD_USB2_3_SAMPLES; + + msg.msg.hdr.len = 2; + msg.msg.hdr.cmd = CMD_SETBAUD; + msg.msg.setbaud.net = priv->index; + msg.msg.setbaud.rsvd = 0; + msg.msg.setbaud.baud = cpu_to_le32(canbtr); + + dev_info(netdev->dev.parent, "setting BTR=%#x\n", canbtr); + + return esd_usb2_send_msg(priv->usb2, &msg); +} + +static int esd_usb2_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + + bec->txerr = priv->bec.txerr; + bec->rxerr = priv->bec.rxerr; + + return 0; +} + +static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode) +{ + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + + if (!priv->open_time) + return -EINVAL; + + switch (mode) { + case CAN_MODE_START: + netif_wake_queue(netdev); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int esd_usb2_probe_one_net(struct usb_interface *intf, int index) +{ + struct esd_usb2 *dev = usb_get_intfdata(intf); + struct net_device *netdev; + struct esd_usb2_net_priv *priv; + int err = 0; + int i; + + netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); + if (!netdev) { + dev_err(&intf->dev, "couldn't alloc candev\n"); + err = -ENOMEM; + goto done; + } + + priv = netdev_priv(netdev); + + init_usb_anchor(&priv->tx_submitted); + atomic_set(&priv->active_tx_jobs, 0); + + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; + + priv->usb2 = dev; + priv->netdev = netdev; + priv->index = index; + + priv->can.state = CAN_STATE_STOPPED; + priv->can.clock.freq = ESD_USB2_CAN_CLOCK; + priv->can.bittiming_const = &esd_usb2_bittiming_const; + priv->can.do_set_bittiming = esd_usb2_set_bittiming; + priv->can.do_set_mode = esd_usb2_set_mode; + priv->can.do_get_berr_counter = esd_usb2_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + + netdev->flags |= IFF_ECHO; /* we support local echo */ + + netdev->netdev_ops = &esd_usb2_netdev_ops; + + SET_NETDEV_DEV(netdev, &intf->dev); + + err = register_candev(netdev); + if (err) { + dev_err(&intf->dev, + "couldn't register CAN device: %d\n", err); + free_candev(netdev); + err = -ENOMEM; + goto done; + } + + dev->nets[index] = priv; + dev_info(netdev->dev.parent, "device %s registered\n", netdev->name); + +done: + return err; +} + +/* + * probe function for new USB2 devices + * + * check version information and number of available + * CAN interfaces + */ +static int esd_usb2_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct esd_usb2 *dev; + struct esd_usb2_msg msg; + int i, err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + err = -ENOMEM; + goto done; + } + + dev->udev = interface_to_usbdev(intf); + + init_usb_anchor(&dev->rx_submitted); + + usb_set_intfdata(intf, dev); + + /* query number of CAN interfaces (nets) */ + msg.msg.hdr.cmd = CMD_VERSION; + msg.msg.hdr.len = 2; + msg.msg.version.rsvd = 0; + msg.msg.version.flags = 0; + msg.msg.version.drv_version = 0; + + err = esd_usb2_send_msg(dev, &msg); + if (err < 0) { + dev_err(&intf->dev, "sending 
version message failed\n"); + goto free_dev; + } + + err = esd_usb2_wait_msg(dev, &msg); + if (err < 0) { + dev_err(&intf->dev, "no version message answer\n"); + goto free_dev; + } + + dev->net_count = (int)msg.msg.version_reply.nets; + dev->version = le32_to_cpu(msg.msg.version_reply.version); + + if (device_create_file(&intf->dev, &dev_attr_firmware)) + dev_err(&intf->dev, + "Couldn't create device file for firmware\n"); + + if (device_create_file(&intf->dev, &dev_attr_hardware)) + dev_err(&intf->dev, + "Couldn't create device file for hardware\n"); + + if (device_create_file(&intf->dev, &dev_attr_nets)) + dev_err(&intf->dev, + "Couldn't create device file for nets\n"); + + /* do per device probing */ + for (i = 0; i < dev->net_count; i++) + esd_usb2_probe_one_net(intf, i); + + return 0; + +free_dev: + kfree(dev); +done: + return err; +} + +/* + * called by the usb core when the device is removed from the system + */ +static void esd_usb2_disconnect(struct usb_interface *intf) +{ + struct esd_usb2 *dev = usb_get_intfdata(intf); + struct net_device *netdev; + int i; + + device_remove_file(&intf->dev, &dev_attr_firmware); + device_remove_file(&intf->dev, &dev_attr_hardware); + device_remove_file(&intf->dev, &dev_attr_nets); + + usb_set_intfdata(intf, NULL); + + if (dev) { + for (i = 0; i < dev->net_count; i++) { + if (dev->nets[i]) { + netdev = dev->nets[i]->netdev; + unregister_netdev(netdev); + free_candev(netdev); + } + } + unlink_all_urbs(dev); + } +} + +/* usb specific object needed to register this driver with the usb subsystem */ +static struct usb_driver esd_usb2_driver = { + .name = "esd_usb2", + .probe = esd_usb2_probe, + .disconnect = esd_usb2_disconnect, + .id_table = esd_usb2_table, +}; + +static int __init esd_usb2_init(void) +{ + int err; + + /* register this driver with the USB subsystem */ + err = usb_register(&esd_usb2_driver); + + if (err) { + err("usb_register failed. Error number %d\n", err); + return err; + } + + return 0; +} +module_init(esd_usb2_init); + +static void __exit esd_usb2_exit(void) +{ + /* deregister this driver with the USB subsystem */ + usb_deregister(&esd_usb2_driver); +} +module_exit(esd_usb2_exit); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 04a03f7003a0d9e0e0d2bf0d852c90f253fcaa56..28c88eeec757361fb2fa3db11a63b57c97b892fb 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -107,12 +107,7 @@ #define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) #define CAS_NCPUS num_online_cpus() -#ifdef CONFIG_CASSINI_NAPI -#define USE_NAPI -#define cas_skb_release(x) netif_receive_skb(x) -#else #define cas_skb_release(x) netif_rx(x) -#endif /* select which firmware to use */ #define USE_HP_WORKAROUND @@ -3063,9 +3058,6 @@ static void cas_init_mac(struct cas *cp) { unsigned char *e = &cp->dev->dev_addr[0]; int i; -#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE - u32 rxcfg; -#endif cas_mac_reset(cp); /* setup core arbitration weight register */ @@ -3133,23 +3125,8 @@ static void cas_init_mac(struct cas *cp) writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); -#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE cp->mac_rx_cfg = cas_setup_multicast(cp); -#else - /* WTZ: Do what Adrian did in cas_set_multicast. Doing - * a writel does not seem to be necessary because Cassini - * seems to preserve the configuration when we do the reset. - * If the chip is in trouble, though, it is not clear if we - * can really count on this behavior. 
cas_set_multicast uses - * spin_lock_irqsave, but we are called only in cas_init_hw and - * cas_init_hw is protected by cas_lock_all, which calls - * spin_lock_irq (so it doesn't need to save the flags, and - * we should be OK for the writel, as that is the only - * difference). - */ - cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); - writel(rxcfg, cp->regs + REG_MAC_RX_CFG); -#endif + spin_lock(&cp->stat_lock[N_TX_RINGS]); cas_clear_mac_err(cp); spin_unlock(&cp->stat_lock[N_TX_RINGS]); diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h index fd17a002b453c7bae6acfec276f13a6b3717560e..dbc47878d83bf250970f70d0afb6afde6839deb3 100644 --- a/drivers/net/cassini.h +++ b/drivers/net/cassini.h @@ -2844,10 +2844,6 @@ struct cas { atomic_t reset_task_pending_all; #endif -#ifdef CONFIG_CASSINI_QGE_DEBUG - atomic_t interrupt_seen; /* 1 if any interrupts are getting through */ -#endif - /* Link-down problem workaround */ #define LINK_TRANSITION_UNKNOWN 0 #define LINK_TRANSITION_ON_FAILURE 1 diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 036b2dfb1d40b2c1155d54a34ecfb0ae75056acc..092f31a126e6daa1fee2bdc75b818081cc2a2fd3 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h @@ -286,7 +286,6 @@ struct board_info { unsigned int clock_mc3; unsigned int clock_mc4; unsigned int espi_nports; - unsigned int clock_cspi; unsigned int clock_elmer0; unsigned char mdio_mdien; unsigned char mdio_mdiinv; diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 53bde15fc94d49933b0c84070084185b6c4ddaef..599d178df62ddd0e1289369802c04c18921a1583 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c @@ -185,9 +185,6 @@ static int t1_pci_intr_handler(adapter_t *adapter) return 0; } -#ifdef CONFIG_CHELSIO_T1_COUGAR -#include "cspi.h" -#endif #ifdef CONFIG_CHELSIO_T1_1G #include "fpga_defs.h" @@ -280,7 +277,7 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); } -#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) +#if defined(CONFIG_CHELSIO_T1_1G) /* * Elmer MI1 MDIO read/write operations. */ @@ -317,7 +314,7 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr, return 0; } -#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) +#if defined(CONFIG_CHELSIO_T1_1G) static const struct mdio_ops mi1_mdio_ops = { .init = mi1_mdio_init, .read = mi1_mdio_read, @@ -752,31 +749,6 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter) mod_detect ? "removed" : "inserted"); } break; -#ifdef CONFIG_CHELSIO_T1_COUGAR - case CHBT_BOARD_COUGAR: - if (adapter->params.nports == 1) { - if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */ - struct cmac *mac = adapter->port[0].mac; - mac->ops->interrupt_handler(mac); - } - if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ - } - } else { - int i, port_bit; - - for_each_port(adapter, i) { - port_bit = i ? 
i + 1 : 0; - if (!(cause & (1 << port_bit))) - continue; - - phy = adapter->port[i].phy; - phy_cause = phy->ops->interrupt_handler(phy); - if (phy_cause & cphy_cause_link_change) - t1_link_changed(adapter, i); - } - } - break; -#endif } t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); return 0; @@ -955,7 +927,6 @@ static int board_init(adapter_t *adapter, const struct board_info *bi) case CHBT_BOARD_N110: case CHBT_BOARD_N210: case CHBT_BOARD_CHT210: - case CHBT_BOARD_COUGAR: t1_tpi_par(adapter, 0xf); t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); break; @@ -1004,10 +975,6 @@ int t1_init_hw_modules(adapter_t *adapter) adapter->regs + A_MC5_CONFIG); } -#ifdef CONFIG_CHELSIO_T1_COUGAR - if (adapter->cspi && t1_cspi_init(adapter->cspi)) - goto out_err; -#endif if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, bi->espi_nports)) goto out_err; @@ -1061,10 +1028,6 @@ void t1_free_sw_modules(adapter_t *adapter) t1_tp_destroy(adapter->tp); if (adapter->espi) t1_espi_destroy(adapter->espi); -#ifdef CONFIG_CHELSIO_T1_COUGAR - if (adapter->cspi) - t1_cspi_destroy(adapter->cspi); -#endif } static void __devinit init_link_config(struct link_config *lc, @@ -1084,14 +1047,6 @@ static void __devinit init_link_config(struct link_config *lc, } } -#ifdef CONFIG_CHELSIO_T1_COUGAR - if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) { - pr_err("%s: CSPI initialization failed\n", - adapter->name); - goto error; - } -#endif - /* * Allocate and initialize the data structures that hold the SW state of * the Terminator HW modules. diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 80471269977adefdaae7e0a7845611a2e1e7cadc..09610323a948cab243d083c1a8435853b8893184 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -40,9 +40,9 @@ #include "cnic_if.h" #include "bnx2.h" -#include "bnx2x_reg.h" -#include "bnx2x_fw_defs.h" -#include "bnx2x_hsi.h" +#include "bnx2x/bnx2x_reg.h" +#include "bnx2x/bnx2x_fw_defs.h" +#include "bnx2x/bnx2x_hsi.h" #include "../scsi/bnx2i/57xx_iscsi_constants.h" #include "../scsi/bnx2i/57xx_iscsi_hsi.h" #include "cnic.h" @@ -257,7 +257,7 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) { u32 i; - for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { + for (i = 0; i < cp->max_cid_space; i++) { if (cp->ctx_tbl[i].cid == cid) { *l5_cid = i; return 0; @@ -804,7 +804,7 @@ static void cnic_free_resc(struct cnic_dev *dev) cnic_free_dma(dev, &cp->conn_buf_info); cnic_free_dma(dev, &cp->kwq_info); cnic_free_dma(dev, &cp->kwq_16_data_info); - cnic_free_dma(dev, &cp->kcq_info); + cnic_free_dma(dev, &cp->kcq1.dma); kfree(cp->iscsi_tbl); cp->iscsi_tbl = NULL; kfree(cp->ctx_tbl); @@ -863,6 +863,37 @@ static int cnic_alloc_context(struct cnic_dev *dev) return 0; } +static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info) +{ + int err, i, is_bnx2 = 0; + struct kcqe **kcq; + + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) + is_bnx2 = 1; + + err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2); + if (err) + return err; + + kcq = (struct kcqe **) info->dma.pg_arr; + info->kcq = kcq; + + if (is_bnx2) + return 0; + + for (i = 0; i < KCQ_PAGE_CNT; i++) { + struct bnx2x_bd_chain_next *next = + (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; + int j = i + 1; + + if (j >= KCQ_PAGE_CNT) + j = 0; + next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; + next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; + } + return 0; +} + static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) { struct cnic_local *cp = dev->cnic_priv; @@ -954,10 
+985,9 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) goto error; cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; - ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1); + ret = cnic_alloc_kcq(dev, &cp->kcq1); if (ret) goto error; - cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; ret = cnic_alloc_context(dev); if (ret) @@ -981,17 +1011,10 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; int ctx_blk_size = cp->ethdev->ctx_blk_size; - int total_mem, blks, i, cid_space; - - if (BNX2X_ISCSI_START_CID < ethdev->starting_cid) - return -EINVAL; - - cid_space = MAX_ISCSI_TBL_SZ + - (BNX2X_ISCSI_START_CID - ethdev->starting_cid); + int total_mem, blks, i; - total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space; + total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; blks = total_mem / ctx_blk_size; if (total_mem % ctx_blk_size) blks++; @@ -1035,16 +1058,27 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 start_cid = ethdev->starting_cid; int i, j, n, ret, pages; struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; + cp->max_cid_space = MAX_ISCSI_TBL_SZ; + cp->iscsi_start_cid = start_cid; + if (start_cid < BNX2X_ISCSI_START_CID) { + u32 delta = BNX2X_ISCSI_START_CID - start_cid; + + cp->iscsi_start_cid = BNX2X_ISCSI_START_CID; + cp->max_cid_space += delta; + } + cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, GFP_KERNEL); if (!cp->iscsi_tbl) goto error; cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * - MAX_CNIC_L5_CONTEXT, GFP_KERNEL); + cp->max_cid_space, GFP_KERNEL); if (!cp->ctx_tbl) goto error; @@ -1053,7 +1087,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; } - pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) / + pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / PAGE_SIZE; ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); @@ -1061,7 +1095,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) return -ENOMEM; n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; - for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) { + for (i = 0, j = 0; i < cp->max_cid_space; i++) { long off = CNIC_KWQ16_DATA_SIZE * (i % n); cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; @@ -1072,22 +1106,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) j++; } - ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0); + ret = cnic_alloc_kcq(dev, &cp->kcq1); if (ret) goto error; - cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; - - for (i = 0; i < KCQ_PAGE_CNT; i++) { - struct bnx2x_bd_chain_next *next = - (struct bnx2x_bd_chain_next *) - &cp->kcq[i][MAX_KCQE_CNT]; - int j = i + 1; - - if (j >= KCQ_PAGE_CNT) - j = 0; - next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32; - next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff; - } pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; @@ -2120,18 +2141,20 @@ static u16 cnic_bnx2x_hw_idx(u16 idx) return idx; } -static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) +static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) { struct cnic_local *cp = dev->cnic_priv; - u16 i, ri, last; + u16 i, ri, hw_prod, last; struct kcqe *kcqe; int kcqe_cnt = 0, last_cnt = 0; - i = ri = last = 
*sw_prod; + i = ri = last = info->sw_prod_idx; ri &= MAX_KCQ_IDX; + hw_prod = *info->hw_prod_idx_ptr; + hw_prod = cp->hw_idx(hw_prod); while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { - kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; + kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; cp->completed_kcq[kcqe_cnt++] = kcqe; i = cp->next_idx(i); ri = i & MAX_KCQ_IDX; @@ -2141,7 +2164,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) } } - *sw_prod = last; + info->sw_prod_idx = last; return last_cnt; } @@ -2184,6 +2207,9 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp) u16 tx_cons = *cp->tx_cons_ptr; int comp = 0; + if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags)) + return; + if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) comp = cnic_l2_completion(cp); @@ -2197,103 +2223,79 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp) clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); } -static int cnic_service_bnx2(void *data, void *status_blk) +static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) { - struct cnic_dev *dev = data; - struct status_block *sblk = status_blk; struct cnic_local *cp = dev->cnic_priv; - u32 status_idx = sblk->status_idx; - u16 hw_prod, sw_prod; + u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; int kcqe_cnt; - if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) - return status_idx; - cp->kwq_con_idx = *cp->kwq_con_idx_ptr; - hw_prod = sblk->status_completion_producer_index; - sw_prod = cp->kcq_prod_idx; - while (sw_prod != hw_prod) { - kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); - if (kcqe_cnt == 0) - goto done; + while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { service_kcqes(dev, kcqe_cnt); /* Tell compiler that status_blk fields can change. */ barrier(); - if (status_idx != sblk->status_idx) { - status_idx = sblk->status_idx; + if (status_idx != *cp->kcq1.status_idx_ptr) { + status_idx = (u16) *cp->kcq1.status_idx_ptr; cp->kwq_con_idx = *cp->kwq_con_idx_ptr; - hw_prod = sblk->status_completion_producer_index; } else break; } -done: - CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); - - cp->kcq_prod_idx = sw_prod; + CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); cnic_chk_pkt_rings(cp); + return status_idx; } -static void cnic_service_bnx2_msix(unsigned long data) +static int cnic_service_bnx2(void *data, void *status_blk) { - struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_dev *dev = data; struct cnic_local *cp = dev->cnic_priv; - struct status_block_msix *status_blk = cp->status_blk.bnx2; - u32 status_idx = status_blk->status_idx; - u16 hw_prod, sw_prod; - int kcqe_cnt; + u32 status_idx = *cp->kcq1.status_idx_ptr; - cp->kwq_con_idx = status_blk->status_cmd_consumer_index; - - hw_prod = status_blk->status_completion_producer_index; - sw_prod = cp->kcq_prod_idx; - while (sw_prod != hw_prod) { - kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); - if (kcqe_cnt == 0) - goto done; - - service_kcqes(dev, kcqe_cnt); + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) + return status_idx; - /* Tell compiler that status_blk fields can change. 
*/ - barrier(); - if (status_idx != status_blk->status_idx) { - status_idx = status_blk->status_idx; - cp->kwq_con_idx = status_blk->status_cmd_consumer_index; - hw_prod = status_blk->status_completion_producer_index; - } else - break; - } + return cnic_service_bnx2_queues(dev); +} -done: - CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); - cp->kcq_prod_idx = sw_prod; +static void cnic_service_bnx2_msix(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; - cnic_chk_pkt_rings(cp); + cp->last_status_idx = cnic_service_bnx2_queues(dev); - cp->last_status_idx = status_idx; CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); } +static void cnic_doirq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; + + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + prefetch(cp->status_blk.gen); + prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + + tasklet_schedule(&cp->cnic_irq_task); + } +} + static irqreturn_t cnic_irq(int irq, void *dev_instance) { struct cnic_dev *dev = dev_instance; struct cnic_local *cp = dev->cnic_priv; - u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; if (cp->ack_int) cp->ack_int(dev); - prefetch(cp->status_blk.gen); - prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); - - if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) - tasklet_schedule(&cp->cnic_irq_task); + cnic_doirq(dev); return IRQ_HANDLED; } @@ -2324,60 +2326,50 @@ static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) IGU_INT_DISABLE, 0); } -static void cnic_service_bnx2x_bh(unsigned long data) +static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) { - struct cnic_dev *dev = (struct cnic_dev *) data; - struct cnic_local *cp = dev->cnic_priv; - u16 hw_prod, sw_prod; - struct cstorm_status_block_c *sblk = - &cp->status_blk.bnx2x->c_status_block; - u32 status_idx = sblk->status_block_index; + u32 last_status = *info->status_idx_ptr; int kcqe_cnt; - if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) - return; - - hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; - hw_prod = cp->hw_idx(hw_prod); - sw_prod = cp->kcq_prod_idx; - while (sw_prod != hw_prod) { - kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); - if (kcqe_cnt == 0) - goto done; + while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { service_kcqes(dev, kcqe_cnt); /* Tell compiler that sblk fields can change. 
*/ barrier(); - if (status_idx == sblk->status_block_index) + if (last_status == *info->status_idx_ptr) break; - status_idx = sblk->status_block_index; - hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; - hw_prod = cp->hw_idx(hw_prod); + last_status = *info->status_idx_ptr; } + return last_status; +} -done: - CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); +static void cnic_service_bnx2x_bh(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + u32 status_idx; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) + return; + + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); + + CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, status_idx, IGU_INT_ENABLE, 1); - - cp->kcq_prod_idx = sw_prod; } static int cnic_service_bnx2x(void *data, void *status_blk) { struct cnic_dev *dev = data; struct cnic_local *cp = dev->cnic_priv; - u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; - if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { - prefetch(cp->status_blk.bnx2x); - prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + cnic_doirq(dev); - tasklet_schedule(&cp->cnic_irq_task); - cnic_chk_pkt_rings(cp); - } + cnic_chk_pkt_rings(cp); return 0; } @@ -2824,7 +2816,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, err = ip_route_output_key(&init_net, &rt, &fl); if (!err) - *dst = &rt->u.dst; + *dst = &rt->dst; return err; #else return -ENETUNREACH; @@ -2996,7 +2988,7 @@ static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) static int cnic_cm_abort(struct cnic_sock *csk) { struct cnic_local *cp = csk->dev->cnic_priv; - u32 opcode; + u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; if (!cnic_in_use(csk)) return -EINVAL; @@ -3008,12 +3000,9 @@ static int cnic_cm_abort(struct cnic_sock *csk) * connect was not successful. 
*/ - csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; - if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) - opcode = csk->state; - else - opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; cp->close_conn(csk, opcode); + if (csk->state != opcode) + return -EALREADY; return 0; } @@ -3026,6 +3015,8 @@ static int cnic_cm_close(struct cnic_sock *csk) if (cnic_close_prep(csk)) { csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; return cnic_cm_close_req(csk); + } else { + return -EALREADY; } return 0; } @@ -3141,12 +3132,6 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) break; case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: - if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { - cnic_cm_upcall(cp, csk, opcode); - break; - } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) - csk->state = opcode; - /* fall through */ case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: @@ -3202,19 +3187,22 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) { - if ((opcode == csk->state) || - (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && - csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { - if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) - return 1; + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + /* Unsolicited RESET_COMP or RESET_RECEIVED */ + opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; + csk->state = opcode; } - /* 57710+ only workaround to handle unsolicited RESET_COMP - * which will be treated like a RESET RCVD notification - * which triggers the clean up procedure + + /* 1. If event opcode matches the expected event in csk->state + * 2. If the expected event is CLOSE_COMP, we accept any event + * 3. If the expected event is 0, meaning the connection was never + * never established, we accept the opcode from cm_abort. 
*/ - else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) { + if (opcode == csk->state || csk->state == 0 || + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) { if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { - csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; + if (csk->state == 0) + csk->state = opcode; return 1; } } @@ -3226,8 +3214,14 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) struct cnic_dev *dev = csk->dev; struct cnic_local *cp = dev->cnic_priv; + if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { + cnic_cm_upcall(cp, csk, opcode); + return; + } + clear_bit(SK_F_CONNECT_START, &csk->flags); cnic_close_conn(csk); + csk->state = opcode; cnic_cm_upcall(cp, csk, opcode); } @@ -3257,8 +3251,12 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: - if (cnic_ready_to_close(csk, opcode)) - cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; + if (cnic_ready_to_close(csk, opcode)) { + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; + else + close_complete = 1; + } break; case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; @@ -3694,7 +3692,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; struct cnic_eth_dev *ethdev = cp->ethdev; struct status_block *sblk = cp->status_blk.gen; - u32 val; + u32 val, kcq_cid_addr, kwq_cid_addr; int err; cnic_set_bnx2_mac(dev); @@ -3719,7 +3717,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) cnic_init_context(dev, KWQ_CID); cnic_init_context(dev, KCQ_CID); - cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID); + kwq_cid_addr = GET_CID_ADDR(KWQ_CID); cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; cp->max_kwq_idx = MAX_KWQ_IDX; @@ -3735,50 +3733,59 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) /* Initialize the kernel work queue context. */ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); val = (u32) cp->kwq_info.pgtbl_map; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + + kcq_cid_addr = GET_CID_ADDR(KCQ_CID); + cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; - cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID); - cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; + cp->kcq1.sw_prod_idx = 0; + cp->kcq1.hw_prod_idx_ptr = + (u16 *) &sblk->status_completion_producer_index; - cp->kcq_prod_idx = 0; + cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; /* Initialize the kernel complete queue context. 
*/ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); - val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32); - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); + val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); - val = (u32) cp->kcq_info.pgtbl_map; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + val = (u32) cp->kcq1.dma.pgtbl_map; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); cp->int_num = 0; if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *msblk = cp->status_blk.bnx2; u32 sb_id = cp->status_blk_num; u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); + cp->kcq1.hw_prod_idx_ptr = + (u16 *) &msblk->status_completion_producer_index; + cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; + cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); } /* Enable Commnad Scheduler notification when we write to the @@ -4123,33 +4130,39 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) u8 sb_id = cp->status_blk_num; ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, - BNX2X_ISCSI_START_CID); + cp->iscsi_start_cid); if (ret) return -ENOMEM; - cp->kcq_io_addr = BAR_CSTRORM_INTMEM + + cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); - cp->kcq_prod_idx = 0; + cp->kcq1.sw_prod_idx = 0; + + cp->kcq1.hw_prod_idx_ptr = + &cp->status_blk.bnx2x->c_status_block.index_values[ + HC_INDEX_C_ISCSI_EQ_CONS]; + cp->kcq1.status_idx_ptr = + &cp->status_blk.bnx2x->c_status_block.status_block_index; cnic_get_bnx2x_iscsi_info(dev); /* Only 1 EQ */ - CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); + CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); CNIC_WR(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); CNIC_WR(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), - cp->kcq_info.pg_map_arr[1] & 0xffffffff); + cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); CNIC_WR(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, - (u64) cp->kcq_info.pg_map_arr[1] >> 32); + (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); CNIC_WR(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), - cp->kcq_info.pg_map_arr[0] & 0xffffffff); + cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); CNIC_WR(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, - (u64) cp->kcq_info.pg_map_arr[0] >> 32); + (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); CNIC_WR8(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); CNIC_WR16(dev, BAR_CSTRORM_INTMEM + @@ -4377,7 +4390,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 0); CNIC_WR(dev, 
BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0); - CNIC_WR16(dev, cp->kcq_io_addr, 0); + CNIC_WR16(dev, cp->kcq1.io_addr, 0); cnic_free_resc(dev); } diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h index 08b1235d987db09492df534521a2fd9d89c0e8d5..275c36114d852a4fd5bd97d1e1da6feea0b629bb 100644 --- a/drivers/net/cnic.h +++ b/drivers/net/cnic.h @@ -169,6 +169,16 @@ struct cnic_context { } proto; }; +struct kcq_info { + struct cnic_dma dma; + struct kcqe **kcq; + + u16 *hw_prod_idx_ptr; + u16 sw_prod_idx; + u16 *status_idx_ptr; + u32 io_addr; +}; + struct cnic_local { spinlock_t cnic_ulp_lock; @@ -202,9 +212,6 @@ struct cnic_local { u16 rx_cons; u16 tx_cons; - u32 kwq_cid_addr; - u32 kcq_cid_addr; - struct cnic_dma kwq_info; struct kwqe **kwq; @@ -218,11 +225,7 @@ struct cnic_local { u16 *kwq_con_idx_ptr; u16 kwq_con_idx; - struct cnic_dma kcq_info; - struct kcqe **kcq; - - u16 kcq_prod_idx; - u32 kcq_io_addr; + struct kcq_info kcq1; union { void *gen; @@ -248,8 +251,10 @@ struct cnic_local { struct cnic_iscsi *iscsi_tbl; struct cnic_context *ctx_tbl; struct cnic_id_tbl cid_tbl; - int max_iscsi_conn; atomic_t iscsi_conn; + u32 iscsi_start_cid; + + u32 max_cid_space; /* per connection parameters */ int num_iscsi_tasks; diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 0c55177db046ddf6ce4bb0a684dea0fed459b201..344c842d55aba180eaade74b45b4912b7facd8b8 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.1.2" -#define CNIC_MODULE_RELDATE "May 26, 2010" +#define CNIC_MODULE_VERSION "2.1.3" +#define CNIC_MODULE_RELDATE "June 24, 2010" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 23786ee34beded0a9ae6d2c0484ca75965eeab93..e1f6156b3710e9888f779610ddb10a52fcfd5d9d 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -846,11 +846,8 @@ static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EINVAL; if (!priv->phy) return -EINVAL; - if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) || - (cmd == SIOCSMIIREG)) - return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd); - return -EOPNOTSUPP; + return phy_mii_ioctl(priv->phy, ifr, cmd); } static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -964,7 +961,7 @@ static int cpmac_open(struct net_device *dev) struct sk_buff *skb; mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); - if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { + if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { if (netif_msg_drv(priv)) printk(KERN_ERR "%s: failed to request registers\n", dev->name); @@ -972,7 +969,7 @@ static int cpmac_open(struct net_device *dev) goto fail_reserve; } - priv->regs = ioremap(mem->start, mem->end - mem->start); + priv->regs = ioremap(mem->start, resource_size(mem)); if (!priv->regs) { if (netif_msg_drv(priv)) printk(KERN_ERR "%s: failed to remap registers\n", @@ -1049,7 +1046,7 @@ static int cpmac_open(struct net_device *dev) iounmap(priv->regs); fail_remap: - release_mem_region(mem->start, mem->end - mem->start); + release_mem_region(mem->start, resource_size(mem)); fail_reserve: return res; @@ -1077,7 +1074,7 @@ static int cpmac_stop(struct net_device *dev) free_irq(dev->irq, dev); iounmap(priv->regs); mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); - release_mem_region(mem->start, mem->end - mem->start); + 
release_mem_region(mem->start, resource_size(mem)); priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; for (i = 0; i < priv->ring_size; i++) { if (priv->rx_head[i].skb) { diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 7e00027b9f8e8fbaf395f18f30d1ee059a87234f..81475cc80e1cfd77f364afecde37bf21d9261052 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1702,11 +1702,7 @@ e100_set_network_leds(int active) if (!current_speed) { /* Make LED red, link is down */ -#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION) - CRIS_LED_NETWORK_SET(CRIS_LED_RED); -#else CRIS_LED_NETWORK_SET(CRIS_LED_OFF); -#endif } else if (light_leds) { if (current_speed == 10) { CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE); diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 2ccb9f12805b5f75c0f61c1c1550c573084650fc..d325e01a53e0a79bf8af90f6103df11513604cc5 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -170,22 +170,12 @@ static char version[] __initdata = /* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps them to system IRQ numbers. This mapping is card specific and is set to the configuration of the Cirrus Eval board for this chip. */ -#if defined(CONFIG_SH_HICOSH4) -static unsigned int netcard_portlist[] __used __initdata = - { 0x0300, 0}; -static unsigned int cs8900_irq_map[] = {1,0,0,0}; -#elif defined(CONFIG_MACH_IXDP2351) +#if defined(CONFIG_MACH_IXDP2351) static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; #elif defined(CONFIG_ARCH_IXDP2X01) static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; -#elif defined(CONFIG_ARCH_PNX010X) -#include -#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ -#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ -static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0}; -static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0}; #elif defined(CONFIG_MACH_MX31ADS) #include static unsigned int netcard_portlist[] __used __initdata = { @@ -218,7 +208,6 @@ static unsigned int net_debug = DEBUGGING; /* Information that need to be kept for each board. */ struct net_local { - struct net_device_stats stats; int chip_type; /* one of: CS8900, CS8920, CS8920M */ char chip_revision; /* revision letter of the chip ('A'...) 
*/ int send_cmd; /* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */ @@ -257,7 +246,7 @@ static void reset_chip(struct net_device *dev); static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer); static int get_eeprom_cksum(int off, int len, int *buffer); static int set_mac_address(struct net_device *dev, void *addr); -static void count_rx_errors(int status, struct net_local *lp); +static void count_rx_errors(int status, struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER static void net_poll_controller(struct net_device *dev); #endif @@ -372,18 +361,6 @@ writeword(unsigned long base_addr, int portno, u16 value) { __raw_writel(value, base_addr + (portno << 1)); } -#elif defined(CONFIG_ARCH_PNX010X) -static u16 -readword(unsigned long base_addr, int portno) -{ - return inw(base_addr + (portno << 1)); -} - -static void -writeword(unsigned long base_addr, int portno, u16 value) -{ - outw(value, base_addr + (portno << 1)); -} #else static u16 readword(unsigned long base_addr, int portno) @@ -546,30 +523,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) #endif } -#ifdef CONFIG_ARCH_PNX010X - initialize_ebi(); - - /* Map GPIO registers for the pins connected to the CS8900a. */ - if (map_cirrus_gpio() < 0) - return -ENODEV; - - reset_cirrus(); - - /* Map event-router registers. */ - if (map_event_router() < 0) - return -ENODEV; - - enable_cirrus_irq(); - - unmap_cirrus_gpio(); - unmap_event_router(); - - dev->base_addr = ioaddr; - - for (i = 0 ; i < 3 ; i++) - readreg(dev, 0); -#endif - /* Grab the region so we can find another board if autoIRQ fails. */ /* WTF is going on here? */ if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) { @@ -579,12 +532,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) goto out1; } -#ifdef CONFIG_SH_HICOSH4 - /* truly reset the chip */ - writeword(ioaddr, ADD_PORT, 0x0114); - writeword(ioaddr, DATA_PORT, 0x0040); -#endif - /* if they give us an odd I/O address, then do ONE write to the address port, to get it back to address zero, where we expect to find the EISA signature word. An IO with a base of 0x3 @@ -650,37 +597,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) the driver will always do *something* instead of complain that adapter_cnf is 0. */ -#ifdef CONFIG_SH_HICOSH4 - if (1) { - /* For the HiCO.SH4 board, things are different: we don't - have EEPROM, but there is some data in flash, so we go - get it there directly (MAC). */ - __u16 *confd; - short cnt; - if (((* (volatile __u32 *) 0xa0013ff0) & 0x00ffffff) - == 0x006c3000) { - confd = (__u16*) 0xa0013fc0; - } else { - confd = (__u16*) 0xa001ffc0; - } - cnt = (*confd++ & 0x00ff) >> 1; - while (--cnt > 0) { - __u16 j = *confd++; - - switch (j & 0x0fff) { - case PP_IA: - for (i = 0; i < ETH_ALEN/2; i++) { - dev->dev_addr[i*2] = confd[i] & 0xFF; - dev->dev_addr[i*2+1] = confd[i] >> 8; - } - break; - } - j = (j >> 12) + 1; - confd += j; - cnt -= j; - } - } else -#endif if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == (EEPROM_OK|EEPROM_PRESENT)) { @@ -735,11 +651,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) printk("\n"); /* First check to see if an EEPROM is attached. 
*/ -#ifdef CONFIG_SH_HICOSH4 /* no EEPROM on HiCO, don't hazzle with it here */ - if (1) { - printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n"); - } else -#endif + if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0) printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n"); else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) { @@ -983,7 +895,7 @@ dma_rx(struct net_device *dev) dev->name, (unsigned long)bp, status, length); } if ((status & RX_OK) == 0) { - count_rx_errors(status, lp); + count_rx_errors(status, dev); goto skip_this_frame; } @@ -992,7 +904,7 @@ dma_rx(struct net_device *dev) if (skb == NULL) { if (net_debug) /* I don't think we want to do this to a stressed system */ printk("%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; /* AKPM: advance bp to the next frame */ skip_this_frame: @@ -1022,8 +934,8 @@ dma_rx(struct net_device *dev) } skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } #endif /* ALLOW_DMA */ @@ -1276,7 +1188,6 @@ net_open(struct net_device *dev) int i; int ret; -#if !defined(CONFIG_SH_HICOSH4) && !defined(CONFIG_ARCH_PNX010X) /* uses irq#1, so this won't work */ if (dev->irq < 2) { /* Allow interrupts to be generated by the chip */ /* Cirrus' release had this: */ @@ -1305,7 +1216,6 @@ net_open(struct net_device *dev) } } else -#endif { #ifndef CONFIG_CS89x0_NONISA_IRQ if (((1 << dev->irq) & lp->irq_map) == 0) { @@ -1391,9 +1301,6 @@ net_open(struct net_device *dev) case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break; default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2); } -#ifdef CONFIG_ARCH_PNX010X - result = A_CNF_10B_T; -#endif if (!result) { printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); release_dma: @@ -1552,7 +1459,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev) /* Write the contents of the packet */ writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); spin_unlock_irqrestore(&lp->lock, flags); - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dev_kfree_skb (skb); /* @@ -1598,18 +1505,23 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) net_rx(dev); break; case ISQ_TRANSMITTER_EVENT: - lp->stats.tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); /* Inform upper layers. 
*/ if ((status & ( TX_OK | TX_LOST_CRS | TX_SQE_ERROR | TX_LATE_COL | TX_16_COL)) != TX_OK) { - if ((status & TX_OK) == 0) lp->stats.tx_errors++; - if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++; - if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++; - if (status & TX_LATE_COL) lp->stats.tx_window_errors++; - if (status & TX_16_COL) lp->stats.tx_aborted_errors++; + if ((status & TX_OK) == 0) + dev->stats.tx_errors++; + if (status & TX_LOST_CRS) + dev->stats.tx_carrier_errors++; + if (status & TX_SQE_ERROR) + dev->stats.tx_heartbeat_errors++; + if (status & TX_LATE_COL) + dev->stats.tx_window_errors++; + if (status & TX_16_COL) + dev->stats.tx_aborted_errors++; } break; case ISQ_BUFFER_EVENT: @@ -1651,10 +1563,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) #endif break; case ISQ_RX_MISS_EVENT: - lp->stats.rx_missed_errors += (status >>6); + dev->stats.rx_missed_errors += (status >> 6); break; case ISQ_TX_COL_EVENT: - lp->stats.collisions += (status >>6); + dev->stats.collisions += (status >> 6); break; } } @@ -1662,22 +1574,24 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) } static void -count_rx_errors(int status, struct net_local *lp) +count_rx_errors(int status, struct net_device *dev) { - lp->stats.rx_errors++; - if (status & RX_RUNT) lp->stats.rx_length_errors++; - if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++; - if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT))) + dev->stats.rx_errors++; + if (status & RX_RUNT) + dev->stats.rx_length_errors++; + if (status & RX_EXTRA_DATA) + dev->stats.rx_length_errors++; + if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT))) /* per str 172 */ - lp->stats.rx_crc_errors++; - if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++; + dev->stats.rx_crc_errors++; + if (status & RX_DRIBBLE) + dev->stats.rx_frame_errors++; } /* We have a good packet(s), get it/them out of the buffers. */ static void net_rx(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; int status, length; @@ -1686,7 +1600,7 @@ net_rx(struct net_device *dev) length = readword(ioaddr, RX_FRAME_PORT); if ((status & RX_OK) == 0) { - count_rx_errors(status, lp); + count_rx_errors(status, dev); return; } @@ -1696,7 +1610,7 @@ net_rx(struct net_device *dev) #if 0 /* Again, this seems a cruel thing to do */ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); #endif - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_reserve(skb, 2); /* longword align L3 header */ @@ -1713,8 +1627,8 @@ net_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } #if ALLOW_DMA @@ -1765,11 +1679,11 @@ net_get_stats(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); /* Update the statistics from the device registers. 
*/ - lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); - lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6); + dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); + dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6); spin_unlock_irqrestore(&lp->lock, flags); - return &lp->stats; + return &dev->stats; } static void set_multicast_list(struct net_device *dev) diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h index 204ed37fa9d5a81fc23f28c7c82c1cf16ee02502..91423b70bb45f8544915b7d386c949afb51bcaba 100644 --- a/drivers/net/cs89x0.h +++ b/drivers/net/cs89x0.h @@ -437,11 +437,7 @@ #define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */ #define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */ #define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */ -#ifdef CONFIG_SH_HICOSH4 -#define CS8900_IRQ_MAP 0x0002 /* HiCO-SH4 board has its IRQ on #1 */ -#else #define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */ -#endif #define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */ diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index e3f1b856649521b1c0c1841254b6e9d21a361305..066fd5b09fda8fc61d36b8e08456ea96f5e135f6 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2311,15 +2311,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; /* Check t.len sanity ? */ - fw_data = kmalloc(t.len, GFP_KERNEL); - if (!fw_data) - return -ENOMEM; - - if (copy_from_user - (fw_data, useraddr + sizeof(t), t.len)) { - kfree(fw_data); - return -EFAULT; - } + fw_data = memdup_user(useraddr + sizeof(t), t.len); + if (IS_ERR(fw_data)) + return PTR_ERR(fw_data); ret = t3_load_fw(adapter, fw_data, t.len); kfree(fw_data); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 5962b911b5bd34e9d1433bc6ab99976c066838f5..8ff96c6f6de53304789a16de04a98ac86d1f7d15 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -203,15 +203,11 @@ static inline void refill_rspq(struct adapter *adapter, */ static inline int need_skb_unmap(void) { - /* - * This structure is used to tell if the platform needs buffer - * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything. - */ - struct dummy { - DEFINE_DMA_UNMAP_ADDR(addr); - }; - - return sizeof(struct dummy) != 0; +#ifdef CONFIG_NEED_DMA_MAP_STATE + return 1; +#else + return 0; +#endif } /** diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 95a8ba0759f1aa61bf13517db07d94f644897443..427c451be1a78d64b6ed7ca4e710ce4fddee2192 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -679,14 +679,6 @@ int t3_seeprom_wp(struct adapter *adapter, int enable) return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); } -/* - * Convert a character holding a hex digit to a number. - */ -static unsigned int hex2int(unsigned char c) -{ - return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10; -} - /** * get_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read @@ -727,15 +719,15 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) p->port_type[0] = uses_xaui(adapter) ? 1 : 2; p->port_type[1] = uses_xaui(adapter) ? 
6 : 2; } else { - p->port_type[0] = hex2int(vpd.port0_data[0]); - p->port_type[1] = hex2int(vpd.port1_data[0]); + p->port_type[0] = hex_to_bin(vpd.port0_data[0]); + p->port_type[1] = hex_to_bin(vpd.port1_data[0]); p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); } for (i = 0; i < 6; i++) - p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 + - hex2int(vpd.na_data[2 * i + 1]); + p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 + + hex_to_bin(vpd.na_data[2 * i + 1]); return 0; } diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h index 9d0bd9dd9ab1344155479a7ea2d42f788c8a83b3..8bda06e366c826592067c2755a1fc04bf2eca6a2 100644 --- a/drivers/net/cxgb3/version.h +++ b/drivers/net/cxgb3/version.h @@ -35,10 +35,10 @@ #define DRV_DESC "Chelsio T3 Network Driver" #define DRV_NAME "cxgb3" /* Driver version */ -#define DRV_VERSION "1.1.3-ko" +#define DRV_VERSION "1.1.4-ko" /* Firmware version */ #define FW_VERSION_MAJOR 7 -#define FW_VERSION_MINOR 4 +#define FW_VERSION_MINOR 10 #define FW_VERSION_MICRO 0 #endif /* __CHELSIO_VERSION_H */ diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h index dd1770e075e68c8bbe4b95828c903f5dae0eaa6b..6e562c0dad7dee0a309b87a0e7cec03a91cf69ec 100644 --- a/drivers/net/cxgb4/cxgb4.h +++ b/drivers/net/cxgb4/cxgb4.h @@ -219,6 +219,10 @@ struct adapter_params { struct vpd_params vpd; struct pci_params pci; + unsigned int sf_size; /* serial flash size in bytes */ + unsigned int sf_nsec; /* # of flash sectors */ + unsigned int sf_fw_start; /* start of FW image in flash */ + unsigned int fw_vers; unsigned int tp_vers; u8 api_vers[7]; @@ -290,7 +294,9 @@ struct port_info { u8 rx_offload; /* CSO, etc */ u8 nqsets; /* # of qsets */ u8 first_qset; /* index of first qset */ + u8 rss_mode; struct link_config link_cfg; + u16 *rss; }; /* port_info.rx_offload flags */ @@ -305,7 +311,6 @@ enum { /* adapter flags */ FULL_INIT_DONE = (1 << 0), USING_MSI = (1 << 1), USING_MSIX = (1 << 2), - QUEUES_BOUND = (1 << 3), FW_OK = (1 << 4), }; @@ -477,7 +482,8 @@ struct adapter { struct pci_dev *pdev; struct device *pdev_dev; unsigned long registered_device_map; - unsigned long flags; + unsigned int fn; + unsigned int flags; const char *name; int msg_enable; @@ -646,6 +652,7 @@ void t4_intr_disable(struct adapter *adapter); void t4_intr_clear(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); +int t4_wait_dev_ready(struct adapter *adap); int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index 58045b00cf40729ac54745bba13fdedc2cb11741..c327527fbbc854d6cf4f63d0457221cf434869e6 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -67,7 +67,7 @@ #include "t4fw_api.h" #include "l2t.h" -#define DRV_VERSION "1.0.0-ko" +#define DRV_VERSION "1.3.0-ko" #define DRV_DESC "Chelsio T4 Network Driver" /* @@ -77,6 +77,76 @@ */ #define MAX_SGE_TIMERVAL 200U +#ifdef CONFIG_PCI_IOV +/* + * Virtual Function provisioning constants. We need two extra Ingress Queues + * with Interrupt capability to serve as the VF's Firmware Event Queue and + * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free + * Lists associated with them). For each Ethernet/Control Egress Queue and + * for each Free List, we need an Egress Context. 
+ */ +enum { + VFRES_NPORTS = 1, /* # of "ports" per VF */ + VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ + + VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ + VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ + VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ + VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ + VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ + VFRES_TC = 0, /* PCI-E traffic class */ + VFRES_NEXACTF = 16, /* # of exact MPS filters */ + + VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, + VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, +}; + +/* + * Provide a Port Access Rights Mask for the specified PF/VF. This is very + * static and likely not to be useful in the long run. We really need to + * implement some form of persistent configuration which the firmware + * controls. + */ +static unsigned int pfvfres_pmask(struct adapter *adapter, + unsigned int pf, unsigned int vf) +{ + unsigned int portn, portvec; + + /* + * Give PF's access to all of the ports. + */ + if (vf == 0) + return FW_PFVF_CMD_PMASK_MASK; + + /* + * For VFs, we'll assign them access to the ports based purely on the + * PF. We assign active ports in order, wrapping around if there are + * fewer active ports than PFs: e.g. active port[pf % nports]. + * Unfortunately the adapter's port_info structs haven't been + * initialized yet so we have to compute this. + */ + if (adapter->params.nports == 0) + return 0; + + portn = pf % adapter->params.nports; + portvec = adapter->params.portvec; + for (;;) { + /* + * Isolate the lowest set bit in the port vector. If we're at + * the port number that we want, return that as the pmask. + * otherwise mask that bit out of the port vector and + * decrement our port number ... 
+ */ + unsigned int pmask = portvec ^ (portvec & (portvec-1)); + if (portn == 0) + return pmask; + portn--; + portvec &= ~pmask; + } + /*NOTREACHED*/ +} +#endif + enum { MEMWIN0_APERTURE = 65536, MEMWIN0_BASE = 0x30000, @@ -101,10 +171,20 @@ enum { NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) -#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 } +#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { - CH_DEVICE(0xa000), /* PE10K */ + CH_DEVICE(0xa000, 0), /* PE10K */ + CH_DEVICE(0x4001, 0), + CH_DEVICE(0x4002, 0), + CH_DEVICE(0x4003, 0), + CH_DEVICE(0x4004, 0), + CH_DEVICE(0x4005, 0), + CH_DEVICE(0x4006, 0), + CH_DEVICE(0x4007, 0), + CH_DEVICE(0x4008, 0), + CH_DEVICE(0x4009, 0), + CH_DEVICE(0x400a, 0), { 0, } }; @@ -216,7 +296,7 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) void t4_os_portmod_changed(const struct adapter *adap, int port_id) { static const char *mod_str[] = { - NULL, "LR", "SR", "ER", "passive DA", "active DA" + NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" }; const struct net_device *dev = adap->port[port_id]; @@ -224,7 +304,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) netdev_info(dev, "port module unplugged\n"); - else + else if (pi->mod_type < ARRAY_SIZE(mod_str)) netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); } @@ -244,12 +324,13 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) int uc_cnt = netdev_uc_count(dev); int mc_cnt = netdev_mc_count(dev); const struct port_info *pi = netdev_priv(dev); + unsigned int mb = pi->adapter->fn; /* first do the secondary unicast addresses */ netdev_for_each_uc_addr(ha, dev) { addr[naddr++] = ha->addr; if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, naddr, addr, filt_idx, &uhash, sleep); if (ret < 0) return ret; @@ -263,7 +344,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) netdev_for_each_mc_addr(ha, dev) { addr[naddr++] = ha->addr; if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, naddr, addr, filt_idx, &mhash, sleep); if (ret < 0) return ret; @@ -273,7 +354,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) } } - return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0, + return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0, uhash | mhash, sleep); } @@ -288,7 +369,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) ret = set_addr_filters(dev, sleep_ok); if (ret == 0) - ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, + ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, sleep_ok); @@ -305,15 +386,16 @@ static int link_start(struct net_device *dev) { int ret; struct port_info *pi = netdev_priv(dev); + unsigned int mb = pi->adapter->fn; /* * We do not set address filters and promiscuity here, the stack does * that step explicitly. 
*/ - ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, + ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, pi->vlan_grp != NULL, true); if (ret == 0) { - ret = t4_change_mac(pi->adapter, 0, pi->viid, + ret = t4_change_mac(pi->adapter, mb, pi->viid, pi->xact_addr_filt, dev->dev_addr, true, true); if (ret >= 0) { @@ -322,9 +404,10 @@ static int link_start(struct net_device *dev) } } if (ret == 0) - ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg); + ret = t4_link_start(pi->adapter, mb, pi->tx_chan, + &pi->link_cfg); if (ret == 0) - ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true); + ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true); return ret; } @@ -526,31 +609,48 @@ static void free_msix_queue_irqs(struct adapter *adap) free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); } +/** + * write_rss - write the RSS table for a given port + * @pi: the port + * @queues: array of queue indices for RSS + * + * Sets up the portion of the HW RSS table for the port's VI to distribute + * packets to the Rx queues in @queues. + */ +static int write_rss(const struct port_info *pi, const u16 *queues) +{ + u16 *rss; + int i, err; + const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset]; + + rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL); + if (!rss) + return -ENOMEM; + + /* map the queue indices to queue ids */ + for (i = 0; i < pi->rss_size; i++, queues++) + rss[i] = q[*queues].rspq.abs_id; + + err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0, + pi->rss_size, rss, pi->rss_size); + kfree(rss); + return err; +} + /** * setup_rss - configure RSS * @adap: the adapter * - * Sets up RSS to distribute packets to multiple receive queues. We - * configure the RSS CPU lookup table to distribute to the number of HW - * receive queues, and the response queue lookup table to narrow that - * down to the response queues actually configured for each port. - * We always configure the RSS mapping for all ports since the mapping - * table has plenty of entries. + * Sets up RSS for each port. */ static int setup_rss(struct adapter *adap) { - int i, j, err; - u16 rss[MAX_ETH_QSETS]; + int i, err; for_each_port(adap, i) { const struct port_info *pi = adap2pinfo(adap, i); - const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; - - for (j = 0; j < pi->nqsets; j++) - rss[j] = q[j].rspq.abs_id; - err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, - rss, pi->nqsets); + err = write_rss(pi, pi->rss); if (err) return err; } @@ -963,10 +1063,11 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, * Return a version number to identify the type of adapter. 
The scheme is: * - bits 0..9: chip version * - bits 10..15: chip revision + * - bits 16..23: register dump version */ static inline unsigned int mk_adap_vers(const struct adapter *ap) { - return 4 | (ap->params.rev << 10); + return 4 | (ap->params.rev << 10) | (1 << 16); } static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, @@ -1041,7 +1142,9 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, 0xf000, 0x11190, - 0x19040, 0x19124, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, 0x19150, 0x191b0, 0x191d0, 0x191e8, 0x19238, 0x1924c, @@ -1054,49 +1157,49 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0x1a190, 0x1a1c4, 0x1a1fc, 0x1a1fc, 0x1e040, 0x1e04c, - 0x1e240, 0x1e28c, + 0x1e284, 0x1e28c, 0x1e2c0, 0x1e2c0, 0x1e2e0, 0x1e2e0, 0x1e300, 0x1e384, 0x1e3c0, 0x1e3c8, 0x1e440, 0x1e44c, - 0x1e640, 0x1e68c, + 0x1e684, 0x1e68c, 0x1e6c0, 0x1e6c0, 0x1e6e0, 0x1e6e0, 0x1e700, 0x1e784, 0x1e7c0, 0x1e7c8, 0x1e840, 0x1e84c, - 0x1ea40, 0x1ea8c, + 0x1ea84, 0x1ea8c, 0x1eac0, 0x1eac0, 0x1eae0, 0x1eae0, 0x1eb00, 0x1eb84, 0x1ebc0, 0x1ebc8, 0x1ec40, 0x1ec4c, - 0x1ee40, 0x1ee8c, + 0x1ee84, 0x1ee8c, 0x1eec0, 0x1eec0, 0x1eee0, 0x1eee0, 0x1ef00, 0x1ef84, 0x1efc0, 0x1efc8, 0x1f040, 0x1f04c, - 0x1f240, 0x1f28c, + 0x1f284, 0x1f28c, 0x1f2c0, 0x1f2c0, 0x1f2e0, 0x1f2e0, 0x1f300, 0x1f384, 0x1f3c0, 0x1f3c8, 0x1f440, 0x1f44c, - 0x1f640, 0x1f68c, + 0x1f684, 0x1f68c, 0x1f6c0, 0x1f6c0, 0x1f6e0, 0x1f6e0, 0x1f700, 0x1f784, 0x1f7c0, 0x1f7c8, 0x1f840, 0x1f84c, - 0x1fa40, 0x1fa8c, + 0x1fa84, 0x1fa8c, 0x1fac0, 0x1fac0, 0x1fae0, 0x1fae0, 0x1fb00, 0x1fb84, 0x1fbc0, 0x1fbc8, 0x1fc40, 0x1fc4c, - 0x1fe40, 0x1fe8c, + 0x1fe84, 0x1fe8c, 0x1fec0, 0x1fec0, 0x1fee0, 0x1fee0, 0x1ff00, 0x1ff84, @@ -1217,16 +1320,18 @@ static int restart_autoneg(struct net_device *dev) return -EAGAIN; if (p->link_cfg.autoneg != AUTONEG_ENABLE) return -EINVAL; - t4_restart_aneg(p->adapter, 0, p->tx_chan); + t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); return 0; } static int identify_port(struct net_device *dev, u32 data) { + struct adapter *adap = netdev2adap(dev); + if (data == 0) data = 2; /* default to 2 seconds */ - return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, + return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, data * 5); } @@ -1234,7 +1339,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) { unsigned int v = 0; - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || + type == FW_PORT_TYPE_BT_XAUI) { v |= SUPPORTED_TP; if (caps & FW_PORT_CAP_SPEED_100M) v |= SUPPORTED_100baseT_Full; @@ -1250,7 +1356,10 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) v |= SUPPORTED_10000baseKX4_Full; } else if (type == FW_PORT_TYPE_KR) v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - else if (type == FW_PORT_TYPE_FIBER) + else if (type == FW_PORT_TYPE_BP_AP) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC; + else if (type == FW_PORT_TYPE_FIBER_XFI || + type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) v |= SUPPORTED_FIBRE; if (caps & FW_PORT_CAP_ANEG) @@ -1276,13 +1385,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) const struct port_info *p = netdev_priv(dev); if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XFI || p->port_type == FW_PORT_TYPE_BT_XAUI) cmd->port = PORT_TP; - else if 
(p->port_type == FW_PORT_TYPE_FIBER) + else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || + p->port_type == FW_PORT_TYPE_FIBER_XAUI) cmd->port = PORT_FIBRE; - else if (p->port_type == FW_PORT_TYPE_TWINAX) - cmd->port = PORT_DA; - else + else if (p->port_type == FW_PORT_TYPE_SFP) { + if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + cmd->port = PORT_DA; + else + cmd->port = PORT_FIBRE; + } else cmd->port = PORT_OTHER; if (p->mdio_addr >= 0) { @@ -1356,7 +1471,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) lc->autoneg = cmd->autoneg; if (netif_running(dev)) - return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); return 0; } @@ -1388,7 +1504,8 @@ static int set_pauseparam(struct net_device *dev, if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (netif_running(dev)) - return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); return 0; } @@ -1520,7 +1637,8 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | FW_PARAMS_PARAM_YZ(q->cntxt_id); - err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); + err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, + &new_idx); if (err) return err; } @@ -1708,27 +1826,114 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return err; } +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) + static int set_tso(struct net_device *dev, u32 value) { if (value) - dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + dev->features |= TSO_FLAGS; else - dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + dev->features &= ~TSO_FLAGS; return 0; } static int set_flags(struct net_device *dev, u32 flags) { - if (flags & ~ETH_FLAG_RXHASH) - return -EOPNOTSUPP; + return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH); +} - if (flags & ETH_FLAG_RXHASH) - dev->features |= NETIF_F_RXHASH; - else - dev->features &= ~NETIF_F_RXHASH; +static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) +{ + const struct port_info *pi = netdev_priv(dev); + unsigned int n = min_t(unsigned int, p->size, pi->rss_size); + + p->size = pi->rss_size; + while (n--) + p->ring_index[n] = pi->rss[n]; return 0; } +static int set_rss_table(struct net_device *dev, + const struct ethtool_rxfh_indir *p) +{ + unsigned int i; + struct port_info *pi = netdev_priv(dev); + + if (p->size != pi->rss_size) + return -EINVAL; + for (i = 0; i < p->size; i++) + if (p->ring_index[i] >= pi->nqsets) + return -EINVAL; + for (i = 0; i < p->size; i++) + pi->rss[i] = p->ring_index[i]; + if (pi->adapter->flags & FULL_INIT_DONE) + return write_rss(pi, pi->rss); + return 0; +} + +static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + void *rules) +{ + const struct port_info *pi = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + unsigned int v = pi->rss_mode; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V4_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | 
RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + } + return 0; + } + case ETHTOOL_GRXRINGS: + info->data = pi->nqsets; + return 0; + } + return -EOPNOTSUPP; +} + static struct ethtool_ops cxgb_ethtool_ops = { .get_settings = get_settings, .set_settings = set_settings, @@ -1760,6 +1965,9 @@ static struct ethtool_ops cxgb_ethtool_ops = { .set_wol = set_wol, .set_tso = set_tso, .set_flags = set_flags, + .get_rxnfc = get_rxnfc, + .get_rxfh_indir = get_rss_table, + .set_rxfh_indir = set_rss_table, .flash_device = set_flash, }; @@ -2306,9 +2514,11 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.adapter_type = adap->params.rev; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> + (adap->fn * 4)); lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> + (adap->fn * 4)); lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); lli.fw_vers = adap->params.fw_vers; @@ -2483,6 +2693,7 @@ static void cxgb_down(struct adapter *adapter) t4_intr_disable(adapter); cancel_work_sync(&adapter->tid_release_task); adapter->tid_release_task_busy = false; + adapter->tid_release_head = NULL; if (adapter->flags & USING_MSIX) { free_msix_queue_irqs(adapter); @@ -2511,9 +2722,10 @@ static int cxgb_open(struct net_device *dev) } dev->real_num_tx_queues = pi->nqsets; - link_start(dev); - netif_tx_start_all_queues(dev); - return 0; + err = link_start(dev); + if (!err) + netif_tx_start_all_queues(dev); + return err; } static int cxgb_close(struct net_device *dev) @@ -2523,15 +2735,15 @@ static int cxgb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - return t4_enable_vi(adapter, 0, pi->viid, false, false); + return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); } -static struct net_device_stats *cxgb_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *ns) { struct port_stats stats; struct port_info *p = netdev_priv(dev); struct adapter *adapter = p->adapter; - struct net_device_stats *ns = &dev->stats; spin_lock(&adapter->stats_lock); t4_get_port_stats(adapter, p->tx_chan, &stats); @@ -2570,6 +2782,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev) static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { + unsigned int mbox; int ret = 0, prtad, devad; 
struct port_info *pi = netdev_priv(dev); struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; @@ -2592,11 +2805,12 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) } else return -EINVAL; + mbox = pi->adapter->fn; if (cmd == SIOCGMIIREG) - ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, + ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, data->reg_num, &data->val_out); else - ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, + ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, data->reg_num, data->val_in); break; default: @@ -2618,8 +2832,8 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ return -EINVAL; - ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1, - true); + ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, + -1, -1, -1, true); if (!ret) dev->mtu = new_mtu; return ret; @@ -2634,8 +2848,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, - addr->sa_data, true, true); + ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, + pi->xact_addr_filt, addr->sa_data, true, true); if (ret < 0) return ret; @@ -2649,8 +2863,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) struct port_info *pi = netdev_priv(dev); pi->vlan_grp = grp; - t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL, - true); + t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1, + grp != NULL, true); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2674,7 +2888,7 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, .ndo_start_xmit = t4_eth_xmit, - .ndo_get_stats = cxgb_get_stats, + .ndo_get_stats64 = cxgb_get_stats, .ndo_set_rx_mode = cxgb_set_rxmode, .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -2707,6 +2921,76 @@ static void setup_memwin(struct adapter *adap) t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), (bar0 + MEMWIN2_BASE) | BIR(0) | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); + if (adap->vres.ocq.size) { + unsigned int start, sz_kb; + + start = pci_resource_start(adap->pdev, 2) + + OCQ_WIN_OFFSET(adap->pdev, &adap->vres); + sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), + start | BIR(1) | WINDOW(ilog2(sz_kb))); + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), + adap->vres.ocq.start); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); + } +} + +static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) +{ + u32 v; + int ret; + + /* get device capabilities */ + memset(c, 0, sizeof(*c)); + c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c->retval_len16 = htonl(FW_LEN16(*c)); + ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); + if (ret < 0) + return ret; + + /* select capabilities we'll be using */ + if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adap->pdev_dev, "virtualization ACLs not supported"); + return ret; + } + c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = 
t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); + if (ret < 0) + return ret; + + ret = t4_config_glbl_rss(adap, adap->fn, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + if (ret < 0) + return ret; + + ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, + 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); + if (ret < 0) + return ret; + + t4_sge_init(adap); + + /* tweak some settings */ + t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); + t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); + t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); + v = t4_read_reg(adap, TP_PIO_DATA); + t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); + + /* get basic stuff going */ + return t4_early_init(adap, adap->fn); } /* @@ -2734,7 +3018,7 @@ static int adap_init0(struct adapter *adap) return ret; /* contact FW, request master */ - ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); + ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); @@ -2742,44 +3026,7 @@ static int adap_init0(struct adapter *adap) } /* reset device */ - ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); - if (ret < 0) - goto bye; - - /* get device capabilities */ - memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); - c.retval_len16 = htonl(FW_LEN16(c)); - ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); - if (ret < 0) - goto bye; - - /* select capabilities we'll be using */ - if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { - if (!vf_acls) - c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); - else - c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); - } else if (vf_acls) { - dev_err(adap->pdev_dev, "virtualization ACLs not supported"); - goto bye; - } - c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL); - if (ret < 0) - goto bye; - - ret = t4_config_glbl_rss(adap, 0, - FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); - if (ret < 0) - goto bye; - - ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16, - FW_CMD_CAP_PF, FW_CMD_CAP_PF); + ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); if (ret < 0) goto bye; @@ -2790,27 +3037,31 @@ static int adap_init0(struct adapter *adap) for (v = 1; v < SGE_NCOUNTERS; v++) adap->sge.counter_val[v] = min(intr_cnt[v - 1], THRESHOLD_3_MASK); - t4_sge_init(adap); +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) - /* get basic stuff going */ - ret = t4_early_init(adap, 0); + params[0] = FW_PARAM_DEV(CCLK); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); if (ret < 0) goto bye; + adap->params.vpd.cclk = val[0]; -#define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + ret = adap_init1(adap, &c); + if (ret < 0) + goto bye; #define FW_PARAM_PFVF(param) \ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ + FW_PARAMS_PARAM_Y(adap->fn)) params[0] = FW_PARAM_DEV(PORTVEC); params[1] = FW_PARAM_PFVF(L2T_START); params[2] = FW_PARAM_PFVF(L2T_END); params[3] = FW_PARAM_PFVF(FILTER_START); params[4] = FW_PARAM_PFVF(FILTER_END); - ret = t4_query_params(adap, 
0, 0, 0, 5, params, val); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val); if (ret < 0) goto bye; port_vec = val[0]; @@ -2825,7 +3076,8 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(TDDP_START); params[4] = FW_PARAM_PFVF(TDDP_END); params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); - ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, + val); if (ret < 0) goto bye; adap->tids.ntids = val[0]; @@ -2844,7 +3096,8 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(RQ_END); params[4] = FW_PARAM_PFVF(PBL_START); params[5] = FW_PARAM_PFVF(PBL_END); - ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, + val); if (ret < 0) goto bye; adap->vres.stag.start = val[0]; @@ -2853,11 +3106,29 @@ static int adap_init0(struct adapter *adap) adap->vres.rq.size = val[3] - val[2] + 1; adap->vres.pbl.start = val[4]; adap->vres.pbl.size = val[5] - val[4] + 1; + + params[0] = FW_PARAM_PFVF(SQRQ_START); + params[1] = FW_PARAM_PFVF(SQRQ_END); + params[2] = FW_PARAM_PFVF(CQ_START); + params[3] = FW_PARAM_PFVF(CQ_END); + params[4] = FW_PARAM_PFVF(OCQ_START); + params[5] = FW_PARAM_PFVF(OCQ_END); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, + val); + if (ret < 0) + goto bye; + adap->vres.qp.start = val[0]; + adap->vres.qp.size = val[1] - val[0] + 1; + adap->vres.cq.start = val[2]; + adap->vres.cq.size = val[3] - val[2] + 1; + adap->vres.ocq.start = val[4]; + adap->vres.ocq.size = val[5] - val[4] + 1; } if (c.iscsicaps) { params[0] = FW_PARAM_PFVF(ISCSI_START); params[1] = FW_PARAM_PFVF(ISCSI_END); - ret = t4_query_params(adap, 0, 0, 0, 2, params, val); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, + val); if (ret < 0) goto bye; adap->vres.iscsi.start = val[0]; @@ -2877,12 +3148,41 @@ static int adap_init0(struct adapter *adap) t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); - /* tweak some settings */ - t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); - t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); - t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); - v = t4_read_reg(adap, TP_PIO_DATA); - t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); +#ifdef CONFIG_PCI_IOV + /* + * Provision resource limits for Virtual Functions. We currently + * grant them all the same static resource limits except for the Port + * Access Rights Mask which we're assigning based on the PF. All of + * the static provisioning stuff for both the PF and VF really needs + * to be managed in a persistent manner for each device which the + * firmware controls. + */ + { + int pf, vf; + + for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { + if (num_vf[pf] <= 0) + continue; + + /* VF numbering starts at 1! */ + for (vf = 1; vf <= num_vf[pf]; vf++) { + ret = t4_cfg_pfvf(adap, adap->fn, pf, vf, + VFRES_NEQ, VFRES_NETHCTRL, + VFRES_NIQFLINT, VFRES_NIQ, + VFRES_TC, VFRES_NVI, + FW_PFVF_CMD_CMASK_MASK, + pfvfres_pmask(adap, pf, vf), + VFRES_NEXACTF, + VFRES_R_CAPS, VFRES_WX_CAPS); + if (ret < 0) + dev_warn(adap->pdev_dev, "failed to " + "provision pf/vf=%d/%d; " + "err=%d\n", pf, vf, ret); + } + } + } +#endif + setup_memwin(adap); return 0; @@ -2892,10 +3192,114 @@ static int adap_init0(struct adapter *adap) * commands. 
*/ bye: if (ret != -ETIMEDOUT && ret != -EIO) - t4_fw_bye(adap, 0); + t4_fw_bye(adap, adap->fn); return ret; } +/* EEH callbacks */ + +static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + int i; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) + goto out; + + rtnl_lock(); + adap->flags &= ~FW_OK; + notify_ulds(adap, CXGB4_STATE_START_RECOVERY); + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + + netif_device_detach(dev); + netif_carrier_off(dev); + } + if (adap->flags & FULL_INIT_DONE) + cxgb_down(adap); + rtnl_unlock(); + pci_disable_device(pdev); +out: return state == pci_channel_io_perm_failure ? + PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) +{ + int i, ret; + struct fw_caps_config_cmd c; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) { + pci_restore_state(pdev); + pci_save_state(pdev); + return PCI_ERS_RESULT_RECOVERED; + } + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "cannot reenable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (t4_wait_dev_ready(adap) < 0) + return PCI_ERS_RESULT_DISCONNECT; + if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL)) + return PCI_ERS_RESULT_DISCONNECT; + adap->flags |= FW_OK; + if (adap_init1(adap, &c)) + return PCI_ERS_RESULT_DISCONNECT; + + for_each_port(adap, i) { + struct port_info *p = adap2pinfo(adap, i); + + ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, + NULL, NULL); + if (ret < 0) + return PCI_ERS_RESULT_DISCONNECT; + p->viid = ret; + p->xact_addr_filt = -1; + } + + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + setup_memwin(adap); + if (cxgb_up(adap)) + return PCI_ERS_RESULT_DISCONNECT; + return PCI_ERS_RESULT_RECOVERED; +} + +static void eeh_resume(struct pci_dev *pdev) +{ + int i; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) + return; + + rtnl_lock(); + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + + if (netif_running(dev)) { + link_start(dev); + cxgb_set_rxmode(dev); + } + netif_device_attach(dev); + } + rtnl_unlock(); +} + +static struct pci_error_handlers cxgb4_eeh = { + .error_detected = eeh_err_detected, + .slot_reset = eeh_slot_reset, + .resume = eeh_resume, +}; + static inline bool is_10g_port(const struct link_config *lc) { return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; @@ -3076,10 +3480,27 @@ static int __devinit enable_msix(struct adapter *adap) #undef EXTRA_VECS +static int __devinit init_rss(struct adapter *adap) +{ + unsigned int i, j; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); + if (!pi->rss) + return -ENOMEM; + for (j = 0; j < pi->rss_size; j++) + pi->rss[j] = j % pi->nqsets; + } + return 0; +} + static void __devinit print_port_info(struct adapter *adap) { static const char *base[] = { - "R", "KX4", "T", "KX", "T", "KR", "CX4" + "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", + "KX", "KR", "KR SFP+", "KR FEC" }; int i; @@ -3121,7 +3542,31 @@ static void __devinit print_port_info(struct adapter *adap) } } -#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ +/* + * Free the following resources: + * - memory used for tables + * - MSI/MSI-X + * - net 
devices + * - resources FW is holding for us + */ +static void free_some_resources(struct adapter *adapter) +{ + unsigned int i; + + t4_free_mem(adapter->l2t); + t4_free_mem(adapter->tids.tid_tab); + disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) { + kfree(adap2pinfo(adapter, i)->rss); + free_netdev(adapter->port[i]); + } + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, adapter->fn); +} + +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) static int __devinit init_one(struct pci_dev *pdev, @@ -3141,10 +3586,12 @@ static int __devinit init_one(struct pci_dev *pdev, return err; } - /* We control everything through PF 0 */ + /* We control everything through one PF */ func = PCI_FUNC(pdev->devfn); - if (func > 0) + if (func != ent->driver_data) { + pci_save_state(pdev); /* to restore SR-IOV later */ goto sriov; + } err = pci_enable_device(pdev); if (err) { @@ -3187,6 +3634,7 @@ static int __devinit init_one(struct pci_dev *pdev, adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; + adapter->fn = func; adapter->name = pci_name(pdev); adapter->msg_enable = dflt_msg_enable; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); @@ -3225,7 +3673,7 @@ static int __devinit init_one(struct pci_dev *pdev, netif_tx_stop_all_queues(netdev); netdev->irq = pdev->irq; - netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= NETIF_F_SG | TSO_FLAGS; netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma; netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; @@ -3238,7 +3686,7 @@ static int __devinit init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, adapter); if (adapter->flags & FW_OK) { - err = t4_port_init(adapter, 0, 0, 0); + err = t4_port_init(adapter, func, func, 0); if (err) goto out_free_dev; } @@ -3262,6 +3710,16 @@ static int __devinit init_one(struct pci_dev *pdev, adapter->params.offload = 0; } + /* See what interrupts we'll be using */ + if (msi > 1 && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + err = init_rss(adapter); + if (err) + goto out_free_dev; + /* * The card is now ready to go. 
If any errors occur during device * registration we do not fail the whole card but rather proceed only @@ -3297,12 +3755,6 @@ static int __devinit init_one(struct pci_dev *pdev, setup_debugfs(adapter); } - /* See what interrupts we'll be using */ - if (msi > 1 && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; - else if (msi > 0 && pci_enable_msi(pdev) == 0) - adapter->flags |= USING_MSI; - if (is_offload(adapter)) attach_ulds(adapter); @@ -3319,13 +3771,7 @@ static int __devinit init_one(struct pci_dev *pdev, return 0; out_free_dev: - t4_free_mem(adapter->tids.tid_tab); - t4_free_mem(adapter->l2t); - for_each_port(adapter, i) - if (adapter->port[i]) - free_netdev(adapter->port[i]); - if (adapter->flags & FW_OK) - t4_fw_bye(adapter, 0); + free_some_resources(adapter); out_unmap_bar: iounmap(adapter->regs); out_free_adapter: @@ -3360,16 +3806,8 @@ static void __devexit remove_one(struct pci_dev *pdev) if (adapter->flags & FULL_INIT_DONE) cxgb_down(adapter); - t4_free_mem(adapter->l2t); - t4_free_mem(adapter->tids.tid_tab); - disable_msi(adapter); - - for_each_port(adapter, i) - if (adapter->port[i]) - free_netdev(adapter->port[i]); - if (adapter->flags & FW_OK) - t4_fw_bye(adapter, 0); + free_some_resources(adapter); iounmap(adapter->regs); kfree(adapter); pci_disable_pcie_error_reporting(pdev); @@ -3385,6 +3823,7 @@ static struct pci_driver cxgb4_driver = { .id_table = cxgb4_pci_tbl, .probe = init_one, .remove = __devexit_p(remove_one), + .err_handler = &cxgb4_eeh, }; static int __init cxgb4_init_module(void) diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h index 5b98546ac92d8c5c0da1915882656b6ad0c28f58..85d74e751ce00b33d88971f030a73771d2f7f2a4 100644 --- a/drivers/net/cxgb4/cxgb4_uld.h +++ b/drivers/net/cxgb4/cxgb4_uld.h @@ -185,8 +185,14 @@ struct cxgb4_virt_res { /* virtualized HW resources */ struct cxgb4_range stag; struct cxgb4_range rq; struct cxgb4_range pbl; + struct cxgb4_range qp; + struct cxgb4_range cq; + struct cxgb4_range ocq; }; +#define OCQ_WIN_OFFSET(pdev, vres) \ + (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size)) + /* * Block of information the LLD provides to ULDs attaching to a device. 
*/ diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c index 9f96724a133a7a46bba15b2f196511d659837b2e..e8f0f55e9d08e7c537a129f271b34891e5d282c2 100644 --- a/drivers/net/cxgb4/l2t.c +++ b/drivers/net/cxgb4/l2t.c @@ -310,6 +310,13 @@ static void t4_l2e_free(struct l2t_entry *e) neigh_release(e->neigh); e->neigh = NULL; } + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + kfree_skb(skb); + } + e->arpq_tail = NULL; } spin_unlock_bh(&e->lock); diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c index d1f8f225e45a7799dd6e028a5980c39357907e5d..bf38cfc575655650b46038f9722170c801f75070 100644 --- a/drivers/net/cxgb4/sge.c +++ b/drivers/net/cxgb4/sge.c @@ -938,16 +938,16 @@ out_free: dev_kfree_skb(skb); wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | FW_WR_IMMDLEN(sizeof(*lso))); - lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE | LSO_LAST_SLICE | - LSO_IPV6(v6) | - LSO_ETHHDR_LEN(eth_xtra_len / 4) | - LSO_IPHDR_LEN(l3hdr_len / 4) | - LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); - lso->ipid_ofst = htons(0); - lso->mss = htons(ssi->gso_size); - lso->seqno_offset = htonl(0); - lso->len = htonl(skb->len); + lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len / 4) | + LSO_IPHDR_LEN(l3hdr_len / 4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->c.ipid_ofst = htons(0); + lso->c.mss = htons(ssi->gso_size); + lso->c.seqno_offset = htonl(0); + lso->c.len = htonl(skb->len); cpl = (void *)(lso + 1); cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN(l3hdr_len) | @@ -1593,14 +1593,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, if (csum_ok && (pi->rx_offload & RX_CSO) && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { - if (!pkt->ip_frag) + if (!pkt->ip_frag) { skb->ip_summed = CHECKSUM_UNNECESSARY; - else { + rxq->stats.rx_cso++; + } else if (pkt->l2info & htonl(RXF_IP)) { __sum16 c = (__force __sum16)pkt->csum; skb->csum = csum_unfold(c); skb->ip_summed = CHECKSUM_COMPLETE; + rxq->stats.rx_cso++; } - rxq->stats.rx_cso++; } else skb->ip_summed = CHECKSUM_NONE; @@ -1718,7 +1719,7 @@ static int process_responses(struct sge_rspq *q, int budget) free_rx_bufs(q->adap, &rxq->fl, 1); q->offset = 0; } - len &= RSPD_LEN; + len = RSPD_LEN(len); } si.tot_len = len; @@ -1998,7 +1999,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_CMD_EXEC | - FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); + FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0)); c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | FW_LEN16(c)); c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | @@ -2030,7 +2031,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.fl0addr = cpu_to_be64(fl->addr); } - ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); if (ret) goto err; @@ -2109,7 +2110,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); + FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); @@ -2122,7 
+2123,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, FW_EQ_ETH_CMD_EQSIZE(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); txq->q.sdesc = NULL; @@ -2159,7 +2160,8 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); + FW_EQ_CTRL_CMD_PFN(adap->fn) | + FW_EQ_CTRL_CMD_VFN(0)); c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); @@ -2173,7 +2175,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, FW_EQ_CTRL_CMD_EQSIZE(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); if (ret) { dma_free_coherent(adap->pdev_dev, nentries * sizeof(struct tx_desc), @@ -2209,7 +2211,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); + FW_EQ_OFLD_CMD_PFN(adap->fn) | + FW_EQ_OFLD_CMD_VFN(0)); c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | @@ -2221,7 +2224,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, FW_EQ_OFLD_CMD_EQSIZE(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); txq->q.sdesc = NULL; @@ -2257,8 +2260,8 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, unsigned int fl_id = fl ? 
fl->cntxt_id : 0xffff; adap->sge.ingr_map[rq->cntxt_id] = NULL; - t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, - 0xffff); + t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, + rq->cntxt_id, fl_id, 0xffff); dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, rq->desc, rq->phys_addr); netif_napi_del(&rq->napi); @@ -2295,7 +2298,8 @@ void t4_free_sge_resources(struct adapter *adap) if (eq->rspq.desc) free_rspq_fl(adap, &eq->rspq, &eq->fl); if (etq->q.desc) { - t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); + t4_eth_eq_free(adap, adap->fn, adap->fn, 0, + etq->q.cntxt_id); free_tx_desc(adap, &etq->q, etq->q.in_use, true); kfree(etq->q.sdesc); free_txq(adap, &etq->q); @@ -2318,7 +2322,8 @@ void t4_free_sge_resources(struct adapter *adap) if (q->q.desc) { tasklet_kill(&q->qresume_tsk); - t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); + t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, + q->q.cntxt_id); free_tx_desc(adap, &q->q, q->q.in_use, false); kfree(q->q.sdesc); __skb_queue_purge(&q->sendq); @@ -2332,7 +2337,8 @@ void t4_free_sge_resources(struct adapter *adap) if (cq->q.desc) { tasklet_kill(&cq->qresume_tsk); - t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); + t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, + cq->q.cntxt_id); __skb_queue_purge(&cq->sendq); free_txq(adap, &cq->q); } @@ -2400,6 +2406,7 @@ void t4_sge_stop(struct adapter *adap) */ void t4_sge_init(struct adapter *adap) { + unsigned int i, v; struct sge *s = &adap->sge; unsigned int fl_align_log = ilog2(FL_ALIGN); @@ -2408,8 +2415,10 @@ void t4_sge_init(struct adapter *adap) INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | RXPKTCPLMODE | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); - t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, - HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); + + for (i = v = 0; i < 32; i += 4) + v |= (PAGE_SHIFT - 10) << i; + t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); #if FL_PG_ORDER > 0 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c index da272a98fdbc3b6d5b3181ecc68769fdbbff7b69..9e1a4b49b47a56821851d065eb1e3e6f64cbb86b 100644 --- a/drivers/net/cxgb4/t4_hw.c +++ b/drivers/net/cxgb4/t4_hw.c @@ -221,6 +221,13 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, if ((size & 15) || size > MBOX_LEN) return -EINVAL; + /* + * If the device is off-line, as in EEH, commands will time out. + * Fail them early so we don't waste time waiting. 
+ */ + if (adap->pdev->error_state != pci_channel_io_normal) + return -EIO; + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); @@ -449,12 +456,10 @@ enum { SF_RD_STATUS = 5, /* read status register */ SF_WR_ENABLE = 6, /* enable writes */ SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ SF_ERASE_SECTOR = 0xd8, /* erase sector */ - FW_START_SEC = 8, /* first flash sector for FW */ - FW_END_SEC = 15, /* last flash sector for FW */ - FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, - FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, + FW_MAX_SIZE = 512 * 1024, }; /** @@ -558,7 +563,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr, { int ret; - if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) + if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) return -EINVAL; addr = swab32(addr) | SF_RD_DATA_FAST; @@ -596,7 +601,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr, u32 buf[64]; unsigned int i, c, left, val, offset = addr & 0xff; - if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE) + if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) return -EINVAL; val = swab32(addr) | SF_PROG_PAGE; @@ -614,7 +619,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr, if (ret) goto unlock; } - ret = flash_wait_op(adapter, 5, 1); + ret = flash_wait_op(adapter, 8, 1); if (ret) goto unlock; @@ -647,9 +652,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr, */ static int get_fw_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, - FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1, - vers, 0); + return t4_read_flash(adapter, adapter->params.sf_fw_start + + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); } /** @@ -661,8 +665,8 @@ static int get_fw_version(struct adapter *adapter, u32 *vers) */ static int get_tp_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr, - tp_microcode_ver), + return t4_read_flash(adapter, adapter->params.sf_fw_start + + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } @@ -684,9 +688,9 @@ int t4_check_fw_version(struct adapter *adapter) if (!ret) ret = get_tp_version(adapter, &adapter->params.tp_vers); if (!ret) - ret = t4_read_flash(adapter, - FW_IMG_START + offsetof(struct fw_hdr, intfver_nic), - 2, api_vers, 1); + ret = t4_read_flash(adapter, adapter->params.sf_fw_start + + offsetof(struct fw_hdr, intfver_nic), + 2, api_vers, 1); if (ret) return ret; @@ -726,7 +730,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, SF_ERASE_SECTOR | (start << 8))) != 0 || - (ret = flash_wait_op(adapter, 5, 500)) != 0) { + (ret = flash_wait_op(adapter, 14, 500)) != 0) { dev_err(adapter->pdev_dev, "erase of flash sector %d failed, error %d\n", start, ret); @@ -754,6 +758,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) u8 first_page[SF_PAGE_SIZE]; const u32 *p = (const u32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + unsigned int fw_img_start = adap->params.sf_fw_start; + unsigned int fw_start_sec = fw_img_start / sf_sec_size; if (!size) { dev_err(adap->pdev_dev, "FW image has no data\n"); @@ -784,8 +791,8 
@@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) return -EINVAL; } - i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */ - ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1); + i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ + ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); if (ret) goto out; @@ -796,11 +803,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) */ memcpy(first_page, fw_data, SF_PAGE_SIZE); ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); - ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page); + ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); if (ret) goto out; - addr = FW_IMG_START; + addr = fw_img_start; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; fw_data += SF_PAGE_SIZE; @@ -810,7 +817,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) } ret = t4_write_flash(adap, - FW_IMG_START + offsetof(struct fw_hdr, fw_ver), + fw_img_start + offsetof(struct fw_hdr, fw_ver), sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); out: if (ret) @@ -1128,6 +1135,7 @@ static void cim_intr_handler(struct adapter *adapter) static void ulprx_intr_handler(struct adapter *adapter) { static struct intr_info ulprx_intr_info[] = { + { 0x1800000, "ULPRX context error", -1, 1 }, { 0x7fffff, "ULPRX parity error", -1, 1 }, { 0 } }; @@ -1436,7 +1444,7 @@ static void pl_intr_handler(struct adapter *adap) t4_fatal_err(adap); } -#define PF_INTR_MASK (PFSW | PFCIM) +#define PF_INTR_MASK (PFSW) #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ CPL_SWITCH | SGE | ULP_TX) @@ -2510,7 +2518,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, c.retval_len16 = htonl(FW_LEN16(c)); c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | FW_PFVF_CMD_NIQ(rxq)); - c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | + c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | FW_PFVF_CMD_PMASK(pmask) | FW_PFVF_CMD_NEQ(txq)); c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | @@ -2572,7 +2580,7 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, } if (rss_size) *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); - return ntohs(c.viid_pkd); + return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); } /** @@ -2595,7 +2603,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); - c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); + c.type_viid = htons(FW_VI_CMD_VIID(viid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); } @@ -3045,7 +3053,7 @@ static void __devinit init_link_config(struct link_config *lc, } } -static int __devinit wait_dev_ready(struct adapter *adap) +int t4_wait_dev_ready(struct adapter *adap) { if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) return 0; @@ -3053,6 +3061,33 @@ static int __devinit wait_dev_ready(struct adapter *adap) return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 
0 : -EIO; } +static int __devinit get_flash_params(struct adapter *adap) +{ + int ret; + u32 info; + + ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); + if (!ret) + ret = sf1_read(adap, 3, 0, 1, &info); + t4_write_reg(adap, SF_OP, 0); /* unlock SF */ + if (ret) + return ret; + + if ((info & 0xff) != 0x20) /* not a Numonix flash */ + return -EINVAL; + info >>= 16; /* log2 of size */ + if (info >= 0x14 && info < 0x18) + adap->params.sf_nsec = 1 << (info - 16); + else if (info == 0x18) + adap->params.sf_nsec = 64; + else + return -EINVAL; + adap->params.sf_size = 1 << info; + adap->params.sf_fw_start = + t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; + return 0; +} + /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter @@ -3066,13 +3101,19 @@ int __devinit t4_prep_adapter(struct adapter *adapter) { int ret; - ret = wait_dev_ready(adapter); + ret = t4_wait_dev_ready(adapter); if (ret < 0) return ret; get_pci_mode(adapter, &adapter->params.pci); adapter->params.rev = t4_read_reg(adapter, PL_REV); + ret = get_flash_params(adapter); + if (ret < 0) { + dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); + return ret; + } + ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; @@ -3092,8 +3133,10 @@ int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) u8 addr[6]; int ret, i, j = 0; struct fw_port_cmd c; + struct fw_rss_vi_config_cmd rvc; memset(&c, 0, sizeof(c)); + memset(&rvc, 0, sizeof(rvc)); for_each_port(adap, i) { unsigned int rss_size; @@ -3122,12 +3165,22 @@ int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); + adap->port[i]->dev_id = j; ret = ntohl(c.u.info.lstatus_to_modtype); p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 
FW_PORT_CMD_MDIOADDR_GET(ret) : -1; p->port_type = FW_PORT_CMD_PTYPE_GET(ret); - p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); + p->mod_type = FW_PORT_MOD_TYPE_NA; + + rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ | + FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); + rvc.retval_len16 = htonl(FW_LEN16(rvc)); + ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); + if (ret) + return ret; + p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); j++; diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h index 025623285c93ebcb20f6651ef8aad85396084ffc..10a055565776cfceb4730df1168b44662d4d9761 100644 --- a/drivers/net/cxgb4/t4_hw.h +++ b/drivers/net/cxgb4/t4_hw.h @@ -57,8 +57,6 @@ enum { enum { SF_PAGE_SIZE = 256, /* serial flash page size */ - SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ - SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ }; enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ @@ -69,6 +67,45 @@ enum { SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ + + SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ + SGE_TIMER_UPD_CIDX = 7, /* update cidx only */ + + SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */ + + SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */ + SGE_INTRDST_IQ = 1, /* destination is an ingress queue */ + + SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */ + SGE_UPDATEDEL_INTR = 1, /* interrupt */ + SGE_UPDATEDEL_STPG = 2, /* status page */ + SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */ + + SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */ + SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */ + SGE_HOSTFCMODE_STPG = 2, /* sent to status page */ + SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */ + + SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */ + SGE_FETCHBURSTMIN_32B = 1, + SGE_FETCHBURSTMIN_64B = 2, + SGE_FETCHBURSTMIN_128B = 3, + + SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */ + SGE_FETCHBURSTMAX_128B = 1, + SGE_FETCHBURSTMAX_256B = 2, + SGE_FETCHBURSTMAX_512B = 3, + + SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */ + SGE_CIDXFLUSHTHRESH_2 = 1, + SGE_CIDXFLUSHTHRESH_4 = 2, + SGE_CIDXFLUSHTHRESH_8 = 3, + SGE_CIDXFLUSHTHRESH_16 = 4, + SGE_CIDXFLUSHTHRESH_32 = 5, + SGE_CIDXFLUSHTHRESH_64 = 6, + SGE_CIDXFLUSHTHRESH_128 = 7, + + SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */ }; struct sge_qstat { /* data written to SGE queue status entries */ @@ -90,11 +127,13 @@ struct rsp_ctrl { }; #define RSPD_NEWBUF 0x80000000U -#define RSPD_LEN 0x7fffffffU +#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU) +#define RSPD_QID(x) RSPD_LEN(x) #define RSPD_GEN(x) ((x) >> 7) #define RSPD_TYPE(x) (((x) >> 4) & 3) #define QINTR_CNT_EN 0x1 #define QINTR_TIMER_IDX(x) ((x) << 1) +#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) #endif /* __T4_HW_H */ diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h index 7a981b81afafe88a9c3be939e27fb24879e39d50..a550d0c706f3e6d70fe318c9282274355aaa62c5 100644 --- a/drivers/net/cxgb4/t4_msg.h +++ b/drivers/net/cxgb4/t4_msg.h @@ -443,8 +443,7 @@ struct cpl_tx_pkt { #define cpl_tx_pkt_xt cpl_tx_pkt -struct cpl_tx_pkt_lso { - WR_HDR; +struct cpl_tx_pkt_lso_core { __be32 lso_ctrl; #define LSO_TCPHDR_LEN(x) ((x) << 0) #define 
LSO_IPHDR_LEN(x) ((x) << 4) @@ -460,6 +459,12 @@ struct cpl_tx_pkt_lso { /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ }; +struct cpl_tx_pkt_lso { + WR_HDR; + struct cpl_tx_pkt_lso_core c; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + struct cpl_iscsi_hdr { union opcode_tid ot; __be16 pdu_len_ddp; @@ -524,6 +529,8 @@ struct cpl_rx_pkt { __be32 l2info; #define RXF_UDP (1 << 22) #define RXF_TCP (1 << 23) +#define RXF_IP (1 << 24) +#define RXF_IP6 (1 << 25) __be16 hdr_len; __be16 err_vec; }; @@ -623,6 +630,11 @@ struct cpl_fw6_msg { __be64 data[4]; }; +/* cpl_fw6_msg.type values */ +enum { + FW6_TYPE_CMD_RPL = 0, +}; + enum { ULP_TX_MEM_READ = 2, ULP_TX_MEM_WRITE = 3, diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h index 5ed56483cbc2d7d8f6b6f61775802d7b863c3fc5..0adc5bcec7c41b029508da3c840549501ea0ae78 100644 --- a/drivers/net/cxgb4/t4_regs.h +++ b/drivers/net/cxgb4/t4_regs.h @@ -93,12 +93,15 @@ #define PKTSHIFT_MASK 0x00001c00U #define PKTSHIFT_SHIFT 10 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) +#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) #define INGPCIEBOUNDARY_MASK 0x00000380U #define INGPCIEBOUNDARY_SHIFT 7 #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) #define INGPADBOUNDARY_MASK 0x00000070U #define INGPADBOUNDARY_SHIFT 4 #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) +#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ + >> INGPADBOUNDARY_SHIFT) #define EGRPCIEBOUNDARY_MASK 0x0000000eU #define EGRPCIEBOUNDARY_SHIFT 1 #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) @@ -229,6 +232,7 @@ #define WINDOW_MASK 0x000000ffU #define WINDOW_SHIFT 0 #define WINDOW(x) ((x) << WINDOW_SHIFT) +#define PCIE_MEM_ACCESS_OFFSET 0x306c #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 #define RNPP 0x80000000U @@ -326,6 +330,9 @@ #define EDC_1_BASE_ADDR 0x7980 +#define CIM_BOOT_CFG 0x7b00 +#define BOOTADDR_MASK 0xffffff00U + #define CIM_PF_MAILBOX_DATA 0x240 #define CIM_PF_MAILBOX_CTRL 0x280 #define MBMSGVALID 0x00000008U diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h index 63991d68950e2efdc2d0d04e22fb86e840875358..0969f2fbc1b0f0a977b1e470a1f4d60e19be215c 100644 --- a/drivers/net/cxgb4/t4fw_api.h +++ b/drivers/net/cxgb4/t4fw_api.h @@ -71,6 +71,7 @@ struct fw_wr_hdr { #define FW_WR_ATOMIC(x) ((x) << 23) #define FW_WR_FLUSH(x) ((x) << 22) #define FW_WR_COMPL(x) ((x) << 21) +#define FW_WR_IMMDLEN_MASK 0xff #define FW_WR_IMMDLEN(x) ((x) << 0) #define FW_WR_EQUIQ (1U << 31) @@ -447,7 +448,9 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, - FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A + FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, + FW_PARAMS_PARAM_DEV_FWREV = 0x0B, + FW_PARAMS_PARAM_DEV_TPREV = 0x0C, }; /* @@ -475,7 +478,15 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15, + FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, + FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, + FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, + FW_PARAMS_PARAM_PFVF_VIID = 0x24, + FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, + FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, + FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, }; /* @@ -512,7 +523,7 @@ struct fw_pfvf_cmd { __be32 op_to_vfn; __be32 retval_len16; __be32 niqflint_niq; - __be32 cmask_to_neq; + __be32 
type_to_neq; __be32 tc_to_nexactf; __be32 r_caps_to_nethctrl; __be16 nricq; @@ -529,11 +540,16 @@ struct fw_pfvf_cmd { #define FW_PFVF_CMD_NIQ(x) ((x) << 0) #define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) +#define FW_PFVF_CMD_TYPE (1 << 31) +#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1) + #define FW_PFVF_CMD_CMASK(x) ((x) << 24) -#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) +#define FW_PFVF_CMD_CMASK_MASK 0xf +#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK) #define FW_PFVF_CMD_PMASK(x) ((x) << 20) -#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) +#define FW_PFVF_CMD_PMASK_MASK 0xf +#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK) #define FW_PFVF_CMD_NEQ(x) ((x) << 0) #define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) @@ -686,6 +702,7 @@ struct fw_eq_eth_cmd { #define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) #define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) #define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) +#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) #define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) #define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) @@ -804,16 +821,16 @@ struct fw_eq_ofld_cmd { struct fw_vi_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; - __be16 viid_pkd; + __be16 type_viid; u8 mac[6]; u8 portid_pkd; u8 nmac; u8 nmac0[6]; __be16 rsssize_pkd; u8 nmac1[6]; - __be16 r7; + __be16 idsiiq_pkd; u8 nmac2[6]; - __be16 r8; + __be16 idseiq_pkd; u8 nmac3[6]; __be64 r9; __be64 r10; @@ -824,13 +841,16 @@ struct fw_vi_cmd { #define FW_VI_CMD_ALLOC (1U << 31) #define FW_VI_CMD_FREE (1U << 30) #define FW_VI_CMD_VIID(x) ((x) << 0) +#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff) #define FW_VI_CMD_PORTID(x) ((x) << 4) +#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf) #define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) /* Special VI_MAC command index ids */ #define FW_VI_MAC_ADD_MAC 0x3FF #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE #define FW_VI_MAC_MAC_BASED_FREE 0x3FD +#define FW_CLS_TCAM_NUM_ENTRIES 336 enum fw_vi_mac_smac { FW_VI_MAC_MPS_TCAM_ENTRY, @@ -881,6 +901,7 @@ struct fw_vi_rxmode_cmd { }; #define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) +#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff #define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) #define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 #define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) @@ -1136,6 +1157,11 @@ struct fw_port_cmd { __be32 lstatus_to_modtype; __be16 pcap; __be16 acap; + __be16 mtu; + __u8 cbllen; + __u8 r9; + __be32 r10; + __be64 r11; } info; struct fw_port_ppp { __be32 pppen_to_ncsich; @@ -1161,6 +1187,7 @@ struct fw_port_cmd { #define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) #define FW_PORT_CMD_ACTION(x) ((x) << 16) +#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff) #define FW_PORT_CMD_CTLBF(x) ((x) << 10) #define FW_PORT_CMD_OVLAN3(x) ((x) << 7) @@ -1196,14 +1223,17 @@ struct fw_port_cmd { #define FW_PORT_CMD_NCSICH(x) ((x) << 4) enum fw_port_type { - FW_PORT_TYPE_FIBER, - FW_PORT_TYPE_KX4, + FW_PORT_TYPE_FIBER_XFI, + FW_PORT_TYPE_FIBER_XAUI, FW_PORT_TYPE_BT_SGMII, - FW_PORT_TYPE_KX, + FW_PORT_TYPE_BT_XFI, FW_PORT_TYPE_BT_XAUI, - FW_PORT_TYPE_KR, + FW_PORT_TYPE_KX4, FW_PORT_TYPE_CX4, - FW_PORT_TYPE_TWINAX, + FW_PORT_TYPE_KX, + FW_PORT_TYPE_KR, + FW_PORT_TYPE_SFP, + FW_PORT_TYPE_BP_AP, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK }; @@ -1213,6 +1243,9 @@ enum fw_port_module_type { FW_PORT_MOD_TYPE_LR, FW_PORT_MOD_TYPE_SR, FW_PORT_MOD_TYPE_ER, + FW_PORT_MOD_TYPE_TWINAX_PASSIVE, + FW_PORT_MOD_TYPE_TWINAX_ACTIVE, + FW_PORT_MOD_TYPE_LRM, FW_PORT_MOD_TYPE_NONE = 
FW_PORT_CMD_MODTYPE_MASK }; @@ -1469,6 +1502,7 @@ struct fw_rss_glb_config_cmd { }; #define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) +#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf) #define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 @@ -1485,13 +1519,14 @@ struct fw_rss_vi_config_cmd { } manual; struct fw_rss_vi_config_basicvirtual { __be32 r6; - __be32 defaultq_to_ip4udpen; + __be32 defaultq_to_udpen; #define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff) #define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) #define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) #define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) #define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) -#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) +#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0) __be64 r9; __be64 r10; } basicvirtual; diff --git a/drivers/net/cxgb4vf/Makefile b/drivers/net/cxgb4vf/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d72ee26cb4c700ec8268c713247e894323e55a2b --- /dev/null +++ b/drivers/net/cxgb4vf/Makefile @@ -0,0 +1,7 @@ +# +# Chelsio T4 SR-IOV Virtual Function Driver +# + +obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o + +cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h new file mode 100644 index 0000000000000000000000000000000000000000..8ea01962e045436c1a9c17dff6ab0d34f2efbba3 --- /dev/null +++ b/drivers/net/cxgb4vf/adapter.h @@ -0,0 +1,540 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This file should not be included directly. Include t4vf_common.h instead. + */ + +#ifndef __CXGB4VF_ADAPTER_H__ +#define __CXGB4VF_ADAPTER_H__ + +#include +#include +#include +#include +#include + +#include "../cxgb4/t4_hw.h" + +/* + * Constants of the implementation. 
+ */ +enum { + MAX_NPORTS = 1, /* max # of "ports" */ + MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */ + MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS, + + /* + * MSI-X interrupt index usage. + */ + MSIX_FW = 0, /* MSI-X index for firmware Q */ + MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */ + MSIX_EXTRAS = 1, + MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, + + /* + * The maximum number of Ingress and Egress Queues is determined by + * the maximum number of "Queue Sets" which we support plus any + * ancillary queues. Each "Queue Set" requires one Ingress Queue + * for RX Packet Ingress Event notifications and two Egress Queues for + * a Free List and an Ethernet TX list. + */ + INGQ_EXTRAS = 2, /* firmware event queue and */ + /* forwarded interrupts */ + MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS, + MAX_EGRQ = MAX_ETH_QSETS*2, +}; + +/* + * Forward structure definition references. + */ +struct adapter; +struct sge_eth_rxq; +struct sge_rspq; + +/* + * Per-"port" information. This is really per-Virtual Interface information + * but the use of the "port" nomenclature makes it easier to go back and forth + * between the PF and VF drivers ... + */ +struct port_info { + struct adapter *adapter; /* our adapter */ + struct vlan_group *vlan_grp; /* our VLAN group */ + u16 viid; /* virtual interface ID */ + s16 xact_addr_filt; /* index of our MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + u8 pidx; /* index into adapter port[] */ + u8 port_id; /* physical port ID */ + u8 rx_offload; /* CSO, etc. */ + u8 nqsets; /* # of "Queue Sets" */ + u8 first_qset; /* index of first "Queue Set" */ + struct link_config link_cfg; /* physical port configuration */ +}; + +/* port_info.rx_offload flags */ +enum { + RX_CSO = 1 << 0, +}; + +/* + * Scatter Gather Engine resources for the "adapter". Our ingress and egress + * queues are organized into "Queue Sets" with one ingress and one egress + * queue per Queue Set. These Queue Sets are apportionable between the "ports" + * (Virtual Interfaces). One extra ingress queue is used to receive + * asynchronous messages from the firmware. Note that the "Queue IDs" that we + * use here are really "Relative Queue IDs" which are returned as part of the + * firmware command to allocate queues. These queue IDs are relative to the + * absolute Queue ID base of the section of the Queue ID space allocated to + * the PF/VF. + */ + +/* + * SGE free-list queue state. + */ +struct rx_sw_desc; +struct sge_fl { + unsigned int avail; /* # of available RX buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of buffer allocation failures */ + unsigned long large_alloc_failed; + unsigned long starving; /* # of times FL was found starving */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + unsigned int cntxt_id; /* SGE relative QID for the free list */ + unsigned int abs_id; /* SGE absolute QID for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ + __be64 *desc; /* address of HW RX descriptor ring */ + dma_addr_t addr; /* PCI bus address of hardware ring */ +}; + +/* + * An ingress packet gather list. 
+ */ +struct pkt_gl { + skb_frag_t frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *, + const struct pkt_gl *); + +/* + * State for an SGE Response Queue. + */ +struct sge_rspq { + struct napi_struct napi; /* NAPI scheduling control */ + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 next_intr_params; /* holdoff params for next interrupt */ + int offset; /* offset into current FL buffer */ + + unsigned int unhandled_irqs; /* bogus interrupts */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + u8 intr_params; /* interrupt holdoff parameters */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 idx; /* queue index within its group */ + u16 cntxt_id; /* SGE rel QID for the response Q */ + u16 abs_id; /* SGE abs QID for the response Q */ + __be64 *desc; /* address of hardware response ring */ + dma_addr_t phys_addr; /* PCI bus address of ring */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response Q */ + struct adapter *adapter; /* our adapter */ + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; /* the handler for this response Q */ +}; + +/* + * Ethernet queue statistics + */ +struct sge_eth_stats { + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ + unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +/* + * State for an Ethernet Receive Queue. + */ +struct sge_eth_rxq { + struct sge_rspq rspq; /* Response Queue */ + struct sge_fl fl; /* Free List */ + struct sge_eth_stats stats; /* receive statistics */ +}; + +/* + * SGE Transmit Queue state. This contains all of the resources associated + * with the hardware status of a TX Queue which is a circular ring of hardware + * TX Descriptors. For convenience, it also contains a pointer to a parallel + * "Software Descriptor" array but we don't know anything about it here other + * than its type name. + */ +struct tx_desc { + /* + * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the + * hardware: Sizes, Producer and Consumer indices, etc. + */ + __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)]; +}; +struct tx_sw_desc; +struct sge_txq { + unsigned int in_use; /* # of in-use TX descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times queue has been stopped */ + unsigned long restarts; /* # of queue restarts */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + unsigned int cntxt_id; /* SGE relative QID for the TX Q */ + unsigned int abs_id; /* SGE absolute QID for the TX Q */ + struct tx_desc *desc; /* address of HW TX descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* PCI bus address of hardware ring */ +}; + +/* + * State for an Ethernet Transmit Queue. 
+ */ +struct sge_eth_txq { + struct sge_txq q; /* SGE TX Queue */ + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of TX checksum offloads */ + unsigned long vlan_ins; /* # of TX VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +}; + +/* + * The complete set of Scatter/Gather Engine resources. + */ +struct sge { + /* + * Our "Queue Sets" ... + */ + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + + /* + * Extra ingress queues for asynchronous firmware events and + * forwarded interrupts (when in MSI mode). + */ + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + /* + * State for managing "starving Free Lists" -- Free Lists which have + * fallen below a certain threshold of buffers available to the + * hardware and attempts to refill them up to that threshold have + * failed. We have a regular "slow tick" timer process which will + * make periodic attempts to refill these starving Free Lists ... + */ + DECLARE_BITMAP(starving_fl, MAX_EGRQ); + struct timer_list rx_timer; + + /* + * State for cleaning up completed TX descriptors. + */ + struct timer_list tx_timer; + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ + u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ + + /* + * Reverse maps from Absolute Queue IDs to associated queue pointers. + * The absolute Queue IDs are in a compact range which start at a + * [potentially large] Base Queue ID. We perform the reverse map by + * first converting the Absolute Queue ID into a Relative Queue ID by + * subtracting off the Base Queue ID and then use a Relative Queue ID + * indexed table to get the pointer to the corresponding software + * queue structure. + */ + unsigned int egr_base; + unsigned int ingr_base; + void *egr_map[MAX_EGRQ]; + struct sge_rspq *ingr_map[MAX_INGQ]; +}; + +/* + * Utility macros to convert Absolute- to Relative-Queue indices and Egress- + * and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros which provide + * pointers to Ingress- and Egress-Queues can be used as both L- and R-values + */ +#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base)) +#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base)) + +#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)]) +#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)]) + +/* + * Macro to iterate across Queue Sets ("rxq" is a historic misnomer). + */ +#define for_each_ethrxq(sge, iter) \ + for (iter = 0; iter < (sge)->ethqsets; iter++) + +/* + * Per-"adapter" (Virtual Function) information. 
+ */ +struct adapter { + /* PCI resources */ + void __iomem *regs; + struct pci_dev *pdev; + struct device *pdev_dev; + + /* "adapter" resources */ + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + struct adapter_params params; + + /* queue and interrupt resources */ + struct { + unsigned short vec; + char desc[22]; + } msix_info[MSIX_ENTRIES]; + struct sge sge; + + /* Linux network device resources */ + struct net_device *port[MAX_NPORTS]; + const char *name; + unsigned int msg_enable; + + /* debugfs resources */ + struct dentry *debugfs_root; + + /* various locks */ + spinlock_t stats_lock; +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1UL << 0), + USING_MSI = (1UL << 1), + USING_MSIX = (1UL << 2), + QUEUES_BOUND = (1UL << 3), +}; + +/* + * The following register read/write routine definitions are required by + * the common code. + */ + +/** + * t4_read_reg - read a HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 32-bit value of the given HW register. + */ +static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr) +{ + return readl(adapter->regs + reg_addr); +} + +/** + * t4_write_reg - write a HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given HW register. + */ +static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + writel(val, adapter->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +/** + * t4_read_reg64 - read a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 64-bit value of the given HW register. + */ +static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr) +{ + return readq(adapter->regs + reg_addr); +} + +/** + * t4_write_reg64 - write a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 64-bit value into the given HW register. + */ +static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, + u64 val) +{ + writeq(val, adapter->regs + reg_addr); +} + +/** + * port_name - return the string name of a port + * @adapter: the adapter + * @pidx: the port index + * + * Return the string name of the selected port. + */ +static inline const char *port_name(struct adapter *adapter, int pidx) +{ + return adapter->port[pidx]->name; +} + +/** + * t4_os_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @pidx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the common + * code when it retrieves a port's Ethernet address from EEPROM. 
+ */ +static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, + u8 hw_addr[]) +{ + memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); + memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @pidx: the port index + * + * Return the port_info structure for the adapter. + */ +static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx) +{ + return netdev_priv(adapter->port[pidx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +/* + * OS "Callback" function declarations. These are functions that the OS code + * is "contracted" to provide for the common code. + */ +void t4vf_os_link_changed(struct adapter *, int, int); + +/* + * SGE function prototype declarations. + */ +int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool, + struct net_device *, int, + struct sge_fl *, rspq_handler_t); +int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *, + struct net_device *, struct netdev_queue *, + unsigned int); +void t4vf_free_sge_resources(struct adapter *); + +int t4vf_eth_xmit(struct sk_buff *, struct net_device *); +int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *, + const struct pkt_gl *); + +irq_handler_t t4vf_intr_handler(struct adapter *); +irqreturn_t t4vf_sge_intr_msix(int, void *); + +int t4vf_sge_init(struct adapter *); +void t4vf_sge_start(struct adapter *); +void t4vf_sge_stop(struct adapter *); + +#endif /* __CXGB4VF_ADAPTER_H__ */ diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c new file mode 100644 index 0000000000000000000000000000000000000000..a16563219ac980e8fc7a1ffc2ed39f397ac84121 --- /dev/null +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -0,0 +1,2888 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4_msg.h" + +/* + * Generic information about the driver. + */ +#define DRV_VERSION "1.0.0" +#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver" + +/* + * Module Parameters. + * ================== + */ + +/* + * Default ethtool "message level" for adapters. + */ +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, + "default adapter ethtool message level bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X then MSI. This parameter determines which of these schemes the + * driver may consider as follows: + * + * msi = 2: choose from among MSI-X and MSI + * msi = 1: only consider MSI interrupts + * + * Note that unlike the Physical Function driver, this Virtual Function driver + * does _not_ support legacy INTx interrupts (this limitation is mandated by + * the PCI-E SR-IOV standard). + */ +#define MSI_MSIX 2 +#define MSI_MSI 1 +#define MSI_DEFAULT MSI_MSIX + +static int msi = MSI_DEFAULT; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI"); + +/* + * Fundamental constants. + * ====================== + */ + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + + MIN_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16, + + /* + * For purposes of manipulating the Free List size we need to + * recognize that Free Lists are actually Egress Queues (the host + * produces free buffers which the hardware consumes), Egress Queues + * indices are all in units of Egress Context Units bytes, and free + * list entries are 64-bit PCI DMA addresses. And since the state of + * the Producer Index == the Consumer Index implies an EMPTY list, we + * always have at least one Egress Unit's worth of Free List entries + * unused. See sge.c for more details ... + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + MIN_FL_RESID = FL_PER_EQ_UNIT, +}; + +/* + * Global driver state. + * ==================== + */ + +static struct dentry *cxgb4vf_debugfs_root; + +/* + * OS "Callback" functions. + * ======================== + */ + +/* + * The link status has changed on the indicated "port" (Virtual Interface). + */ +void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) +{ + struct net_device *dev = adapter->port[pidx]; + + /* + * If the port is disabled or the current recorded "link up" + * status matches the new status, just return. 
+ */ + if (!netif_running(dev) || link_ok == netif_carrier_ok(dev)) + return; + + /* + * Tell the OS that the link status has changed and print a short + * informative message on the console about the event. + */ + if (link_ok) { + const char *s; + const char *fc; + const struct port_info *pi = netdev_priv(dev); + + netif_carrier_on(dev); + + switch (pi->link_cfg.speed) { + case SPEED_10000: + s = "10Gbps"; + break; + + case SPEED_1000: + s = "1000Mbps"; + break; + + case SPEED_100: + s = "100Mbps"; + break; + + default: + s = "unknown"; + break; + } + + switch (pi->link_cfg.fc) { + case PAUSE_RX: + fc = "RX"; + break; + + case PAUSE_TX: + fc = "TX"; + break; + + case PAUSE_RX|PAUSE_TX: + fc = "RX/TX"; + break; + + default: + fc = "no"; + break; + } + + printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n", + dev->name, s, fc); + } else { + netif_carrier_off(dev); + printk(KERN_INFO "%s: link down\n", dev->name); + } +} + +/* + * Net device operations. + * ====================== + */ + +/* + * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction + * based on whether the specified VLAN Group pointer is NULL or not. + */ +static void cxgb4vf_vlan_rx_register(struct net_device *dev, + struct vlan_group *grp) +{ + struct port_info *pi = netdev_priv(dev); + + pi->vlan_grp = grp; + t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0); +} + +/* + * Perform the MAC and PHY actions needed to enable a "port" (Virtual + * Interface). + */ +static int link_start(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. + */ + ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1, + true); + if (ret == 0) { + ret = t4vf_change_mac(pi->adapter, pi->viid, + pi->xact_addr_filt, dev->dev_addr, true); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + + /* + * We don't need to actually "start the link" itself since the + * firmware will do that for us when the first Virtual Interface + * is enabled on a port. + */ + if (ret == 0) + ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); + return ret; +} + +/* + * Name the MSI-X interrupts. + */ +static void name_msix_vecs(struct adapter *adapter) +{ + int namelen = sizeof(adapter->msix_info[0].desc) - 1; + int pidx; + + /* + * Firmware events. + */ + snprintf(adapter->msix_info[MSIX_FW].desc, namelen, + "%s-FWeventq", adapter->name); + adapter->msix_info[MSIX_FW].desc[namelen] = 0; + + /* + * Ethernet queues. + */ + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + const struct port_info *pi = netdev_priv(dev); + int qs, msi; + + for (qs = 0, msi = MSIX_NIQFLINT; + qs < pi->nqsets; + qs++, msi++) { + snprintf(adapter->msix_info[msi].desc, namelen, + "%s-%d", dev->name, qs); + adapter->msix_info[msi].desc[namelen] = 0; + } + } +} + +/* + * Request all of our MSI-X resources. + */ +static int request_msix_queue_irqs(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq, msi, err; + + /* + * Firmware events. + */ + err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix, + 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq); + if (err) + return err; + + /* + * Ethernet queues. 
+ */ + msi = MSIX_NIQFLINT; + for_each_ethrxq(s, rxq) { + err = request_irq(adapter->msix_info[msi].vec, + t4vf_sge_intr_msix, 0, + adapter->msix_info[msi].desc, + &s->ethrxq[rxq].rspq); + if (err) + goto err_free_irqs; + msi++; + } + return 0; + +err_free_irqs: + while (--rxq >= 0) + free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); + free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); + return err; +} + +/* + * Free our MSI-X resources. + */ +static void free_msix_queue_irqs(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq, msi; + + free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); + msi = MSIX_NIQFLINT; + for_each_ethrxq(s, rxq) + free_irq(adapter->msix_info[msi++].vec, + &s->ethrxq[rxq].rspq); +} + +/* + * Turn on NAPI and start up interrupts on a response queue. + */ +static void qenable(struct sge_rspq *rspq) +{ + napi_enable(&rspq->napi); + + /* + * 0-increment the Going To Sleep register to start the timer and + * enable interrupts. + */ + t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(0) | + SEINTARM(rspq->intr_params) | + INGRESSQID(rspq->cntxt_id)); +} + +/* + * Enable NAPI scheduling and interrupt generation for all Receive Queues. + */ +static void enable_rx(struct adapter *adapter) +{ + int rxq; + struct sge *s = &adapter->sge; + + for_each_ethrxq(s, rxq) + qenable(&s->ethrxq[rxq].rspq); + qenable(&s->fw_evtq); + + /* + * The interrupt queue doesn't use NAPI so we do the 0-increment of + * its Going To Sleep register here to get it started. + */ + if (adapter->flags & USING_MSI) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(0) | + SEINTARM(s->intrq.intr_params) | + INGRESSQID(s->intrq.cntxt_id)); + +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq; + + for_each_ethrxq(s, rxq) + napi_disable(&s->ethrxq[rxq].rspq.napi); + napi_disable(&s->fw_evtq.napi); +} + +/* + * Response queue handler for the firmware event queue. + */ +static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, + const struct pkt_gl *gl) +{ + /* + * Extract response opcode and get pointer to CPL message body. + */ + struct adapter *adapter = rspq->adapter; + u8 opcode = ((const struct rss_header *)rsp)->opcode; + void *cpl = (void *)(rsp + 1); + + switch (opcode) { + case CPL_FW6_MSG: { + /* + * We've received an asynchronous message from the firmware. + */ + const struct cpl_fw6_msg *fw_msg = cpl; + if (fw_msg->type == FW6_TYPE_CMD_RPL) + t4vf_handle_fw_rpl(adapter, fw_msg->data); + break; + } + + case CPL_SGE_EGR_UPDATE: { + /* + * We've received an Egress Queue Status Update message. We + * get these, if the SGE is configured to send these when the + * firmware passes certain points in processing our TX + * Ethernet Queue or if we make an explicit request for one. + * We use these updates to determine when we may need to + * restart a TX Ethernet Queue which was stopped for lack of + * free TX Queue Descriptors ... + */ + const struct cpl_sge_egr_update *p = (void *)cpl; + unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); + struct sge *s = &adapter->sge; + struct sge_txq *tq; + struct sge_eth_txq *txq; + unsigned int eq_idx; + + /* + * Perform sanity checking on the Queue ID to make sure it + * really refers to one of our TX Ethernet Egress Queues which + * is active and matches the queue's ID. 
None of these error + * conditions should ever happen so we may want to either make + * them fatal and/or conditionalized under DEBUG. + */ + eq_idx = EQ_IDX(s, qid); + if (unlikely(eq_idx >= MAX_EGRQ)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d out of range\n", qid); + break; + } + tq = s->egr_map[eq_idx]; + if (unlikely(tq == NULL)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d TXQ=NULL\n", qid); + break; + } + txq = container_of(tq, struct sge_eth_txq, q); + if (unlikely(tq->abs_id != qid)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d refers to TXQ %d\n", + qid, tq->abs_id); + break; + } + + /* + * Restart a stopped TX Queue which has less than half of its + * TX ring in use ... + */ + txq->q.restarts++; + netif_tx_wake_queue(txq->txq); + break; + } + + default: + dev_err(adapter->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + } + + return 0; +} + +/* + * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues + * to use and initializes them. We support multiple "Queue Sets" per port if + * we have MSI-X, otherwise just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int err, pidx, msix; + + /* + * Clear "Queue Set" Free List Starving and TX Queue Mapping Error + * state. + */ + bitmap_zero(s->starving_fl, MAX_EGRQ); + + /* + * If we're using MSI interrupt mode we need to set up a "forwarded + * interrupt" queue which we'll set up with our MSI vector. The rest + * of the ingress queues will be set up to forward their interrupts to + * this queue ... This must be first since t4vf_sge_alloc_rxq() uses + * the intrq's queue ID as the interrupt forwarding queue for the + * subsequent calls ... + */ + if (adapter->flags & USING_MSI) { + err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, + adapter->port[0], 0, NULL, NULL); + if (err) + goto err_free_queues; + } + + /* + * Allocate our ingress queue for asynchronous firmware messages. + */ + err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0], + MSIX_FW, NULL, fwevtq_handler); + if (err) + goto err_free_queues; + + /* + * Allocate each "port"'s initial Queue Sets. These can be changed + * later on ... up to the point where any interface on the adapter is + * brought up at which point lots of things get nailed down + * permanently ... + */ + msix = MSIX_NIQFLINT; + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; + int qs; + + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, + dev, msix++, + &rxq->fl, t4vf_ethrx_handler); + if (err) + goto err_free_queues; + + err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, + netdev_get_tx_queue(dev, qs), + s->fw_evtq.cntxt_id); + if (err) + goto err_free_queues; + + rxq->rspq.idx = qs; + memset(&rxq->stats, 0, sizeof(rxq->stats)); + } + } + + /* + * Create the reverse mappings for the queues. 
+ */ + s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id; + s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id; + IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq; + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; + int qs; + + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; + EQ_MAP(s, txq->q.abs_id) = &txq->q; + + /* + * The FW_IQ_CMD doesn't return the Absolute Queue IDs + * for Free Lists but since all of the Egress Queues + * (including Free Lists) have Relative Queue IDs + * which are computed as Absolute - Base Queue ID, we + * can synthesize the Absolute Queue IDs for the Free + * Lists. This is useful for debugging purposes when + * we want to dump Queue Contexts via the PF Driver. + */ + rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; + EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; + } + } + return 0; + +err_free_queues: + t4vf_free_sge_resources(adapter); + return err; +} + +/* + * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive + * queues. We configure the RSS CPU lookup table to distribute to the number + * of HW receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each "port" (Virtual + * Interface). We always configure the RSS mapping for all ports since the + * mapping table has plenty of entries. + */ +static int setup_rss(struct adapter *adapter) +{ + int pidx; + + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; + u16 rss[MAX_PORT_QSETS]; + int qs, err; + + for (qs = 0; qs < pi->nqsets; qs++) + rss[qs] = rxq[qs].rspq.abs_id; + + err = t4vf_config_rss_range(adapter, pi->viid, + 0, pi->rss_size, rss, pi->nqsets); + if (err) + return err; + + /* + * Perform Global RSS Mode-specific initialization. + */ + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: + /* + * If Tunnel All Lookup isn't specified in the global + * RSS Configuration, then we need to specify a + * default Ingress Queue for any ingress packets which + * aren't hashed. We'll use our first ingress queue + * ... + */ + if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { + union rss_vi_config config; + err = t4vf_read_rss_vi_config(adapter, + pi->viid, + &config); + if (err) + return err; + config.basicvirtual.defaultq = + rxq[0].rspq.abs_id; + err = t4vf_write_rss_vi_config(adapter, + pi->viid, + &config); + if (err) + return err; + } + break; + } + } + + return 0; +} + +/* + * Bring the adapter up. Called whenever we go from no "ports" open to having + * one open. This function performs the actions necessary to make an adapter + * operational, such as completing the initialization of HW modules, and + * enabling interrupts. Must be called with the rtnl lock held. (Note that + * this is called "cxgb_up" in the PF Driver.) + */ +static int adapter_up(struct adapter *adapter) +{ + int err; + + /* + * If this is the first time we've been called, perform basic + * adapter setup. Once we've done this, many of our adapter + * parameters can no longer be changed ... 
+ */ + if ((adapter->flags & FULL_INIT_DONE) == 0) { + err = setup_sge_queues(adapter); + if (err) + return err; + err = setup_rss(adapter); + if (err) { + t4vf_free_sge_resources(adapter); + return err; + } + + if (adapter->flags & USING_MSIX) + name_msix_vecs(adapter); + adapter->flags |= FULL_INIT_DONE; + } + + /* + * Acquire our interrupt resources. We only support MSI-X and MSI. + */ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + if (adapter->flags & USING_MSIX) + err = request_msix_queue_irqs(adapter); + else + err = request_irq(adapter->pdev->irq, + t4vf_intr_handler(adapter), 0, + adapter->name, adapter); + if (err) { + dev_err(adapter->pdev_dev, "request_irq failed, err %d\n", + err); + return err; + } + + /* + * Enable NAPI ingress processing and return success. + */ + enable_rx(adapter); + t4vf_sge_start(adapter); + return 0; +} + +/* + * Bring the adapter down. Called whenever the last "port" (Virtual + * Interface) closed. (Note that this routine is called "cxgb_down" in the PF + * Driver.) + */ +static void adapter_down(struct adapter *adapter) +{ + /* + * Free interrupt resources. + */ + if (adapter->flags & USING_MSIX) + free_msix_queue_irqs(adapter); + else + free_irq(adapter->pdev->irq, adapter); + + /* + * Wait for NAPI handlers to finish. + */ + quiesce_rx(adapter); +} + +/* + * Start up a net device. + */ +static int cxgb4vf_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + /* + * If this is the first interface that we're opening on the "adapter", + * bring the "adapter" up now. + */ + if (adapter->open_device_map == 0) { + err = adapter_up(adapter); + if (err) + return err; + } + + /* + * Note that this interface is up and start everything up ... + */ + dev->real_num_tx_queues = pi->nqsets; + set_bit(pi->port_id, &adapter->open_device_map); + link_start(dev); + netif_tx_start_all_queues(dev); + return 0; +} + +/* + * Shut down a net device. This routine is called "cxgb_close" in the PF + * Driver ... + */ +static int cxgb4vf_stop(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + ret = t4vf_enable_vi(adapter, pi->viid, false, false); + pi->link_cfg.link_ok = 0; + + clear_bit(pi->port_id, &adapter->open_device_map); + if (adapter->open_device_map == 0) + adapter_down(adapter); + return 0; +} + +/* + * Translate our basic statistics into the standard "ifconfig" statistics. 
+ */
+static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
+{
+ struct t4vf_port_stats stats;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct net_device_stats *ns = &dev->stats;
+ int err;
+
+ spin_lock(&adapter->stats_lock);
+ err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
+ spin_unlock(&adapter->stats_lock);
+
+ memset(ns, 0, sizeof(*ns));
+ if (err)
+ return ns;
+
+ ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
+ stats.tx_ucast_bytes + stats.tx_offload_bytes);
+ ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
+ stats.tx_ucast_frames + stats.tx_offload_frames);
+ ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
+ stats.rx_ucast_bytes);
+ ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
+ stats.rx_ucast_frames);
+ ns->multicast = stats.rx_mcast_frames;
+ ns->tx_errors = stats.tx_drop_frames;
+ ns->rx_errors = stats.rx_err_frames;
+
+ return ns;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
+ * array of address pointers and return the number collected.
+ */
+static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int maxaddrs)
+{
+ unsigned int naddr = 0;
+ const struct netdev_hw_addr *ha;
+
+ for_each_dev_addr(dev, ha) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
+ return naddr;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
+ * array of address pointers and return the number collected.
+ */
+static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int maxaddrs)
+{
+ unsigned int naddr = 0;
+ const struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
+ return naddr;
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ u16 filt_idx[7];
+ const u8 *addr[7];
+ int ret, naddr = 0;
+ const struct port_info *pi = netdev_priv(dev);
+
+ /* first do the secondary unicast addresses */
+ naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
+ if (naddr > 0) {
+ ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+ naddr, addr, filt_idx, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ }
+
+ /* next set up the multicast addresses */
+ naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
+ if (naddr > 0) {
+ ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+ naddr, addr, filt_idx, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+ }
+
+ return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+ (dev->flags & IFF_PROMISC) != 0,
+ (dev->flags & IFF_ALLMULTI) != 0,
+ 1, -1, sleep_ok);
+ return ret;
+}
+
+/*
+ * Set the current receive modes on the device.
+ */ +static void cxgb4vf_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +/* + * Find the entry in the interrupt holdoff timer value array which comes + * closest to the specified interrupt holdoff value. + */ +static int closest_timer(const struct sge *s, int us) +{ + int i, timer_idx = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + int delta = us - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + timer_idx = i; + } + } + return timer_idx; +} + +static int closest_thres(const struct sge *s, int thres) +{ + int i, delta, pktcnt_idx = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + pktcnt_idx = i; + } + } + return pktcnt_idx; +} + +/* + * Return a queue's interrupt hold-off time in us. 0 means no timer. + */ +static unsigned int qtimer_val(const struct adapter *adapter, + const struct sge_rspq *rspq) +{ + unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params); + + return timer_idx < SGE_NTIMERS + ? adapter->sge.timer_val[timer_idx] + : 0; +} + +/** + * set_rxq_intr_params - set a queue's interrupt holdoff parameters + * @adapter: the adapter + * @rspq: the RX response queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an RX response queue's interrupt hold-off time and packet count. + * At least one of the two needs to be enabled for the queue to generate + * interrupts. + */ +static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, + unsigned int us, unsigned int cnt) +{ + unsigned int timer_idx; + + /* + * If both the interrupt holdoff timer and count are specified as + * zero, default to a holdoff count of 1 ... + */ + if ((us | cnt) == 0) + cnt = 1; + + /* + * If an interrupt holdoff count has been specified, then find the + * closest configured holdoff count and use that. If the response + * queue has already been created, then update its queue context + * parameters ... + */ + if (cnt) { + int err; + u32 v, pktcnt_idx; + + pktcnt_idx = closest_thres(&adapter->sge, cnt); + if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { + v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ(rspq->cntxt_id); + err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx); + if (err) + return err; + } + rspq->pktcnt_idx = pktcnt_idx; + } + + /* + * Compute the closest holdoff timer index from the supplied holdoff + * timer value. + */ + timer_idx = (us == 0 + ? SGE_TIMER_RSTRT_CNTR + : closest_timer(&adapter->sge, us)); + + /* + * Update the response queue's interrupt coalescing parameters and + * return success. + */ + rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) | + (cnt > 0 ? QINTR_CNT_EN : 0)); + return 0; +} + +/* + * Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + */ +static inline unsigned int mk_adap_vers(const struct adapter *adapter) +{ + /* + * Chip version 4, revision 0x3f (cxgb4vf). + */ + return 4 | (0x3f << 10); +} + +/* + * Execute the specified ioctl command. 
+ */
+static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ /*
+ * The VF Driver doesn't have access to any of the other
+ * common Ethernet device ioctl()'s (like reading/writing
+ * PHY registers, etc.).
+ */
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Change the device's MTU.
+ */
+static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* accommodate SACK */
+ if (new_mtu < 81)
+ return -EINVAL;
+
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
+ -1, -1, -1, -1, true);
+ if (!ret)
+ dev->mtu = new_mtu;
+ return ret;
+}
+
+/*
+ * Change the device's MAC address.
+ */
+static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
+{
+ int ret;
+ struct sockaddr *addr = _addr;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
+ addr->sa_data, true);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+/*
+ * Return a TX Queue on which to send the specified skb.
+ */
+static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /*
+ * XXX For now just use the default hash but we probably want to
+ * XXX look at other possibilities ...
+ */
+ return skb_tx_hash(dev, skb);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Poll all of our receive queues. This is called outside of normal interrupt
+ * context.
+ */
+static void cxgb4vf_poll_controller(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (adapter->flags & USING_MSIX) {
+ struct sge_eth_rxq *rxq;
+ int nqsets;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ for (nqsets = pi->nqsets; nqsets; nqsets--) {
+ t4vf_sge_intr_msix(0, &rxq->rspq);
+ rxq++;
+ }
+ } else
+ t4vf_intr_handler(adapter)(0, adapter);
+}
+#endif
+
+/*
+ * Ethtool operations.
+ * ===================
+ *
+ * Note that we don't support any ethtool operations which change the physical
+ * state of the port to which we're linked.
+ */
+
+/*
+ * Return current port link settings.
+ */
+static int cxgb4vf_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ cmd->supported = pi->link_cfg.supported;
+ cmd->advertising = pi->link_cfg.advertising;
+ cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
+ cmd->duplex = DUPLEX_FULL;
+
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = pi->port_id;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+/*
+ * Return our driver information.
+ */ +static void cxgb4vf_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct adapter *adapter = netdev2adap(dev); + + strcpy(drvinfo->driver, KBUILD_MODNAME); + strcpy(drvinfo->version, DRV_VERSION); + strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent))); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev), + FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev), + FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev), + FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev)); +} + +/* + * Return current adapter message level. + */ +static u32 cxgb4vf_get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +/* + * Set current adapter message level. + */ +static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel) +{ + netdev2adap(dev)->msg_enable = msglevel; +} + +/* + * Return the device's current Queue Set ring size parameters along with the + * allowed maximum values. Since ethtool doesn't understand the concept of + * multi-queue devices, we just return the current values associated with the + * first Queue Set. + */ +static void cxgb4vf_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + rp->rx_max_pending = MAX_RX_BUFFERS; + rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + rp->rx_jumbo_max_pending = 0; + rp->tx_max_pending = MAX_TXQ_ENTRIES; + + rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; + rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + rp->rx_jumbo_pending = 0; + rp->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +/* + * Set the Queue Set ring size parameters for the device. Again, since + * ethtool doesn't allow for the concept of multiple queues per device, we'll + * apply these new values across all of the Queue Sets associated with the + * device -- after vetting them of course! + */ +static int cxgb4vf_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + int qs; + + if (rp->rx_pending > MAX_RX_BUFFERS || + rp->rx_jumbo_pending || + rp->tx_pending > MAX_TXQ_ENTRIES || + rp->rx_mini_pending > MAX_RSPQ_ENTRIES || + rp->rx_mini_pending < MIN_RSPQ_ENTRIES || + rp->rx_pending < MIN_FL_ENTRIES || + rp->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { + s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; + s->ethrxq[qs].rspq.size = rp->rx_mini_pending; + s->ethtxq[qs].q.size = rp->tx_pending; + } + return 0; +} + +/* + * Return the interrupt holdoff timer and count for the first Queue Set on the + * device. Our extension ioctl() (the cxgbtool interface) allows the + * interrupt holdoff timer to be read on all of the device's Queue Sets. 
+ */ +static int cxgb4vf_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adapter = pi->adapter; + const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; + + coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); + coalesce->rx_max_coalesced_frames = + ((rspq->intr_params & QINTR_CNT_EN) + ? adapter->sge.counter_val[rspq->pktcnt_idx] + : 0); + return 0; +} + +/* + * Set the RX interrupt holdoff timer and count for the first Queue Set on the + * interface. Our extension ioctl() (the cxgbtool interface) allows us to set + * the interrupt holdoff timer on any of the device's Queue Sets. + */ +static int cxgb4vf_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + return set_rxq_intr_params(adapter, + &adapter->sge.ethrxq[pi->first_qset].rspq, + coalesce->rx_coalesce_usecs, + coalesce->rx_max_coalesced_frames); +} + +/* + * Report current port link pause parameter settings. + */ +static void cxgb4vf_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct port_info *pi = netdev_priv(dev); + + pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; + pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; +} + +/* + * Return whether RX Checksum Offloading is currently enabled for the device. + */ +static u32 cxgb4vf_get_rx_csum(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + return (pi->rx_offload & RX_CSO) != 0; +} + +/* + * Turn RX Checksum Offloading on or off for the device. + */ +static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum) +{ + struct port_info *pi = netdev_priv(dev); + + if (csum) + pi->rx_offload |= RX_CSO; + else + pi->rx_offload &= ~RX_CSO; + return 0; +} + +/* + * Identify the port by blinking the port's LED. + */ +static int cxgb4vf_phys_id(struct net_device *dev, u32 id) +{ + struct port_info *pi = netdev_priv(dev); + + return t4vf_identify_port(pi->adapter, pi->viid, 5); +} + +/* + * Port stats maintained per queue of the port. + */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; +}; + +/* + * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that + * these need to match the order of statistics returned by + * t4vf_get_port_stats(). + */ +static const char stats_strings[][ETH_GSTRING_LEN] = { + /* + * These must match the layout of the t4vf_port_stats structure. + */ + "TxBroadcastBytes ", + "TxBroadcastFrames ", + "TxMulticastBytes ", + "TxMulticastFrames ", + "TxUnicastBytes ", + "TxUnicastFrames ", + "TxDroppedFrames ", + "TxOffloadBytes ", + "TxOffloadFrames ", + "RxBroadcastBytes ", + "RxBroadcastFrames ", + "RxMulticastBytes ", + "RxMulticastFrames ", + "RxUnicastBytes ", + "RxUnicastFrames ", + "RxErrorFrames ", + + /* + * These are accumulated per-queue statistics and must match the + * order of the fields in the queue_port_stats structure. + */ + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", +}; + +/* + * Return the number of statistics in the specified statistics set. 
+ */ +static int cxgb4vf_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } + /*NOTREACHED*/ +} + +/* + * Return the strings for the specified statistics set. + */ +static void cxgb4vf_get_strings(struct net_device *dev, + u32 sset, + u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + memcpy(data, stats_strings, sizeof(stats_strings)); + break; + } +} + +/* + * Small utility routine to accumulate queue statistics across the queues of + * a "port". + */ +static void collect_sge_port_stats(const struct adapter *adapter, + const struct port_info *pi, + struct queue_port_stats *stats) +{ + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; + int qs; + + memset(stats, 0, sizeof(*stats)); + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + stats->tso += txq->tso; + stats->tx_csum += txq->tx_cso; + stats->rx_csum += rxq->stats.rx_cso; + stats->vlan_ex += rxq->stats.vlan_ex; + stats->vlan_ins += txq->vlan_ins; + } +} + +/* + * Return the ETH_SS_STATS statistics set. + */ +static void cxgb4vf_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + int err = t4vf_get_port_stats(adapter, pi->pidx, + (struct t4vf_port_stats *)data); + if (err) + memset(data, 0, sizeof(struct t4vf_port_stats)); + + data += sizeof(struct t4vf_port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return the size of our register map. + */ +static int cxgb4vf_get_regs_len(struct net_device *dev) +{ + return T4VF_REGMAP_SIZE; +} + +/* + * Dump a block of registers, start to end inclusive, into a buffer. + */ +static void reg_block_dump(struct adapter *adapter, void *regbuf, + unsigned int start, unsigned int end) +{ + u32 *bp = regbuf + start - T4VF_REGMAP_START; + + for ( ; start <= end; start += sizeof(u32)) { + /* + * Avoid reading the Mailbox Control register since that + * can trigger a Mailbox Ownership Arbitration cycle and + * interfere with communication with the firmware. + */ + if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL) + *bp++ = 0xffff; + else + *bp++ = t4_read_reg(adapter, start); + } +} + +/* + * Copy our entire register map into the provided buffer. + */ +static void cxgb4vf_get_regs(struct net_device *dev, + struct ethtool_regs *regs, + void *regbuf) +{ + struct adapter *adapter = netdev2adap(dev); + + regs->version = mk_adap_vers(adapter); + + /* + * Fill in register buffer with our register map. + */ + memset(regbuf, 0, T4VF_REGMAP_SIZE); + + reg_block_dump(adapter, regbuf, + T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST, + T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST); + reg_block_dump(adapter, regbuf, + T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, + T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); + reg_block_dump(adapter, regbuf, + T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, + T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); + reg_block_dump(adapter, regbuf, + T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, + T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); + + reg_block_dump(adapter, regbuf, + T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST, + T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST); +} + +/* + * Report current Wake On LAN settings. 
+ */ +static void cxgb4vf_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +/* + * Set TCP Segmentation Offloading feature capabilities. + */ +static int cxgb4vf_set_tso(struct net_device *dev, u32 tso) +{ + if (tso) + dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + return 0; +} + +static struct ethtool_ops cxgb4vf_ethtool_ops = { + .get_settings = cxgb4vf_get_settings, + .get_drvinfo = cxgb4vf_get_drvinfo, + .get_msglevel = cxgb4vf_get_msglevel, + .set_msglevel = cxgb4vf_set_msglevel, + .get_ringparam = cxgb4vf_get_ringparam, + .set_ringparam = cxgb4vf_set_ringparam, + .get_coalesce = cxgb4vf_get_coalesce, + .set_coalesce = cxgb4vf_set_coalesce, + .get_pauseparam = cxgb4vf_get_pauseparam, + .get_rx_csum = cxgb4vf_get_rx_csum, + .set_rx_csum = cxgb4vf_set_rx_csum, + .set_tx_csum = ethtool_op_set_tx_ipv6_csum, + .set_sg = ethtool_op_set_sg, + .get_link = ethtool_op_get_link, + .get_strings = cxgb4vf_get_strings, + .phys_id = cxgb4vf_phys_id, + .get_sset_count = cxgb4vf_get_sset_count, + .get_ethtool_stats = cxgb4vf_get_ethtool_stats, + .get_regs_len = cxgb4vf_get_regs_len, + .get_regs = cxgb4vf_get_regs, + .get_wol = cxgb4vf_get_wol, + .set_tso = cxgb4vf_set_tso, +}; + +/* + * /sys/kernel/debug/cxgb4vf support code and data. + * ================================================ + */ + +/* + * Show SGE Queue Set information. We display QPL Queues Sets per line. + */ +#define QPL 4 + +static int sge_qinfo_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); + int qs, r = (uintptr_t)v - 1; + + if (r) + seq_putc(seq, '\n'); + + #define S3(fmt_spec, s, v) \ + do {\ + seq_printf(seq, "%-12s", s); \ + for (qs = 0; qs < n; ++qs) \ + seq_printf(seq, " %16" fmt_spec, v); \ + seq_putc(seq, '\n'); \ + } while (0) + #define S(s, v) S3("s", s, v) + #define T(s, v) S3("u", s, txq[qs].v) + #define R(s, v) S3("u", s, rxq[qs].v) + + if (r < eth_entries) { + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; + int n = min(QPL, adapter->sge.ethqsets - QPL * r); + + S("QType:", "Ethernet"); + S("Interface:", + (rxq[qs].rspq.netdev + ? rxq[qs].rspq.netdev->name + : "N/A")); + S3("d", "Port:", + (rxq[qs].rspq.netdev + ? 
((struct port_info *) + netdev_priv(rxq[qs].rspq.netdev))->port_id + : -1)); + T("TxQ ID:", q.abs_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ PIdx:", q.pidx); + T("TxQ CIdx:", q.cidx); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq)); + S3("u", "Intr pktcnt:", + adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); + R("RspQ CIdx:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + R("FL ID:", fl.abs_id); + R("FL size:", fl.size - MIN_FL_RESID); + R("FL avail:", fl.avail); + R("FL PIdx:", fl.pidx); + R("FL CIdx:", fl.cidx); + return 0; + } + + r -= eth_entries; + if (r == 0) { + const struct sge_rspq *evtq = &adapter->sge.fw_evtq; + + seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); + seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); + seq_printf(seq, "%-12s %16u\n", "Intr delay:", + qtimer_val(adapter, evtq)); + seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", + adapter->sge.counter_val[evtq->pktcnt_idx]); + seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx); + seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); + } else if (r == 1) { + const struct sge_rspq *intrq = &adapter->sge.intrq; + + seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue"); + seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id); + seq_printf(seq, "%-12s %16u\n", "Intr delay:", + qtimer_val(adapter, intrq)); + seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", + adapter->sge.counter_val[intrq->pktcnt_idx]); + seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx); + seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen); + } + + #undef R + #undef T + #undef S + #undef S3 + + return 0; +} + +/* + * Return the number of "entries" in our "file". We group the multi-Queue + * sections with QPL Queue Sets per "entry". The sections of the output are: + * + * Ethernet RX/TX Queue Sets + * Firmware Event Queue + * Forwarded Interrupt Queue (if in MSI mode) + */ +static int sge_queue_entries(const struct adapter *adapter) +{ + return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + + ((adapter->flags & USING_MSI) != 0); +} + +static void *sge_queue_start(struct seq_file *seq, loff_t *pos) +{ + int entries = sge_queue_entries(seq->private); + + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static void sge_queue_stop(struct seq_file *seq, void *v) +{ +} + +static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos) +{ + int entries = sge_queue_entries(seq->private); + + ++*pos; + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static const struct seq_operations sge_qinfo_seq_ops = { + .start = sge_queue_start, + .next = sge_queue_next, + .stop = sge_queue_stop, + .show = sge_qinfo_show +}; + +static int sge_qinfo_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &sge_qinfo_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations sge_qinfo_debugfs_fops = { + .owner = THIS_MODULE, + .open = sge_qinfo_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * Show SGE Queue Set statistics. We display QPL Queues Sets per line. 
+ */ +#define QPL 4 + +static int sge_qstats_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); + int qs, r = (uintptr_t)v - 1; + + if (r) + seq_putc(seq, '\n'); + + #define S3(fmt, s, v) \ + do { \ + seq_printf(seq, "%-16s", s); \ + for (qs = 0; qs < n; ++qs) \ + seq_printf(seq, " %8" fmt, v); \ + seq_putc(seq, '\n'); \ + } while (0) + #define S(s, v) S3("s", s, v) + + #define T3(fmt, s, v) S3(fmt, s, txq[qs].v) + #define T(s, v) T3("lu", s, v) + + #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v) + #define R(s, v) R3("lu", s, v) + + if (r < eth_entries) { + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; + int n = min(QPL, adapter->sge.ethqsets - QPL * r); + + S("QType:", "Ethernet"); + S("Interface:", + (rxq[qs].rspq.netdev + ? rxq[qs].rspq.netdev->name + : "N/A")); + R3("u", "RspQNullInts:", rspq.unhandled_irqs); + R("RxPackets:", stats.pkts); + R("RxCSO:", stats.rx_cso); + R("VLANxtract:", stats.vlan_ex); + R("LROmerged:", stats.lro_merged); + R("LROpackets:", stats.lro_pkts); + R("RxDrops:", stats.rx_drops); + T("TSO:", tso); + T("TxCSO:", tx_cso); + T("VLANins:", vlan_ins); + T("TxQFull:", q.stops); + T("TxQRestarts:", q.restarts); + T("TxMapErr:", mapping_err); + R("FLAllocErr:", fl.alloc_failed); + R("FLLrgAlcErr:", fl.large_alloc_failed); + R("FLStarving:", fl.starving); + return 0; + } + + r -= eth_entries; + if (r == 0) { + const struct sge_rspq *evtq = &adapter->sge.fw_evtq; + + seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue"); + seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", + evtq->unhandled_irqs); + seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx); + seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen); + } else if (r == 1) { + const struct sge_rspq *intrq = &adapter->sge.intrq; + + seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue"); + seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", + intrq->unhandled_irqs); + seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx); + seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen); + } + + #undef R + #undef T + #undef S + #undef R3 + #undef T3 + #undef S3 + + return 0; +} + +/* + * Return the number of "entries" in our "file". We group the multi-Queue + * sections with QPL Queue Sets per "entry". The sections of the output are: + * + * Ethernet RX/TX Queue Sets + * Firmware Event Queue + * Forwarded Interrupt Queue (if in MSI mode) + */ +static int sge_qstats_entries(const struct adapter *adapter) +{ + return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + + ((adapter->flags & USING_MSI) != 0); +} + +static void *sge_qstats_start(struct seq_file *seq, loff_t *pos) +{ + int entries = sge_qstats_entries(seq->private); + + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static void sge_qstats_stop(struct seq_file *seq, void *v) +{ +} + +static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + int entries = sge_qstats_entries(seq->private); + + (*pos)++; + return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; +} + +static const struct seq_operations sge_qstats_seq_ops = { + .start = sge_qstats_start, + .next = sge_qstats_next, + .stop = sge_qstats_stop, + .show = sge_qstats_show +}; + +static int sge_qstats_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &sge_qstats_seq_ops); + + if (res == 0) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations sge_qstats_proc_fops = { + .owner = THIS_MODULE, + .open = sge_qstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * Show PCI-E SR-IOV Virtual Function Resource Limits. + */ +static int resources_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct vf_resources *vfres = &adapter->params.vfres; + + #define S(desc, fmt, var) \ + seq_printf(seq, "%-60s " fmt "\n", \ + desc " (" #var "):", vfres->var) + + S("Virtual Interfaces", "%d", nvi); + S("Egress Queues", "%d", neq); + S("Ethernet Control", "%d", nethctrl); + S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); + S("Ingress Queues", "%d", niq); + S("Traffic Class", "%d", tc); + S("Port Access Rights Mask", "%#x", pmask); + S("MAC Address Filters", "%d", nexactf); + S("Firmware Command Read Capabilities", "%#x", r_caps); + S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); + + #undef S + + return 0; +} + +static int resources_open(struct inode *inode, struct file *file) +{ + return single_open(file, resources_show, inode->i_private); +} + +static const struct file_operations resources_proc_fops = { + .owner = THIS_MODULE, + .open = resources_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Show Virtual Interfaces. + */ +static int interfaces_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Interface Port VIID\n"); + } else { + struct adapter *adapter = seq->private; + int pidx = (uintptr_t)v - 2; + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + + seq_printf(seq, "%9s %4d %#5x\n", + dev->name, pi->port_id, pi->viid); + } + return 0; +} + +static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos) +{ + return pos <= adapter->params.nports + ? (void *)(uintptr_t)(pos + 1) + : NULL; +} + +static void *interfaces_start(struct seq_file *seq, loff_t *pos) +{ + return *pos + ? interfaces_get_idx(seq->private, *pos) + : SEQ_START_TOKEN; +} + +static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + return interfaces_get_idx(seq->private, *pos); +} + +static void interfaces_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations interfaces_seq_ops = { + .start = interfaces_start, + .next = interfaces_next, + .stop = interfaces_stop, + .show = interfaces_show +}; + +static int interfaces_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &interfaces_seq_ops); + + if (res == 0) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations interfaces_proc_fops = { + .owner = THIS_MODULE, + .open = interfaces_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * /sys/kernel/debugfs/cxgb4vf/ files list. 
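+ * The table below simply maps debugfs node names to their
+ * file_operations; all of the nodes are read-only (S_IRUGO) and are
+ * created under the per-device directory by setup_debugfs() below.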
+ */ +struct cxgb4vf_debugfs_entry { + const char *name; /* name of debugfs node */ + mode_t mode; /* file system mode */ + const struct file_operations *fops; +}; + +static struct cxgb4vf_debugfs_entry debugfs_files[] = { + { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops }, + { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops }, + { "resources", S_IRUGO, &resources_proc_fops }, + { "interfaces", S_IRUGO, &interfaces_proc_fops }, +}; + +/* + * Module and device initialization and cleanup code. + * ================================================== + */ + +/* + * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the + * directory (debugfs_root) has already been set up. + */ +static int __devinit setup_debugfs(struct adapter *adapter) +{ + int i; + + BUG_ON(adapter->debugfs_root == NULL); + + /* + * Debugfs support is best effort. + */ + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + (void)debugfs_create_file(debugfs_files[i].name, + debugfs_files[i].mode, + adapter->debugfs_root, + (void *)adapter, + debugfs_files[i].fops); + + return 0; +} + +/* + * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave + * it to our caller to tear down the directory (debugfs_root). + */ +static void __devexit cleanup_debugfs(struct adapter *adapter) +{ + BUG_ON(adapter->debugfs_root == NULL); + + /* + * Unlike our sister routine cleanup_proc(), we don't need to remove + * individual entries because a call will be made to + * debugfs_remove_recursive(). We just need to clean up any ancillary + * persistent state. + */ + /* nothing to do */ +} + +/* + * Perform early "adapter" initialization. This is where we discover what + * adapter parameters we're going to be using and initialize basic adapter + * hardware support. + */ +static int adap_init0(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct sge_params *sge_params = &adapter->params.sge; + struct sge *s = &adapter->sge; + unsigned int ethqsets; + int err; + + /* + * Wait for the device to become ready before proceeding ... + */ + err = t4vf_wait_dev_ready(adapter); + if (err) { + dev_err(adapter->pdev_dev, "device didn't become ready:" + " err=%d\n", err); + return err; + } + + /* + * Grab basic operational parameters. These will predominantly have + * been set up by the Physical Function Driver or will be hard coded + * into the adapter. We just have to live with them ... Note that + * we _must_ get our VPD parameters before our SGE parameters because + * we need to know the adapter's core clock from the VPD in order to + * properly decode the SGE Timer Values. 
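+ * (core_ticks_to_us() below converts the raw TIMERVALUE tick counts into
+ * microseconds using that core clock; at a hypothetical 350 MHz core
+ * clock, for instance, 350 ticks would come out to roughly 1 us.)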
+ */ + err = t4vf_get_dev_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " device parameters: err=%d\n", err); + return err; + } + err = t4vf_get_vpd_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " VPD parameters: err=%d\n", err); + return err; + } + err = t4vf_get_sge_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " SGE parameters: err=%d\n", err); + return err; + } + err = t4vf_get_rss_glb_config(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " RSS parameters: err=%d\n", err); + return err; + } + if (adapter->params.rss.mode != + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + dev_err(adapter->pdev_dev, "unable to operate with global RSS" + " mode %d\n", adapter->params.rss.mode); + return -EINVAL; + } + err = t4vf_sge_init(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to use adapter parameters:" + " err=%d\n", err); + return err; + } + + /* + * Retrieve our RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. + */ + s->timer_val[0] = core_ticks_to_us(adapter, + TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adapter, + TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adapter, + TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adapter, + TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adapter, + TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adapter, + TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5)); + + s->counter_val[0] = + THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold); + s->counter_val[1] = + THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold); + s->counter_val[2] = + THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold); + s->counter_val[3] = + THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold); + + /* + * Grab our Virtual Interface resource allocation, extract the + * features that we're interested in and do a bit of sanity testing on + * what we discover. + */ + err = t4vf_get_vfres(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to get virtual interface" + " resources: err=%d\n", err); + return err; + } + + /* + * The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. + */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d allowed" + " virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* + * We need to reserve a number of the ingress queues with Free List + * and Interrupt capabilities for special interrupt purposes (like + * asynchronous firmware messages, or forwarded interrupts if we're + * using MSI). The rest of the FL/Intr-capable ingress queues will be + * matched up one-for-one with Ethernet/Control egress queues in order + * to form "Queue Sets" which will be aportioned between the "ports". + * For each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. 
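+ * As a rough worked example: if the PF provisioned us with niqflint = 10
+ * and INGQ_EXTRAS covered two special ingress queues, at most 8 Queue
+ * Sets would remain, requiring at least 16 Egress Contexts
+ * (neq >= 2 * ethqsets, as checked below).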
+ */ + ethqsets = vfres->niqflint - INGQ_EXTRAS; + if (vfres->nethctrl != ethqsets) { + dev_warn(adapter->pdev_dev, "unequal number of [available]" + " ingress/egress queues (%d/%d); using minimum for" + " number of Queue Sets\n", ethqsets, vfres->nethctrl); + ethqsets = min(vfres->nethctrl, ethqsets); + } + if (vfres->neq < ethqsets*2) { + dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)" + " to support Queue Sets (%d); reducing allowed Queue" + " Sets\n", vfres->neq, ethqsets); + ethqsets = vfres->neq/2; + } + if (ethqsets > MAX_ETH_QSETS) { + dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue" + " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets); + ethqsets = MAX_ETH_QSETS; + } + if (vfres->niq != 0 || vfres->neq > ethqsets*2) { + dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)" + " ignored\n", vfres->niq, vfres->neq - ethqsets*2); + } + adapter->sge.max_ethqsets = ethqsets; + + /* + * Check for various parameter sanity issues. Most checks simply + * result in us using fewer resources than our provissioning but we + * do need at least one "port" with which to work ... + */ + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } + if (adapter->params.nports == 0) { + dev_err(adapter->pdev_dev, "no virtual interfaces configured/" + "usable!\n"); + return -EINVAL; + } + return 0; +} + +static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx, + u8 pkt_cnt_idx, unsigned int size, + unsigned int iqe_size) +{ + rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) | + (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0)); + rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS + ? pkt_cnt_idx + : 0); + rspq->iqe_len = iqe_size; + rspq->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and + * type of ports we found and the number of available CPUs. Most settings can + * be modified by the admin via ethtool and cxgbtool prior to the adapter + * being brought up for the first time. + */ +static void __devinit cfg_queues(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int q10g, n10g, qidx, pidx, qs; + + /* + * We should not be called till we know how many Queue Sets we can + * support. In particular, this means that we need to know what kind + * of interrupts we'll be using ... + */ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + + /* + * Count the number of 10GbE Virtual Interfaces that we have. + */ + n10g = 0; + for_each_port(adapter, pidx) + n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); + + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g == 0) + q10g = 0; + else { + int n1g = (adapter->params.nports - n10g); + q10g = (adapter->sge.max_ethqsets - n1g) / n10g; + if (q10g > num_online_cpus()) + q10g = num_online_cpus(); + } + + /* + * Allocate the "Queue Sets" to the various Virtual Interfaces. + * The layout will be established in setup_sge_queues() when the + * adapter is brough up for the first time. + */ + qidx = 0; + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + + pi->first_qset = qidx; + pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; + qidx += pi->nqsets; + } + s->ethqsets = qidx; + + /* + * Set up default Queue Set parameters ... 
Start off with the + * shortest interrupt holdoff timer. + */ + for (qs = 0; qs < s->max_ethqsets; qs++) { + struct sge_eth_rxq *rxq = &s->ethrxq[qs]; + struct sge_eth_txq *txq = &s->ethtxq[qs]; + + init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES); + rxq->fl.size = 72; + txq->q.size = 1024; + } + + /* + * The firmware event queue is used for link state changes and + * notifications of TX DMA completions. + */ + init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, + L1_CACHE_BYTES); + + /* + * The forwarded interrupt queue is used when we're in MSI interrupt + * mode. In this mode all interrupts associated with RX queues will + * be forwarded to a single queue which we'll associate with our MSI + * interrupt vector. The messages dropped in the forwarded interrupt + * queue will indicate which ingress queue needs servicing ... This + * queue needs to be large enough to accommodate all of the ingress + * queues which are forwarding their interrupt (+1 to prevent the PIDX + * from equalling the CIDX if every ingress queue has an outstanding + * interrupt). The queue doesn't need to be any larger because no + * ingress queue will ever have more than one outstanding interrupt at + * any time ... + */ + init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, + L1_CACHE_BYTES); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. + */ +static void __devinit reduce_ethqs(struct adapter *adapter, int n) +{ + int i; + struct port_info *pi; + + /* + * While we have too many active Ether Queue Sets, interate across the + * "ports" and reduce their individual Queue Set allocations. + */ + BUG_ON(n < adapter->params.nports); + while (n < adapter->sge.ethqsets) + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adapter->sge.ethqsets--; + if (adapter->sge.ethqsets <= n) + break; + } + } + + /* + * Reassign the starting Queue Sets for each of the "ports" ... + */ + n = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* + * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally + * we get a separate MSI-X vector for every "Queue Set" plus any extras we + * need. Minimally we need one for every Virtual Interface plus those needed + * for our "extras". Note that this process may lower the maximum number of + * allowed Queue Sets ... + */ +static int __devinit enable_msix(struct adapter *adapter) +{ + int i, err, want, need; + struct msix_entry entries[MSIX_ENTRIES]; + struct sge *s = &adapter->sge; + + for (i = 0; i < MSIX_ENTRIES; ++i) + entries[i].entry = i; + + /* + * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets" + * plus those needed for our "extras" (for example, the firmware + * message queue). We _need_ at least one "Queue Set" per Virtual + * Interface plus those needed for our "extras". So now we get to see + * if the song is right ... 
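+ * pci_enable_msix() either succeeds (returns 0), fails outright (returns
+ * a negative error) or tells us how many vectors it could have given us
+ * (returns a positive count); the loop below keeps retrying with that
+ * smaller count for as long as it still satisfies "need".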
+ */ + want = s->max_ethqsets + MSIX_EXTRAS; + need = adapter->params.nports + MSIX_EXTRAS; + while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need) + want = err; + + if (err == 0) { + int nqsets = want - MSIX_EXTRAS; + if (nqsets < s->max_ethqsets) { + dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" + " for %d Queue Sets\n", nqsets); + s->max_ethqsets = nqsets; + if (nqsets < s->ethqsets) + reduce_ethqs(adapter, nqsets); + } + for (i = 0; i < want; ++i) + adapter->msix_info[i].vec = entries[i].vector; + } else if (err > 0) { + pci_disable_msix(adapter->pdev); + dev_info(adapter->pdev_dev, "only %d MSI-X vectors left," + " not using MSI-X\n", err); + } + return err; +} + +#ifdef HAVE_NET_DEVICE_OPS +static const struct net_device_ops cxgb4vf_netdev_ops = { + .ndo_open = cxgb4vf_open, + .ndo_stop = cxgb4vf_stop, + .ndo_start_xmit = t4vf_eth_xmit, + .ndo_get_stats = cxgb4vf_get_stats, + .ndo_set_rx_mode = cxgb4vf_set_rxmode, + .ndo_set_mac_address = cxgb4vf_set_mac_addr, + .ndo_select_queue = cxgb4vf_select_queue, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb4vf_do_ioctl, + .ndo_change_mtu = cxgb4vf_change_mtu, + .ndo_vlan_rx_register = cxgb4vf_vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb4vf_poll_controller, +#endif +}; +#endif + +/* + * "Probe" a device: initialize a device and construct all kernel and driver + * state needed to manage the device. This routine is called "init_one" in + * the PF Driver ... + */ +static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int version_printed; + + int pci_using_dac; + int err, pidx; + unsigned int pmask; + struct adapter *adapter; + struct port_info *pi; + struct net_device *netdev; + + /* + * Vet our module parameters. + */ + if (msi != MSI_MSIX && msi != MSI_MSI) { + dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" + " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, + MSI_MSI); + err = -EINVAL; + goto err_out; + } + + /* + * Print our driver banner the first time we're called to initialize a + * device. + */ + if (version_printed == 0) { + printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + version_printed = 1; + } + + /* + * Reserve PCI resources for the device. If we can't get them some + * other driver may have already claimed the device ... + */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + return err; + } + + /* + * Initialize generic PCI device state. + */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_release_regions; + } + + /* + * Set up our DMA mask: try for 64-bit address masking first and + * fall back to 32-bit if we can't get 64 bits ... + */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err == 0) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" + " coherent allocations\n"); + goto err_disable_device; + } + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err != 0) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_disable_device; + } + pci_using_dac = 0; + } + + /* + * Enable bus mastering for the device ... + */ + pci_set_master(pdev); + + /* + * Allocate our adapter data structure and attach it to the device. 
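+ * The zero-initialized adapter structure is hung off the PCI device via
+ * pci_set_drvdata() so that cxgb4vf_pci_remove() can retrieve it again.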
+ */ + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto err_disable_device; + } + pci_set_drvdata(pdev, adapter); + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + + /* + * Initialize SMP data synchronization resources. + */ + spin_lock_init(&adapter->stats_lock); + + /* + * Map our I/O registers in BAR0. + */ + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto err_free_adapter; + } + + /* + * Initialize adapter level features. + */ + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + err = adap_init0(adapter); + if (err) + goto err_unmap_bar; + + /* + * Allocate our "adapter ports" and stitch everything together. + */ + pmask = adapter->params.vfres.pmask; + for_each_port(adapter, pidx) { + int port_id, viid; + + /* + * We simplistically allocate our virtual interfaces + * sequentially across the port numbers to which we have + * access rights. This should be configurable in some manner + * ... + */ + if (pmask == 0) + break; + port_id = ffs(pmask) - 1; + pmask &= ~(1 << port_id); + viid = t4vf_alloc_vi(adapter, port_id); + if (viid < 0) { + dev_err(&pdev->dev, "cannot allocate VI for port %d:" + " err=%d\n", port_id, viid); + err = viid; + goto err_free_dev; + } + + /* + * Allocate our network device and stitch things together. + */ + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_PORT_QSETS); + if (netdev == NULL) { + dev_err(&pdev->dev, "cannot allocate netdev for" + " port %d\n", port_id); + t4vf_free_vi(adapter, viid); + err = -ENOMEM; + goto err_free_dev; + } + adapter->port[pidx] = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->pidx = pidx; + pi->port_id = port_id; + pi->viid = viid; + + /* + * Initialize the starting state of our "port" and register + * it. + */ + pi->xact_addr_filt = -1; + pi->rx_offload = RX_CSO; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev->irq = pdev->irq; + + netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | + NETIF_F_GRO); + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features = + (netdev->features & + ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)); + +#ifdef HAVE_NET_DEVICE_OPS + netdev->netdev_ops = &cxgb4vf_netdev_ops; +#else + netdev->vlan_rx_register = cxgb4vf_vlan_rx_register; + netdev->open = cxgb4vf_open; + netdev->stop = cxgb4vf_stop; + netdev->hard_start_xmit = t4vf_eth_xmit; + netdev->get_stats = cxgb4vf_get_stats; + netdev->set_rx_mode = cxgb4vf_set_rxmode; + netdev->do_ioctl = cxgb4vf_do_ioctl; + netdev->change_mtu = cxgb4vf_change_mtu; + netdev->set_mac_address = cxgb4vf_set_mac_addr; + netdev->select_queue = cxgb4vf_select_queue; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = cxgb4vf_poll_controller; +#endif +#endif + SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops); + + /* + * Initialize the hardware/software state for the port. + */ + err = t4vf_port_init(adapter, pidx); + if (err) { + dev_err(&pdev->dev, "cannot initialize port %d\n", + pidx); + goto err_free_dev; + } + } + + /* + * The "card" is now ready to go. If any errors occur during device + * registration we do not fail the whole "card" but rather proceed + * only with the ports we manage to register successfully. However we + * must register at least one net device. 
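+ * registered_device_map keeps one bit per successfully registered port
+ * so that both the error path below and cxgb4vf_pci_remove() know which
+ * net devices actually need unregister_netdev().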
+ */ + for_each_port(adapter, pidx) { + netdev = adapter->port[pidx]; + if (netdev == NULL) + continue; + + err = register_netdev(netdev); + if (err) { + dev_warn(&pdev->dev, "cannot register net device %s," + " skipping\n", netdev->name); + continue; + } + + set_bit(pidx, &adapter->registered_device_map); + } + if (adapter->registered_device_map == 0) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto err_free_dev; + } + + /* + * Set up our debugfs entries. + */ + if (cxgb4vf_debugfs_root) { + adapter->debugfs_root = + debugfs_create_dir(pci_name(pdev), + cxgb4vf_debugfs_root); + if (adapter->debugfs_root == NULL) + dev_warn(&pdev->dev, "could not create debugfs" + " directory"); + else + setup_debugfs(adapter); + } + + /* + * See what interrupts we'll be using. If we've been configured to + * use MSI-X interrupts, try to enable them but fall back to using + * MSI interrupts if we can't enable MSI-X interrupts. If we can't + * get MSI interrupts we bail with the error. + */ + if (msi == MSI_MSIX && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else { + err = pci_enable_msi(pdev); + if (err) { + dev_err(&pdev->dev, "Unable to allocate %s interrupts;" + " err=%d\n", + msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); + goto err_free_debugfs; + } + adapter->flags |= USING_MSI; + } + + /* + * Now that we know how many "ports" we have and what their types are, + * and how many Queue Sets we can support, we can configure our queue + * resources. + */ + cfg_queues(adapter); + + /* + * Print a short notice on the existance and configuration of the new + * VF network device ... + */ + for_each_port(adapter, pidx) { + dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", + adapter->port[pidx]->name, + (adapter->flags & USING_MSIX) ? "MSI-X" : + (adapter->flags & USING_MSI) ? "MSI" : ""); + } + + /* + * Return success! + */ + return 0; + + /* + * Error recovery and exit code. Unwind state that's been created + * so far and return the error. + */ + +err_free_debugfs: + if (adapter->debugfs_root) { + cleanup_debugfs(adapter); + debugfs_remove_recursive(adapter->debugfs_root); + } + +err_free_dev: + for_each_port(adapter, pidx) { + netdev = adapter->port[pidx]; + if (netdev == NULL) + continue; + pi = netdev_priv(netdev); + t4vf_free_vi(adapter, pi->viid); + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(netdev); + free_netdev(netdev); + } + +err_unmap_bar: + iounmap(adapter->regs); + +err_free_adapter: + kfree(adapter); + pci_set_drvdata(pdev, NULL); + +err_disable_device: + pci_disable_device(pdev); + pci_clear_master(pdev); + +err_release_regions: + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + +err_out: + return err; +} + +/* + * "Remove" a device: tear down all kernel and driver state created in the + * "probe" routine and quiesce the device (disable interrupts, etc.). (Note + * that this is called "remove_one" in the PF Driver.) + */ +static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + /* + * Tear down driver state associated with device. + */ + if (adapter) { + int pidx; + + /* + * Stop all of our activity. Unregister network port, + * disable interrupts, etc. 
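+ * Teardown runs roughly in the reverse order of cxgb4vf_pci_probe():
+ * unregister the net devices first so no new traffic can be started,
+ * then quiesce the SGE and release the MSI-X/MSI vectors.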
+ */ + for_each_port(adapter, pidx) + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(adapter->port[pidx]); + t4vf_sge_stop(adapter); + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } + + /* + * Tear down our debugfs entries. + */ + if (adapter->debugfs_root) { + cleanup_debugfs(adapter); + debugfs_remove_recursive(adapter->debugfs_root); + } + + /* + * Free all of the various resources which we've acquired ... + */ + t4vf_free_sge_resources(adapter); + for_each_port(adapter, pidx) { + struct net_device *netdev = adapter->port[pidx]; + struct port_info *pi; + + if (netdev == NULL) + continue; + + pi = netdev_priv(netdev); + t4vf_free_vi(adapter, pi->viid); + free_netdev(netdev); + } + iounmap(adapter->regs); + kfree(adapter); + pci_set_drvdata(pdev, NULL); + } + + /* + * Disable the device and release its PCI resources. + */ + pci_disable_device(pdev); + pci_clear_master(pdev); + pci_release_regions(pdev); +} + +/* + * PCI Device registration data structures. + */ +#define CH_DEVICE(devid, idx) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } + +static struct pci_device_id cxgb4vf_pci_tbl[] = { + CH_DEVICE(0xb000, 0), /* PE10K FPGA */ + CH_DEVICE(0x4800, 0), /* T440-dbg */ + CH_DEVICE(0x4801, 0), /* T420-cr */ + CH_DEVICE(0x4802, 0), /* T422-cr */ + { 0, } +}; + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl); + +static struct pci_driver cxgb4vf_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4vf_pci_tbl, + .probe = cxgb4vf_pci_probe, + .remove = __devexit_p(cxgb4vf_pci_remove), +}; + +/* + * Initialize global driver state. + */ +static int __init cxgb4vf_module_init(void) +{ + int ret; + + /* Debugfs support is optional, just warn if this fails */ + cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cxgb4vf_debugfs_root) + printk(KERN_WARNING KBUILD_MODNAME ": could not create" + " debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4vf_driver); + if (ret < 0) + debugfs_remove(cxgb4vf_debugfs_root); + return ret; +} + +/* + * Tear down global driver state. + */ +static void __exit cxgb4vf_module_exit(void) +{ + pci_unregister_driver(&cxgb4vf_driver); + debugfs_remove(cxgb4vf_debugfs_root); +} + +module_init(cxgb4vf_module_init); +module_exit(cxgb4vf_module_exit); diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c new file mode 100644 index 0000000000000000000000000000000000000000..eb5a1c9cb2d3b588a309ea9cefc326ef1ee71b81 --- /dev/null +++ b/drivers/net/cxgb4vf/sge.c @@ -0,0 +1,2454 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4fw_api.h" +#include "../cxgb4/t4_msg.h" + +/* + * Decoded Adapter Parameters. + */ +static u32 FL_PG_ORDER; /* large page allocation size */ +static u32 STAT_LEN; /* length of status page at ring end */ +static u32 PKTSHIFT; /* padding between CPL and packet data */ +static u32 FL_ALIGN; /* response queue message alignment */ + +/* + * Constants ... + */ +enum { + /* + * Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + /* + * Max number of TX descriptors we clean up at a time. Should be + * modest as freeing skbs isn't cheap and it happens while holding + * locks. We just need to free packets faster than they arrive, we + * eventually catch up and keep the amortized cost reasonable. + */ + MAX_TX_RECLAIM = 16, + + /* + * Max number of Rx buffers we replenish at a time. Again keep this + * modest, allocating buffers isn't cheap either. + */ + MAX_RX_REFILL = 16, + + /* + * Period of the Rx queue check timer. This timer is infrequent as it + * has something to do only when the system experiences severe memory + * shortage. + */ + RX_QCHECK_PERIOD = (HZ / 2), + + /* + * Period of the TX queue check timer and the maximum number of TX + * descriptors to be reclaimed by the TX timer. + */ + TX_QCHECK_PERIOD = (HZ / 2), + MAX_TIMER_TX_RECLAIM = 100, + + /* + * An FL with <= FL_STARVE_THRES buffers is starving and a periodic + * timer will attempt to refill it. + */ + FL_STARVE_THRES = 4, + + /* + * Suspend an Ethernet TX queue with fewer available descriptors than + * this. We always want to have room for a maximum sized packet: + * inline immediate data + MAX_SKB_FRAGS. 
This is the same as + * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS + * (see that function and its helpers for a description of the + * calculation). + */ + ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, + ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + + ((ETHTXQ_MAX_FRAGS-1) & 1) + + 2), + ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), + ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, + + ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), + + /* + * Max TX descriptor space we allow for an Ethernet packet to be + * inlined into a WR. This is limited by the maximum value which + * we can specify for immediate data in the firmware Ethernet TX + * Work Request. + */ + MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, + + /* + * Max size of a WR sent through a control TX queue. + */ + MAX_CTRL_WR_LEN = 256, + + /* + * Maximum amount of data which we'll ever need to inline into a + * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). + */ + MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN + ? MAX_IMM_TX_PKT_LEN + : MAX_CTRL_WR_LEN), + + /* + * For incoming packets less than RX_COPY_THRES, we copy the data into + * an skb rather than referencing the data. We allocate enough + * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes + * of the data (header). + */ + RX_COPY_THRES = 256, + RX_PULL_LEN = 128, +}; + +/* + * Can't define this in the above enum because PKTSHIFT isn't a constant in + * the VF Driver ... + */ +#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) + +/* + * Software state per TX descriptor. + */ +struct tx_sw_desc { + struct sk_buff *skb; /* socket buffer of TX data source */ + struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ +}; + +/* + * Software state per RX Free List descriptor. We keep track of the allocated + * FL page, its size, and its PCI DMA address (if the page is mapped). The FL + * page size and its PCI DMA mapped state are stored in the low bits of the + * PCI DMA address as per below. + */ +struct rx_sw_desc { + struct page *page; /* Free List page buffer */ + dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ + /* and flags (see below) */ +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the + * SGE also uses the low 4 bits to determine the size of the buffer. It uses + * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. + * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 + * bits can only contain a 0 or a 1 to indicate which size buffer we're giving + * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is + * maintained in an inverse sense so the hardware never sees that bit high. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +/** + * get_buf_addr - return DMA buffer address of software descriptor + * @sdesc: pointer to the software buffer descriptor + * + * Return the DMA buffer address of a software descriptor (stripping out + * our low-order flag bits). + */ +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) +{ + return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +/** + * is_buf_mapped - is buffer mapped for DMA? 
+ * @sdesc: pointer to the software buffer descriptor + * + * Determine whether the buffer associated with a software descriptor in + * mapped for DMA or not. + */ +static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc) +{ + return !(sdesc->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * need_skb_unmap - does the platform need unmapping of sk_buffs? + * + * Returns true if the platfrom needs sk_buff unmapping. The compiler + * optimizes away unecessary code if this returns true. + */ +static inline int need_skb_unmap(void) +{ +#ifdef CONFIG_NEED_DMA_MAP_STATE + return 1; +#else + return 0; +#endif +} + +/** + * txq_avail - return the number of available slots in a TX queue + * @tq: the TX queue + * + * Returns the number of available descriptors in a TX queue. + */ +static inline unsigned int txq_avail(const struct sge_txq *tq) +{ + return tq->size - 1 - tq->in_use; +} + +/** + * fl_cap - return the capacity of a Free List + * @fl: the Free List + * + * Returns the capacity of a Free List. The capacity is less than the + * size because an Egress Queue Index Unit worth of descriptors needs to + * be left unpopulated, otherwise the Producer and Consumer indices PIDX + * and CIDX will match and the hardware will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - FL_PER_EQ_UNIT; +} + +/** + * fl_starving - return whether a Free List is starving. + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has falled below our "starvation" + * threshhold. + */ +static inline bool fl_starving(const struct sge_fl *fl) +{ + return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +} + +/** + * map_skb - map an skb for DMA to the device + * @dev: the egress net device + * @skb: the packet to map + * @addr: a pointer to the base of the DMA mapping array + * + * Map an skb for DMA to the device and return an array of DMA addresses. 
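+ * addr[0] receives the mapping of the linear header area and
+ * addr[1..nr_frags] the mappings of the page fragments, so the caller
+ * must supply room for up to MAX_SKB_FRAGS + 1 entries (see the addr[]
+ * array in t4vf_eth_xmit()).  On failure everything mapped so far is
+ * unwound and -ENOMEM is returned.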
+ */ +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) { + *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); + +out_err: + return -ENOMEM; +} + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *tq) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), + be32_to_cpu(sgl->len0), DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), + be32_to_cpu(sgl->len0), DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { +unmap: + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)tq->stat) { + p = (const struct ulptx_sge_pair *)tq->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)tq->stat) { + const __be64 *addr = (const __be64 *)tq->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)tq->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)tq->stat) + p = (const struct ulptx_sge_pair *)tq->desc; + addr = ((u8 *)p + 16 <= (u8 *)tq->stat + ? p->addr[0] + : *(const __be64 *)tq->desc); + dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims TX descriptors and their buffers + * @adapter: the adapter + * @tq: the TX queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims TX descriptors from an SGE TX queue and frees the associated + * TX buffers. Called with the TX queue lock held. + */ +static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *sdesc; + unsigned int cidx = tq->cidx; + struct device *dev = adapter->pdev_dev; + + const int need_unmap = need_skb_unmap() && unmap; + + sdesc = &tq->sdesc[cidx]; + while (n--) { + /* + * If we kept a reference to the original TX skb, we need to + * unmap it from PCI DMA space (if required) and free it. 
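+ * (need_unmap folds in need_skb_unmap(), so on platforms without
+ * CONFIG_NEED_DMA_MAP_STATE the compiler can drop the unmap_sgl()
+ * call entirely.)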
+ */ + if (sdesc->skb) { + if (need_unmap) + unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); + kfree_skb(sdesc->skb); + sdesc->skb = NULL; + } + + sdesc++; + if (++cidx == tq->size) { + cidx = 0; + sdesc = tq->sdesc; + } + } + tq->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a TX queue. + */ +static inline int reclaimable(const struct sge_txq *tq) +{ + int hw_cidx = be16_to_cpu(tq->stat->cidx); + int reclaimable = hw_cidx - tq->cidx; + if (reclaimable < 0) + reclaimable += tq->size; + return reclaimable; +} + +/** + * reclaim_completed_tx - reclaims completed TX descriptors + * @adapter: the adapter + * @tq: the TX queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims TX descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the TX + * queue locked. + */ +static inline void reclaim_completed_tx(struct adapter *adapter, + struct sge_txq *tq, + bool unmap) +{ + int avail = reclaimable(tq); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the TX lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adapter, tq, avail, unmap); + tq->in_use -= avail; + } +} + +/** + * get_buf_size - return the size of an RX Free List buffer. + * @sdesc: pointer to the software buffer descriptor + */ +static inline int get_buf_size(const struct rx_sw_desc *sdesc) +{ + return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) + ? (PAGE_SIZE << FL_PG_ORDER) + : PAGE_SIZE; +} + +/** + * free_rx_bufs - free RX buffers on an SGE Free List + * @adapter: the adapter + * @fl: the SGE Free List to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE Free List RX queue. The + * buffers must be made inaccessible to hardware before calling this + * function. + */ +static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) +{ + while (n--) { + struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; + + if (is_buf_mapped(sdesc)) + dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), + get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + put_page(sdesc->page); + sdesc->page = NULL; + if (++fl->cidx == fl->size) + fl->cidx = 0; + fl->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current RX buffer on an SGE Free List + * @adapter: the adapter + * @fl: the SGE Free List + * + * Unmap the current buffer on an SGE Free List RX queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + * This is used predominantly to "transfer ownership" of an FL buffer + * to another entity (typically an skb's fragment list). + */ +static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) +{ + struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; + + if (is_buf_mapped(sdesc)) + dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), + get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + sdesc->page = NULL; + if (++fl->cidx == fl->size) + fl->cidx = 0; + fl->avail--; +} + +/** + * ring_fl_db - righ doorbell on free list + * @adapter: the adapter + * @fl: the Free List whose doorbell should be rung ... + * + * Tell the Scatter Gather Engine that there are new free list entries + * available. 
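+ * Credits are handed to the hardware in whole Egress Queue Units; with
+ * a 64-byte EQ_UNIT (FL_PER_EQ_UNIT == 8), for example, 20 pending
+ * credits advance PIDX by 2 units and carry 4 credits over in pend_cred
+ * for the next call.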
+ */ +static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) +{ + /* + * The SGE keeps track of its Producer and Consumer Indices in terms + * of Egress Queue Units so we can only tell it about integral numbers + * of multiples of Free List Entries per Egress Queue Units ... + */ + if (fl->pend_cred >= FL_PER_EQ_UNIT) { + wmb(); + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + DBPRIO | + QID(fl->cntxt_id) | + PIDX(fl->pend_cred / FL_PER_EQ_UNIT)); + fl->pend_cred %= FL_PER_EQ_UNIT; + } +} + +/** + * set_rx_sw_desc - initialize software RX buffer descriptor + * @sdesc: pointer to the softwore RX buffer descriptor + * @page: pointer to the page data structure backing the RX buffer + * @dma_addr: PCI DMA address (possibly with low-bit flags) + */ +static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page, + dma_addr_t dma_addr) +{ + sdesc->page = page; + sdesc->dma_addr = dma_addr; +} + +/* + * Support for poisoning RX buffers ... + */ +#define POISON_BUF_VAL -1 + +static inline void poison_buf(struct page *page, size_t sz) +{ +#if POISON_BUF_VAL >= 0 + memset(page_address(page), POISON_BUF_VAL, sz); +#endif +} + +/** + * refill_fl - refill an SGE RX buffer ring + * @adapter: the adapter + * @fl: the Free List ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN + * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number + * of buffers allocated. If afterwards the queue is found critically low, + * mark it as starving in the bitmap of starving FLs. + */ +static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, + int n, gfp_t gfp) +{ + struct page *page; + dma_addr_t dma_addr; + unsigned int cred = fl->avail; + __be64 *d = &fl->desc[fl->pidx]; + struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; + + /* + * Sanity: ensure that the result of adding n Free List buffers + * won't result in wrapping the SGE's Producer Index around to + * it's Consumer Index thereby indicating an empty Free List ... + */ + BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); + + /* + * If we support large pages, prefer large buffers and fail over to + * small pages if we can't allocate large pages to satisfy the refill. + * If we don't support large pages, drop directly into the small page + * allocation code. + */ + if (FL_PG_ORDER == 0) + goto alloc_small_pages; + + while (n) { + page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, + FL_PG_ORDER); + if (unlikely(!page)) { + /* + * We've failed inour attempt to allocate a "large + * page". Fail over to the "small page" allocation + * below. + */ + fl->large_alloc_failed++; + break; + } + poison_buf(page, PAGE_SIZE << FL_PG_ORDER); + + dma_addr = dma_map_page(adapter->pdev_dev, page, 0, + PAGE_SIZE << FL_PG_ORDER, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { + /* + * We've run out of DMA mapping space. Free up the + * buffer and return with what we've managed to put + * into the free list. We don't want to fail over to + * the small page allocation below in this case + * because DMA mapping resources are typically + * critical resources once they become scarse. 
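+ * (Falling back to the small page path below wouldn't help either,
+ * since those buffers need DMA mappings of their own.)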
+ */ + __free_pages(page, FL_PG_ORDER); + goto out; + } + dma_addr |= RX_LARGE_BUF; + *d++ = cpu_to_be64(dma_addr); + + set_rx_sw_desc(sdesc, page, dma_addr); + sdesc++; + + fl->avail++; + if (++fl->pidx == fl->size) { + fl->pidx = 0; + sdesc = fl->sdesc; + d = fl->desc; + } + n--; + } + +alloc_small_pages: + while (n--) { + page = __netdev_alloc_page(adapter->port[0], + gfp | __GFP_NOWARN); + if (unlikely(!page)) { + fl->alloc_failed++; + break; + } + poison_buf(page, PAGE_SIZE); + + dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { + netdev_free_page(adapter->port[0], page); + break; + } + *d++ = cpu_to_be64(dma_addr); + + set_rx_sw_desc(sdesc, page, dma_addr); + sdesc++; + + fl->avail++; + if (++fl->pidx == fl->size) { + fl->pidx = 0; + sdesc = fl->sdesc; + d = fl->desc; + } + } + +out: + /* + * Update our accounting state to incorporate the new Free List + * buffers, tell the hardware about them and return the number of + * bufers which we were able to allocate. + */ + cred = fl->avail - cred; + fl->pend_cred += cred; + ring_fl_db(adapter, fl); + + if (unlikely(fl_starving(fl))) { + smp_wmb(); + set_bit(fl->cntxt_id, adapter->sge.starving_fl); + } + + return cred; +} + +/* + * Refill a Free List to its capacity or the Maximum Refill Increment, + * whichever is smaller ... + */ +static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) +{ + refill_fl(adapter, fl, + min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @hwsize: the size of each hardware descriptor + * @swsize: the size of each software descriptor + * @busaddrp: the physical PCI bus address of the allocated ring + * @swringp: return address pointer for software ring + * @stat_size: extra space in hardware ring for status information + * + * Allocates resources for an SGE descriptor ring, such as TX queues, + * free buffer lists, response queues, etc. Each SGE ring requires + * space for its hardware descriptors plus, optionally, space for software + * state associated with each hardware entry (the metadata). The function + * returns three values: the virtual address for the hardware ring (the + * return value of the function), the PCI bus address of the hardware + * ring (in *busaddrp), and the address of the software ring (in swringp). + * Both the hardware and software rings are returned zeroed out. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, + size_t swsize, dma_addr_t *busaddrp, void *swringp, + size_t stat_size) +{ + /* + * Allocate the hardware ring and PCI DMA bus address space for said. + */ + size_t hwlen = nelem * hwsize + stat_size; + void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + + if (!hwring) + return NULL; + + /* + * If the caller wants a software ring, allocate it and return a + * pointer to it in *swringp. + */ + BUG_ON((swsize != 0) != (swringp != NULL)); + if (swsize) { + void *swring = kcalloc(nelem, swsize, GFP_KERNEL); + + if (!swring) { + dma_free_coherent(dev, hwlen, hwring, *busaddrp); + return NULL; + } + *(void **)swringp = swring; + } + + /* + * Zero out the hardware ring and return its address as our function + * value. 
+ */ + memset(hwring, 0, hwlen); + return hwring; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits (8-byte units) needed for a Direct + * Scatter/Gather List that can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* + * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of TX descriptors for the given flits + * @flits: the number of flits + * + * Returns the number of TX descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int flits) +{ + BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64)); + return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + /* + * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhace the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* + * If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* + * Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embeded TX Packet Write CPL message. 
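+ * As a quick example, an skb with a populated linear area and two page
+ * fragments maps to sgl_len(2 + 1) = 5 flits of SGL, on top of which
+ * the appropriate header flits are added below.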
+ */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * write_sgl - populate a Scatter/Gather List for a packet + * @skb: the packet + * @tq: the TX queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of DMA bus addresses for the SGL elements + * + * Generates a Scatter/Gather List for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @pos must be 16-byte + * aligned and within a TX descriptor with available space. @end points + * write after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @tq->stat. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(si->frags[0].size); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | + ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(si->frags[++i].size); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)tq->stat)) { + unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)tq->stat; + memcpy(tq->desc, (u8 *)buf + part0, part1); + end = (void *)tq->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +/** + * check_ring_tx_db - check and potentially ring a TX queue's doorbell + * @adapter: the adapter + * @tq: the TX queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a TX queue. + */ +static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, + int n) +{ + /* + * Warn if we write doorbells with the wrong priority and write + * descriptors before telling HW. 
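+ * The wmb() below orders the descriptor writes in host memory ahead of
+ * the doorbell register write so the hardware never fetches a stale
+ * descriptor.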
+ */ + WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO); + wmb(); + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(tq->cntxt_id) | PIDX(n)); +} + +/** + * inline_tx_skb - inline a packet's data into TX descriptors + * @skb: the packet + * @tq: the TX queue where the packet will be inlined + * @pos: starting position in the TX queue to inline the packet + * + * Inline a packet's contents directly into TX descriptors, starting at + * the given position within the TX DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, + void *pos) +{ + u64 *p; + int left = (void *)tq->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, tq->desc, skb->len - left); + pos = (void *)tq->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: + /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +/* + * Stop an Ethernet TX queue and record that state change. + */ +static void txq_stop(struct sge_eth_txq *txq) +{ + netif_tx_stop_queue(txq->txq); + txq->q.stops++; +} + +/* + * Advance our software state for a TX queue by adding n in use descriptors. + */ +static inline void txq_advance(struct sge_txq *tq, unsigned int n) +{ + tq->in_use += n; + tq->pidx += n; + if (tq->pidx >= tq->size) + tq->pidx -= tq->size; +} + +/** + * t4vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. 
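hwcsum() reduces to a small decision table: IP version plus L4 protocol selects a checksum type, and anything else falls back to "no hardware checksum". A stand-alone rendering of that table follows; the enum values are placeholders, not the TX_CSUM_* hardware encodings:

/*
 * Stand-alone sketch of the checksum-type selection done by hwcsum().
 * The enum values are illustrative only; the driver uses the TX_CSUM_*
 * hardware encodings.
 */
#include <stdio.h>

enum csum_type { CSUM_NONE, CSUM_TCPIP, CSUM_UDPIP, CSUM_TCPIP6, CSUM_UDPIP6 };

#define PROTO_TCP 6
#define PROTO_UDP 17

static enum csum_type pick_csum(int ip_version, int l4_proto)
{
	if (ip_version == 4) {
		if (l4_proto == PROTO_TCP)
			return CSUM_TCPIP;
		if (l4_proto == PROTO_UDP)
			return CSUM_UDPIP;
	} else if (ip_version == 6) {
		/* like the driver, this ignores IPv6 extension headers */
		if (l4_proto == PROTO_TCP)
			return CSUM_TCPIP6;
		if (l4_proto == PROTO_UDP)
			return CSUM_UDPIP6;
	}
	return CSUM_NONE;	/* unknown protocol: disable HW csum */
}

int main(void)
{
	printf("v4/TCP -> %d, v6/UDP -> %d, v4/other -> %d\n",
	       pick_csum(4, PROTO_TCP), pick_csum(6, PROTO_UDP),
	       pick_csum(4, 1));
	return 0;
}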
+ */ +int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adapter; + struct sge_eth_txq *txq; + const struct port_info *pi; + struct fw_eth_tx_pkt_vm_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); + + /* + * The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... + */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* + * Figure out which TX Queue we're going to use. + */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + BUG_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* + * Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. + */ + reclaim_completed_tx(adapter, &txq->q, true); + + /* + * Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* + * Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. + */ + txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* + * We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* + * After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshhold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + txq_stop(txq); + wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + } + + /* + * Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. + */ + BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be64(0); + wr->r3[1] = cpu_to_be64(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* + * If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. 
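The admission logic in t4vf_eth_xmit() is: compute how many descriptors the Work Request needs, subtract that from the free space, report busy if the result is negative, and stop the queue early when it drops below the stop threshold so later packets do not race the hardware. A minimal model of that policy, where the ring size and threshold are arbitrary stand-ins for the driver's queue size and ETHTXQ_STOP_THRES:

/* Minimal model of the TX descriptor admission policy in t4vf_eth_xmit(). */
#include <stdio.h>

#define RING_SIZE	1024
#define STOP_THRES	64	/* stand-in for ETHTXQ_STOP_THRES */

struct txq_model {
	unsigned int in_use;	/* descriptors currently owned by HW */
	int stopped;
};

enum tx_verdict { TX_OK, TX_OK_QUEUE_STOPPED, TX_BUSY };

static enum tx_verdict try_xmit(struct txq_model *q, unsigned int ndesc)
{
	int credits = (int)(RING_SIZE - q->in_use) - (int)ndesc;

	if (credits < 0) {
		q->stopped = 1;		/* no room: stop and report busy */
		return TX_BUSY;
	}
	q->in_use += ndesc;
	if (credits < STOP_THRES) {
		q->stopped = 1;		/* low-water mark: stop early and
					 * wait for an egress queue update */
		return TX_OK_QUEUE_STOPPED;
	}
	return TX_OK;
}

int main(void)
{
	struct txq_model q = { 0, 0 };
	unsigned int i;

	for (i = 0; i < 300; i++)
		if (try_xmit(&q, 4) != TX_OK)
			break;
	printf("stopped after %u packets, in_use=%u\n", i, q.in_use);
	return 0;
}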
+ */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN(sizeof(*lso) + + sizeof(*cpl))); + /* + * Fill in the LSO CPL message. + */ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | + LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len/4) | + LSO_IPHDR_LEN(l3hdr_len/4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + lso->len = cpu_to_be32(skb->len); + + /* + * Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(lso + 1); + cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len)); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN(len)); + + /* + * Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + txq->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + /* + * If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (vlan_tx_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + } + + /* + * Fill in the TX Packet CPL message header. + */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->port_id) | + TXPKT_PF(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + +#ifdef T4_TRACE + T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], + "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u", + ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); +#endif + + /* + * Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (is_eth_imm(skb)) { + /* + * In-line the packet's data and free the skb since we don't + * need it any longer. + */ + inline_tx_skb(skb, &txq->q, cpl + 1); + dev_kfree_skb(skb); + } else { + /* + * Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. + * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performamce but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. A single UDP transmitter is a good example of this + * situation. 
We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. + */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* + * If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... + */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = (void *)((void *)tq->desc + + ((void *)end - (void *)tq->stat)); + } + + write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* + * Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + dev->trans_start = jiffies; + ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* + * An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/** + * t4vf_pktgl_free - free a packet gather list + * @gl: the gather list + * + * Releases the pages of a packet gather list. We do not own the last + * page on the list and do not free it. + */ +void t4vf_pktgl_free(const struct pkt_gl *gl) +{ + int frag; + + frag = gl->nfrags - 1; + while (frag--) + put_page(gl->frags[frag].page); +} + +/** + * copy_frags - copy fragments from gather list into skb_shared_info + * @si: destination skb shared info structure + * @gl: source internal packet gather list + * @offset: packet start offset in first page + * + * Copy an internal packet gather list into a Linux skb_shared_info + * structure. 
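Because a Work Request can span several descriptors, the skb pointer is recorded in the software descriptor of the last descriptor the request occupies, with the usual modular wrap. A tiny stand-alone illustration of that bookkeeping (structure and field names here are invented, not the driver's):

/*
 * Sketch of the "stash the skb in the last descriptor's software slot"
 * bookkeeping done at the end of t4vf_eth_xmit().
 */
#include <stdio.h>

#define Q_SIZE 8

struct sw_desc { const void *cookie; };	/* stands in for tx_sw_desc.skb */

struct txq_model {
	unsigned int pidx;
	struct sw_desc sdesc[Q_SIZE];
};

static unsigned int post_wr(struct txq_model *q, unsigned int ndesc,
			    const void *cookie)
{
	unsigned int last = q->pidx + ndesc - 1;

	if (last >= Q_SIZE)
		last -= Q_SIZE;
	q->sdesc[last].cookie = cookie;	/* released when its DMA completes */

	q->pidx += ndesc;		/* txq_advance() equivalent */
	if (q->pidx >= Q_SIZE)
		q->pidx -= Q_SIZE;
	return last;
}

int main(void)
{
	struct txq_model q = { .pidx = 6 };
	static const char pkt[] = "skb";

	printf("WR of 3 descriptors at pidx 6 ends in slot %u\n",
	       post_wr(&q, 3, pkt));	/* wraps around to slot 0 */
	return 0;
}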
+ */ +static inline void copy_frags(struct skb_shared_info *si, + const struct pkt_gl *gl, + unsigned int offset) +{ + unsigned int n; + + /* usually there's just one frag */ + si->frags[0].page = gl->frags[0].page; + si->frags[0].page_offset = gl->frags[0].page_offset + offset; + si->frags[0].size = gl->frags[0].size - offset; + si->nr_frags = gl->nfrags; + + n = gl->nfrags - 1; + if (n) + memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[n].page); +} + +/** + * do_gro - perform Generic Receive Offload ingress packet processing + * @rxq: ingress RX Ethernet Queue + * @gl: gather list for ingress packet + * @pkt: CPL header for last packet fragment + * + * Perform Generic Receive Offload (GRO) ingress packet processing. + * We use the standard Linux GRO interfaces for this. + */ +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4vf_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb_shinfo(skb), gl, PKTSHIFT); + skb->len = gl->tot_len - PKTSHIFT; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + + if (unlikely(pkt->vlan_ex)) { + struct port_info *pi = netdev_priv(rxq->rspq.netdev); + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) { + ret = vlan_gro_frags(&rxq->rspq.napi, grp, + be16_to_cpu(pkt->vlan)); + goto stats; + } + } + ret = napi_gro_frags(&rxq->rspq.napi); + +stats: + if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4vf_ethrx_handler - process an ingress ethernet packet + * @rspq: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @gl: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + struct port_info *pi; + struct skb_shared_info *ssi; + const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; + bool csum_ok = pkt->csum_calc && !pkt->err_vec; + unsigned int len = be16_to_cpu(pkt->len); + struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + + /* + * If this is a good TCP packet and we have Generic Receive Offload + * enabled, handle the packet in the GRO path. + */ + if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && + (rspq->netdev->features & NETIF_F_GRO) && csum_ok && + !pkt->ip_frag) { + do_gro(rxq, gl, pkt); + return 0; + } + + /* + * If the ingress packet is small enough, allocate an skb large enough + * for all of the data and copy it inline. Otherwise, allocate an skb + * with enough room to pull in the header and reference the rest of + * the data via the skb fragment list. 
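The RX fast path therefore makes a purely size-based choice: packets at or below the copy threshold are copied whole into a small skb, while larger packets get only their headers copied and keep the payload in page fragments. A sketch of that decision, with placeholder values standing in for RX_COPY_THRES and RX_PKT_PULL_LEN:

/*
 * Stand-alone sketch of the copy-vs-pull decision in t4vf_ethrx_handler().
 * The thresholds are illustrative stand-ins for RX_COPY_THRES and
 * RX_PKT_PULL_LEN.
 */
#include <stdio.h>

#define COPY_THRES	256	/* copy the whole packet at or below this */
#define PULL_LEN	128	/* otherwise copy only this much header */

struct rx_plan {
	unsigned int copy_bytes;	/* bytes copied into the skb head */
	unsigned int frag_bytes;	/* bytes left in page fragments */
};

static struct rx_plan plan_rx(unsigned int pkt_len)
{
	struct rx_plan p;

	if (pkt_len <= COPY_THRES) {
		p.copy_bytes = pkt_len;		/* small: linear skb only */
		p.frag_bytes = 0;
	} else {
		p.copy_bytes = PULL_LEN;		/* headers for the stack */
		p.frag_bytes = pkt_len - PULL_LEN;	/* zero-copy payload */
	}
	return p;
}

int main(void)
{
	unsigned int lens[] = { 60, 256, 1500, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		struct rx_plan p = plan_rx(lens[i]);
		printf("len %4u: copy %3u, frags %4u\n",
		       lens[i], p.copy_bytes, p.frag_bytes);
	}
	return 0;
}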
+ */ + if (len <= RX_COPY_THRES) { + /* small packets have only one fragment */ + skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); + if (!skb) + goto nomem; + __skb_put(skb, gl->frags[0].size); + skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size); + } else { + skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC); + if (!skb) + goto nomem; + __skb_put(skb, RX_PKT_PULL_LEN); + skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN); + + ssi = skb_shinfo(skb); + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = (gl->frags[0].page_offset + + RX_PKT_PULL_LEN); + ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN; + if (gl->nfrags > 1) + memcpy(&ssi->frags[1], &gl->frags[1], + (gl->nfrags-1) * sizeof(skb_frag_t)); + ssi->nr_frags = gl->nfrags; + skb->len = len + PKTSHIFT; + skb->data_len = skb->len - RX_PKT_PULL_LEN; + skb->truesize += skb->data_len; + + /* Get a reference for the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); + } + + __skb_pull(skb, PKTSHIFT); + skb->protocol = eth_type_trans(skb, rspq->netdev); + skb_record_rx_queue(skb, rspq->idx); + skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */ + pi = netdev_priv(skb->dev); + rxq->stats.pkts++; + + if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec && + (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { + if (!pkt->ip_frag) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + } + rxq->stats.rx_cso++; + } else + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(pkt->vlan_ex)) { + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) + vlan_hwaccel_receive_skb(skb, grp, + be16_to_cpu(pkt->vlan)); + else + dev_kfree_skb_any(skb); + } else + netif_receive_skb(skb); + + return 0; + +nomem: + t4vf_pktgl_free(gl); + rxq->stats.rx_drops++; + return 0; +} + +/** + * is_new_response - check if a response is newly written + * @rc: the response control descriptor + * @rspq: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *rc, + const struct sge_rspq *rspq) +{ + return RSPD_GEN(rc->type_gen) == rspq->gen; +} + +/** + * restore_rx_bufs - put back a packet's RX buffers + * @gl: the packet gather list + * @fl: the SGE Free List + * @nfrags: how many fragments in @si + * + * Called when we find out that the current packet, @si, can't be + * processed right away for some reason. This is a very rare event and + * there's no effort to make this suspension/resumption process + * particularly efficient. + * + * We implement the suspension by putting all of the RX buffers associated + * with the current packet back on the original Free List. The buffers + * have already been unmapped and are left unmapped, we mark them as + * unmapped in order to prevent further unmapping attempts. (Effectively + * this function undoes the series of @unmap_rx_buf calls which were done + * to create the current packet's gather list.) This leaves us ready to + * restart processing of the packet the next time we start processing the + * RX Queue ... 
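restore_rx_bufs() "unconsumes" free-list entries by stepping the consumer index backwards, wrapping at zero, and handing the pages back to the software descriptors. The same rollback pattern in isolation, with a plain counter model standing in for the free list:

/*
 * Stand-alone sketch of the consumer-index rollback done by
 * restore_rx_bufs(): step cidx backwards with wrap-around and mark the
 * rolled-back entries available again.
 */
#include <stdio.h>

#define FL_SIZE 8

struct fl_model {
	unsigned int cidx;	/* next entry to consume */
	unsigned int avail;	/* entries currently available */
};

static void rollback(struct fl_model *fl, unsigned int n)
{
	while (n--) {
		if (fl->cidx == 0)
			fl->cidx = FL_SIZE - 1;
		else
			fl->cidx--;
		fl->avail++;
	}
}

int main(void)
{
	struct fl_model fl = { .cidx = 1, .avail = 3 };

	rollback(&fl, 3);	/* crosses index 0 and wraps to the top */
	printf("cidx=%u avail=%u\n", fl.cidx, fl.avail);	/* 6, 6 */
	return 0;
}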
+ */ +static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, + int frags) +{ + struct rx_sw_desc *sdesc; + + while (frags--) { + if (fl->cidx == 0) + fl->cidx = fl->size - 1; + else + fl->cidx--; + sdesc = &fl->sdesc[fl->cidx]; + sdesc->page = gl->frags[frags].page; + sdesc->dma_addr |= RX_UNMAPPED_BUF; + fl->avail++; + } +} + +/** + * rspq_next - advance to the next entry in a response queue + * @rspq: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *rspq) +{ + rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; + if (unlikely(++rspq->cidx == rspq->size)) { + rspq->cidx = 0; + rspq->gen ^= 1; + rspq->cur_desc = rspq->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @rspq: the ingress response queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from a Scatter Gather Engine response queue up to + * the supplied budget. Responses include received packets as well as + * control messages from firmware or hardware. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +int process_responses(struct sge_rspq *rspq, int budget) +{ + struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + int budget_left = budget; + + while (likely(budget_left)) { + int ret, rsp_type; + const struct rsp_ctrl *rc; + + rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, rspq)) + break; + + /* + * Figure out what kind of response we've received from the + * SGE. + */ + rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + skb_frag_t *fp; + struct pkt_gl gl; + const struct rx_sw_desc *sdesc; + u32 bufsz, frag; + u32 len = be32_to_cpu(rc->pldbuflen_qid); + + /* + * If we get a "new buffer" message from the SGE we + * need to move on to the next Free List buffer. + */ + if (len & RSPD_NEWBUF) { + /* + * We get one "new buffer" message when we + * first start up a queue so we need to ignore + * it when our offset into the buffer is 0. + */ + if (likely(rspq->offset > 0)) { + free_rx_bufs(rspq->adapter, &rxq->fl, + 1); + rspq->offset = 0; + } + len = RSPD_LEN(len); + } + + /* + * Gather packet fragments. + */ + for (frag = 0, fp = gl.frags; /**/; frag++, fp++) { + BUG_ON(frag >= MAX_SKB_FRAGS); + BUG_ON(rxq->fl.avail == 0); + sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(sdesc); + fp->page = sdesc->page; + fp->page_offset = rspq->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(rspq->adapter, &rxq->fl); + } + gl.nfrags = frag+1; + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access and start preloading first + * cache line ... + */ + dma_sync_single_for_cpu(rspq->adapter->pdev_dev, + get_buf_addr(sdesc), + fp->size, DMA_FROM_DEVICE); + gl.va = (page_address(gl.frags[0].page) + + gl.frags[0].page_offset); + prefetch(gl.va); + + /* + * Hand the new ingress packet to the handler for + * this Response Queue. 
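is_new_response() and rspq_next() implement the usual generation-bit scheme: every response carries a generation flag, and the consumer flips the value it expects each time it wraps, so "written by hardware since my last pass" can be detected without reading a producer index. A compact stand-alone model of that scheme:

/*
 * Stand-alone model of the generation-bit scheme used by
 * is_new_response() and rspq_next().
 */
#include <stdio.h>

#define Q_SIZE 4

struct entry { int gen; int data; };

struct rspq_model {
	struct entry ring[Q_SIZE];
	unsigned int cidx;
	int gen;		/* generation we expect for new entries */
};

static int consume(struct rspq_model *q, int *data)
{
	struct entry *e = &q->ring[q->cidx];

	if (e->gen != q->gen)
		return 0;	/* not written since our last pass */
	*data = e->data;
	if (++q->cidx == Q_SIZE) {	/* wrap: expect the other gen next */
		q->cidx = 0;
		q->gen ^= 1;
	}
	return 1;
}

int main(void)
{
	struct rspq_model q = { .gen = 1 };
	int i, v;

	for (i = 0; i < Q_SIZE; i++)		/* producer pass, gen 1 */
		q.ring[i] = (struct entry){ .gen = 1, .data = i };
	while (consume(&q, &v))
		printf("got %d\n", v);
	printf("after the wrap the consumer expects gen %d\n", q.gen);
	return 0;
}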
+ */ + ret = rspq->handler(rspq, rspq->cur_desc, &gl); + if (likely(ret == 0)) + rspq->offset += ALIGN(fp->size, FL_ALIGN); + else + restore_rx_bufs(&gl, &rxq->fl, frag); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = rspq->handler(rspq, rspq->cur_desc, NULL); + } else { + WARN_ON(rsp_type > RSP_TYPE_CPL); + ret = 0; + } + + if (unlikely(ret)) { + /* + * Couldn't process descriptor, back off for recovery. + * We use the SGE's last timer which has the longest + * interrupt coalescing value ... + */ + const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; + rspq->next_intr_params = + QINTR_TIMER_IDX(NOMEM_TIMER_IDX); + break; + } + + rspq_next(rspq); + budget_left--; + } + + /* + * If this is a Response Queue with an associated Free List and + * at least two Egress Queue units available in the Free List + * for new buffer pointers, refill the Free List. + */ + if (rspq->offset >= 0 && + rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) + __refill_fl(rspq->adapter, &rxq->fl); + return budget - budget_left; +} + +/** + * napi_rx_handler - the NAPI handler for RX processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * in not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int intr_params; + struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); + int work_done = process_responses(rspq, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + intr_params = rspq->next_intr_params; + rspq->next_intr_params = rspq->intr_params; + } else + intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX); + + if (unlikely(work_done == 0)) + rspq->unhandled_irqs++; + + t4_write_reg(rspq->adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(work_done) | + INGRESSQID((u32)rspq->cntxt_id) | + SEINTARM(intr_params)); + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the NAPI case + * (i.e., response queue serviced by NAPI polling). + */ +irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *rspq = cookie; + + napi_schedule(&rspq->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + struct sge_rspq *intrq = &s->intrq; + unsigned int work_done; + + spin_lock(&adapter->sge.intrq_lock); + for (work_done = 0; ; work_done++) { + const struct rsp_ctrl *rc; + unsigned int qid, iq_idx; + struct sge_rspq *rspq; + + /* + * Grab the next response from the interrupt queue and bail + * out if it's not a new response. + */ + rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, intrq)) + break; + + /* + * If the response isn't a forwarded interrupt message issue a + * error and go on to the next response message. This should + * never happen ... 
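napi_rx_handler() follows the standard NAPI contract: process at most @budget responses; if fewer were available, complete NAPI and re-arm the interrupt, otherwise stay in polled mode. The skeleton of that contract, independent of the hardware details (the pending counter merely simulates a ring with work in it):

/*
 * Skeleton of the budget-driven poll loop used by napi_rx_handler():
 * under budget means "ring drained, re-enable interrupts", a full
 * budget means "stay in polled mode".
 */
#include <stdio.h>

static int pending = 10;	/* pretend responses waiting in the ring */

static int process_responses(int budget)
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	return done;
}

static int poll(int budget, int *irq_rearmed)
{
	int work_done = process_responses(budget);

	if (work_done < budget)
		*irq_rearmed = 1;	/* napi_complete() + GTS write */
	else
		*irq_rearmed = 0;	/* stay in polled mode */
	return work_done;
}

int main(void)
{
	int rearmed = 0;

	do {
		int work = poll(4, &rearmed);

		printf("polled %d, rearmed=%d\n", work, rearmed);
	} while (!rearmed);
	return 0;
}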
+ */ + rmb(); + if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { + dev_err(adapter->pdev_dev, + "Unexpected INTRQ response type %d\n", + RSPD_TYPE(rc->type_gen)); + continue; + } + + /* + * Extract the Queue ID from the interrupt message and perform + * sanity checking to make sure it really refers to one of our + * Ingress Queues which is active and matches the queue's ID. + * None of these error conditions should ever happen so we may + * want to either make them fatal and/or conditionalized under + * DEBUG. + */ + qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); + iq_idx = IQ_IDX(s, qid); + if (unlikely(iq_idx >= MAX_INGQ)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d out of range\n", qid); + continue; + } + rspq = s->ingr_map[iq_idx]; + if (unlikely(rspq == NULL)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d RSPQ=NULL\n", qid); + continue; + } + if (unlikely(rspq->abs_id != qid)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d refers to RSPQ %d\n", + qid, rspq->abs_id); + continue; + } + + /* + * Schedule NAPI processing on the indicated Response Queue + * and move on to the next entry in the Forwarded Interrupt + * Queue. + */ + napi_schedule(&rspq->napi); + rspq_next(intrq); + } + + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(work_done) | + INGRESSQID(intrq->cntxt_id) | + SEINTARM(intrq->intr_params)); + + spin_unlock(&adapter->sge.intrq_lock); + + return work_done; +} + +/* + * The MSI interrupt handler handles data events from SGE response queues as + * well as error and other async events as they all use the same MSI vector. + */ +irqreturn_t t4vf_intr_msi(int irq, void *cookie) +{ + struct adapter *adapter = cookie; + + process_intrq(adapter); + return IRQ_HANDLED; +} + +/** + * t4vf_intr_handler - select the top-level interrupt handler + * @adapter: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X or MSI). + */ +irq_handler_t t4vf_intr_handler(struct adapter *adapter) +{ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + if (adapter->flags & USING_MSIX) + return t4vf_sge_intr_msix; + else + return t4vf_intr_msi; +} + +/** + * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues + * @data: the adapter + * + * Runs periodically from a timer to perform maintenance of SGE RX queues. + * + * a) Replenishes RX queues that have run out due to memory shortage. + * Normally new RX buffers are added when existing ones are consumed but + * when out of memory a queue can become empty. We schedule NAPI to do + * the actual refill. + */ +static void sge_rx_timer_cb(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *s = &adapter->sge; + unsigned int i; + + /* + * Scan the "Starving Free Lists" flag array looking for any Free + * Lists in need of more free buffers. If we find one and it's not + * being actively polled, then bump its "starving" counter and attempt + * to refill it. If we're successful in adding enough buffers to push + * the Free List over the starving threshold, then we can clear its + * "starving" status. 
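The scan walks each word of the starving-free-list bitmap with the classic "clear lowest set bit" idiom: m &= m - 1 removes the bit just handled and __ffs() yields its index. The same loop in user space, with the GCC builtin standing in for the kernel's __ffs():

/*
 * Stand-alone version of the set-bit walk used in sge_rx_timer_cb():
 * iterate the set bits of a word with m &= m - 1.  __builtin_ctzl()
 * stands in for the kernel's __ffs().
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void scan_word(unsigned long word, unsigned int word_index)
{
	unsigned long m;

	for (m = word; m; m &= m - 1) {
		unsigned int id = __builtin_ctzl(m) +
				  word_index * BITS_PER_LONG;

		printf("free list %u needs a refill\n", id);
	}
}

int main(void)
{
	scan_word(0x15UL, 0);	/* bits 0, 2, 4 */
	scan_word(0x01UL, 1);	/* bit BITS_PER_LONG */
	return 0;
}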
+ */ + for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) { + unsigned long m; + + for (m = s->starving_fl[i]; m; m &= m - 1) { + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_clear_bit(); + + /* + * Since we are accessing fl without a lock there's a + * small probability of a false positive where we + * schedule napi but the FL is no longer starving. + * No biggie. + */ + if (fl_starving(fl)) { + struct sge_eth_rxq *rxq; + + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + } + + /* + * Reschedule the next scan for starving Free Lists ... + */ + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +/** + * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues + * @data: the adapter + * + * Runs periodically from a timer to perform maintenance of SGE TX queues. + * + * b) Reclaims completed Tx packets for the Ethernet queues. Normally + * packets are cleaned up by new Tx packets, this timer cleans up packets + * when no new packets are being submitted. This is essential for pktgen, + * at least. + */ +static void sge_tx_timer_cb(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *s = &adapter->sge; + unsigned int i, budget; + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *txq = &s->ethtxq[i]; + + if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { + int avail = reclaimable(&txq->q); + + if (avail > budget) + avail = budget; + + free_tx_desc(adapter, &txq->q, avail, true); + txq->q.in_use -= avail; + __netif_tx_unlock(txq->txq); + + budget -= avail; + if (!budget) + break; + } + + i++; + if (i >= s->ethqsets) + i = 0; + } while (i != s->ethtxq_rover); + s->ethtxq_rover = i; + + /* + * If we found too many reclaimable packets schedule a timer in the + * near future to continue where we left off. Otherwise the next timer + * will be at its normal interval. + */ + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +/** + * t4vf_sge_alloc_rxq - allocate an SGE RX Queue + * @adapter: the adapter + * @rspq: pointer to to the new rxq's Response Queue to be filled in + * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue + * @dev: the network device associated with the new rspq + * @intr_dest: MSI-X vector index (overriden in MSI mode) + * @fl: pointer to the new rxq's Free List to be filled in + * @hnd: the interrupt handler to invoke for the rspq + */ +int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, + bool iqasynch, struct net_device *dev, + int intr_dest, + struct sge_fl *fl, rspq_handler_t hnd) +{ + struct port_info *pi = netdev_priv(dev); + struct fw_iq_cmd cmd, rpl; + int ret, iqandst, flsz = 0; + + /* + * If we're using MSI interrupts and we're not initializing the + * Forwarded Interrupt Queue itself, then set up this queue for + * indirect interrupts to the Forwarded Interrupt Queue. Obviously + * the Forwarded Interrupt Queue must be set up before any other + * ingress queue ... + */ + if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) { + iqandst = SGE_INTRDST_IQ; + intr_dest = adapter->sge.intrq.abs_id; + } else + iqandst = SGE_INTRDST_PCI; + + /* + * Allocate the hardware ring for the Response Queue. 
The size needs + * to be a multiple of 16 which includes the mandatory status entry + * (regardless of whether the Status Page capabilities are enabled or + * not). + */ + rspq->size = roundup(rspq->size, 16); + rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, + 0, &rspq->phys_addr, NULL, 0); + if (!rspq->desc) + return -ENOMEM; + + /* + * Fill in the Ingress Queue Command. Note: Ideally this code would + * be in t4vf_hw.c but there are so many parameters and dependencies + * on our Linux SGE state that we would end up having to pass tons of + * parameters. We'll have to think about how this might be migrated + * into OS-independent common code ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | + FW_IQ_CMD_IQSTART(1) | + FW_LEN16(cmd)); + cmd.type_to_iqandstindex = + cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH(iqasynch) | + FW_IQ_CMD_VIID(pi->viid) | + FW_IQ_CMD_IQANDST(iqandst) | + FW_IQ_CMD_IQANUS(1) | + FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | + FW_IQ_CMD_IQANDSTINDEX(intr_dest)); + cmd.iqdroprss_to_iqesize = + cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | + FW_IQ_CMD_IQGTSMODE | + FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); + cmd.iqsize = cpu_to_be16(rspq->size); + cmd.iqaddr = cpu_to_be64(rspq->phys_addr); + + if (fl) { + /* + * Allocate the ring for the hardware free list (with space + * for its status page) along with the associated software + * descriptor ring. The free list size needs to be a multiple + * of the Egress Queue Unit. + */ + fl->size = roundup(fl->size, FL_PER_EQ_UNIT); + fl->desc = alloc_ring(adapter->pdev_dev, fl->size, + sizeof(__be64), sizeof(struct rx_sw_desc), + &fl->addr, &fl->sdesc, STAT_LEN); + if (!fl->desc) { + ret = -ENOMEM; + goto err; + } + + /* + * Calculate the size of the hardware free list ring plus + * status page (which the SGE will place at the end of the + * free list ring) in Egress Queue Units. + */ + flsz = (fl->size / FL_PER_EQ_UNIT + + STAT_LEN / EQ_UNIT); + + /* + * Fill in all the relevant firmware Ingress Queue Command + * fields for the free list. + */ + cmd.iqns_to_fl0congen = + cpu_to_be32( + FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | + FW_IQ_CMD_FL0PACKEN | + FW_IQ_CMD_FL0PADEN); + cmd.fl0dcaen_to_fl0cidxfthresh = + cpu_to_be16( + FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); + cmd.fl0size = cpu_to_be16(flsz); + cmd.fl0addr = cpu_to_be64(fl->addr); + } + + /* + * Issue the firmware Ingress Queue Command and extract the results if + * it completes successfully. + */ + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret) + goto err; + + netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); + rspq->cur_desc = rspq->desc; + rspq->cidx = 0; + rspq->gen = 1; + rspq->next_intr_params = rspq->intr_params; + rspq->cntxt_id = be16_to_cpu(rpl.iqid); + rspq->abs_id = be16_to_cpu(rpl.physiqid); + rspq->size--; /* subtract status entry */ + rspq->adapter = adapter; + rspq->netdev = dev; + rspq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + rspq->offset = fl ? 
0 : -1; + + if (fl) { + fl->cntxt_id = be16_to_cpu(rpl.fl0id); + fl->avail = 0; + fl->pend_cred = 0; + fl->pidx = 0; + fl->cidx = 0; + fl->alloc_failed = 0; + fl->large_alloc_failed = 0; + fl->starving = 0; + refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); + } + + return 0; + +err: + /* + * An error occurred. Clean up our partial allocation state and + * return the error. + */ + if (rspq->desc) { + dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len, + rspq->desc, rspq->phys_addr); + rspq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT, + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +/** + * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue + * @adapter: the adapter + * @txq: pointer to the new txq to be filled in + * @devq: the network TX queue associated with the new txq + * @iqid: the relative ingress queue ID to which events relating to + * the new txq should be directed + */ +int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *devq, + unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_eth_cmd cmd, rpl; + struct port_info *pi = netdev_priv(dev); + + /* + * Calculate the size of the hardware TX Queue (including the + * status age on the end) in units of TX Descriptors. + */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + /* + * Allocate the hardware ring for the TX ring (with space for its + * status page) along with the associated software descriptor ring. + */ + txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, + sizeof(struct tx_desc), + sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + /* + * Fill in the Egress Queue Command. Note: As with the direct use of + * the firmware Ingress Queue COmmand above in our RXQ allocation + * routine, ideally, this code would be in t4vf_hw.c. Again, we'll + * have to see if there's some reasonable way to parameterize it + * into the common code ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | + FW_EQ_ETH_CMD_EQSTART | + FW_LEN16(cmd)); + cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid)); + cmd.fetchszm_to_iqid = + cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | + FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | + FW_EQ_ETH_CMD_IQID(iqid)); + cmd.dcaen_to_eqsize = + cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) | + FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) | + FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) | + FW_EQ_ETH_CMD_EQSIZE(nentries)); + cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); + + /* + * Issue the firmware Egress Queue Command and extract the results if + * it completes successfully. + */ + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret) { + /* + * The girmware Ingress Queue Command failed for some reason. + * Free up our partial allocation state and return the error. 
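Both allocation routines size their hardware rings to include the status page the SGE appends: the Ethernet TX queue adds STAT_LEN / sizeof(struct tx_desc) descriptors, and the free list is expressed in Egress Queue units plus the status page. A small stand-alone calculation with representative values; STAT_LEN is 64 or 128 bytes depending on the SGE configuration, and the unit sizes below are assumptions for the sake of the example rather than values taken from the hardware headers:

/*
 * Back-of-the-envelope ring sizing mirroring t4vf_sge_alloc_eth_txq()
 * and t4vf_sge_alloc_rxq().  The constants are representative only.
 */
#include <stdio.h>

#define EQ_UNIT		64			/* egress queue unit, bytes */
#define TX_DESC_SIZE	64			/* assumed TX descriptor size */
#define FL_PER_EQ_UNIT	(EQ_UNIT / 8)		/* 64-bit buffer pointers */
#define STAT_LEN	128			/* status page, 64 or 128 */

int main(void)
{
	unsigned int txq_size = 1024;	/* requested TX descriptors */
	unsigned int fl_size = 4096;	/* requested free-list entries,
					 * already a multiple of
					 * FL_PER_EQ_UNIT */

	/* TX ring: requested descriptors plus room for the status page. */
	unsigned int tx_nentries = txq_size + STAT_LEN / TX_DESC_SIZE;

	/*
	 * Free list size handed to the firmware, in egress queue units,
	 * again including the trailing status page.
	 */
	unsigned int flsz = fl_size / FL_PER_EQ_UNIT + STAT_LEN / EQ_UNIT;

	printf("TX ring: %u descriptors (%u bytes)\n",
	       tx_nentries, tx_nentries * TX_DESC_SIZE);
	printf("Free list: %u EQ units (%u bytes)\n", flsz, flsz * EQ_UNIT);
	return 0;
}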
+ */ + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adapter->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + txq->q.in_use = 0; + txq->q.cidx = 0; + txq->q.pidx = 0; + txq->q.stat = (void *)&txq->q.desc[txq->q.size]; + txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); + txq->q.abs_id = + FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); + txq->txq = devq; + txq->tso = 0; + txq->tx_cso = 0; + txq->vlan_ins = 0; + txq->q.stops = 0; + txq->q.restarts = 0; + txq->mapping_err = 0; + return 0; +} + +/* + * Free the DMA map resources associated with a TX queue. + */ +static void free_txq(struct adapter *adapter, struct sge_txq *tq) +{ + dma_free_coherent(adapter->pdev_dev, + tq->size * sizeof(*tq->desc) + STAT_LEN, + tq->desc, tq->phys_addr); + tq->cntxt_id = 0; + tq->sdesc = NULL; + tq->desc = NULL; +} + +/* + * Free the resources associated with a response queue (possibly including a + * free list). + */ +static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, + struct sge_fl *fl) +{ + unsigned int flid = fl ? fl->cntxt_id : 0xffff; + + t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, + rspq->cntxt_id, flid, 0xffff); + dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, + rspq->desc, rspq->phys_addr); + netif_napi_del(&rspq->napi); + rspq->netdev = NULL; + rspq->cntxt_id = 0; + rspq->abs_id = 0; + rspq->desc = NULL; + + if (fl) { + free_rx_bufs(adapter, fl, fl->avail); + dma_free_coherent(adapter->pdev_dev, + fl->size * sizeof(*fl->desc) + STAT_LEN, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4vf_free_sge_resources - free SGE resources + * @adapter: the adapter + * + * Frees resources used by the SGE queue sets. + */ +void t4vf_free_sge_resources(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + struct sge_eth_rxq *rxq = s->ethrxq; + struct sge_eth_txq *txq = s->ethtxq; + struct sge_rspq *evtq = &s->fw_evtq; + struct sge_rspq *intrq = &s->intrq; + int qs; + + for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { + if (rxq->rspq.desc) + free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); + if (txq->q.desc) { + t4vf_eth_eq_free(adapter, txq->q.cntxt_id); + free_tx_desc(adapter, &txq->q, txq->q.in_use, true); + kfree(txq->q.sdesc); + free_txq(adapter, &txq->q); + } + } + if (evtq->desc) + free_rspq_fl(adapter, evtq, NULL); + if (intrq->desc) + free_rspq_fl(adapter, intrq, NULL); +} + +/** + * t4vf_sge_start - enable SGE operation + * @adapter: the adapter + * + * Start tasklets and timers associated with the DMA engine. + */ +void t4vf_sge_start(struct adapter *adapter) +{ + adapter->sge.ethtxq_rover = 0; + mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); + mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); +} + +/** + * t4vf_sge_stop - disable SGE operation + * @adapter: the adapter + * + * Stop tasklets and timers associated with the DMA engine. Note that + * this is effective only if measures have been taken to disable any HW + * events that may restart them. + */ +void t4vf_sge_stop(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + + if (s->rx_timer.function) + del_timer_sync(&s->rx_timer); + if (s->tx_timer.function) + del_timer_sync(&s->tx_timer); +} + +/** + * t4vf_sge_init - initialize SGE + * @adapter: the adapter + * + * Performs SGE initialization needed every time after a chip reset. 
+ * We do not initialize any of the queue sets here, instead the driver + * top-level must request those individually. We also do not enable DMA + * here, that should be done after the queues have been set up. + */ +int t4vf_sge_init(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 fl0 = sge_params->sge_fl_buffer_size[0]; + u32 fl1 = sge_params->sge_fl_buffer_size[1]; + struct sge *s = &adapter->sge; + + /* + * Start by vetting the basic SGE parameters which have been set up by + * the Physical Function Driver. Ideally we should be able to deal + * with _any_ configuration. Practice is different ... + */ + if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", + fl0, fl1); + return -EINVAL; + } + if ((sge_params->sge_control & RXPKTCPLMODE) == 0) { + dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + /* + * Now translate the adapter parameters into our internal forms. + */ + if (fl1) + FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; + STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64); + PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); + FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + + SGE_INGPADBOUNDARY_SHIFT); + + /* + * Set up tasklet timers. + */ + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter); + + /* + * Initialize Forwarded Interrupt Queue lock. + */ + spin_lock_init(&s->intrq_lock); + + return 0; +} diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h new file mode 100644 index 0000000000000000000000000000000000000000..5c7bde7f9baeb18268c80f04e63cdcf951fe328c --- /dev/null +++ b/drivers/net/cxgb4vf/t4vf_common.h @@ -0,0 +1,273 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4VF_COMMON_H__ +#define __T4VF_COMMON_H__ + +#include "../cxgb4/t4fw_api.h" + +/* + * The "len16" field of a Firmware Command Structure ... 
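The parameter translation in t4vf_sge_init() can be pictured with a few concrete numbers: the large free-list buffer size gives FL_PG_ORDER, the status-page-size bit selects 64 or 128 bytes for STAT_LEN, and the ingress padding field sets FL_ALIGN. The stand-alone sketch below walks through that arithmetic; the field widths and the padding shift of 5 are assumptions for illustration, the real encodings live in t4_regs.h:

/*
 * Illustration of how t4vf_sge_init() turns the PF-provided SGE
 * parameters into the driver's internal values.
 */
#include <stdio.h>

#define PAGE_SHIFT_EX		12	/* 4 KB pages */
#define INGPADBOUNDARY_SHIFT	5	/* assumed minimum boundary = 32 B */

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int fl1 = 65536;	/* large FL buffer size from the PF */
	int egrstatuspagesize = 1;	/* 0 -> 64 B, 1 -> 128 B status page */
	unsigned int ingpadboundary = 2;	/* register field value */

	unsigned int fl_pg_order = ilog2_u32(fl1) - PAGE_SHIFT_EX;
	unsigned int stat_len = egrstatuspagesize ? 128 : 64;
	unsigned int fl_align = 1u << (ingpadboundary + INGPADBOUNDARY_SHIFT);

	printf("FL_PG_ORDER=%u (large buffers are %u pages)\n",
	       fl_pg_order, 1u << fl_pg_order);
	printf("STAT_LEN=%u bytes, FL_ALIGN=%u bytes\n", stat_len, fl_align);
	return 0;
}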
+ */ +#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) + +/* + * Per-VF statistics. + */ +struct t4vf_port_stats { + /* + * TX statistics. + */ + u64 tx_bcast_bytes; /* broadcast */ + u64 tx_bcast_frames; + u64 tx_mcast_bytes; /* multicast */ + u64 tx_mcast_frames; + u64 tx_ucast_bytes; /* unicast */ + u64 tx_ucast_frames; + u64 tx_drop_frames; /* TX dropped frames */ + u64 tx_offload_bytes; /* offload */ + u64 tx_offload_frames; + + /* + * RX statistics. + */ + u64 rx_bcast_bytes; /* broadcast */ + u64 rx_bcast_frames; + u64 rx_mcast_bytes; /* multicast */ + u64 rx_mcast_frames; + u64 rx_ucast_bytes; + u64 rx_ucast_frames; /* unicast */ + + u64 rx_err_frames; /* RX error frames */ +}; + +/* + * Per-"port" (Virtual Interface) link configuration ... + */ +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? */ +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +/* + * General device parameters ... + */ +struct dev_params { + u32 fwrev; /* firmware version */ + u32 tprev; /* TP Microcode Version */ +}; + +/* + * Scatter Gather Engine parameters. These are almost all determined by the + * Physical Function Driver. We just need to grab them to see within which + * environment we're playing ... + */ +struct sge_params { + u32 sge_control; /* padding, boundaries, lengths, etc. */ + u32 sge_host_page_size; /* RDMA page sizes */ + u32 sge_queues_per_page; /* RDMA queues/page */ + u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ + u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ + u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ + u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; +}; + +/* + * Vital Product Data parameters. + */ +struct vpd_params { + u32 cclk; /* Core Clock (KHz) */ +}; + +/* + * Global Receive Side Scaling (RSS) parameters in host-native format. + */ +struct rss_params { + unsigned int mode; /* RSS mode */ + union { + struct { + int synmapen:1; /* SYN Map Enable */ + int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ + int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ + int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ + int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ + int ofdmapen:1; /* Offload Map Enable */ + int tnlmapen:1; /* Tunnel Map Enable */ + int tnlalllookup:1; /* Tunnel All Lookup */ + int hashtoeplitz:1; /* use Toeplitz hash */ + } basicvirtual; + } u; +}; + +/* + * Virtual Interface RSS Configuration in host-native format. + */ +union rss_vi_config { + struct { + u16 defaultq; /* Ingress Queue ID for !tnlalllookup */ + int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ + int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ + int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ + int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ + int udpen; /* hash 4-tuple UDP ingress packets */ + } basicvirtual; +}; + +/* + * Maximum resources provisioned for a PCI VF. 
+ */ +struct vf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + +/* + * Per-"adapter" (Virtual Function) parameters. + */ +struct adapter_params { + struct dev_params dev; /* general device parameters */ + struct sge_params sge; /* Scatter Gather Engine */ + struct vpd_params vpd; /* Vital Product Data */ + struct rss_params rss; /* Receive Side Scaling */ + struct vf_resources vfres; /* Virtual Function Resource limits */ + u8 nports; /* # of Ethernet "ports" */ +}; + +#include "adapter.h" + +#ifndef PCI_VENDOR_ID_CHELSIO +# define PCI_VENDOR_ID_CHELSIO 0x1425 +#endif + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; iter++) + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & SUPPORTED_10000baseT_Full) != 0; +} + +static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) +{ + return adapter->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adapter, + unsigned int us) +{ + return (us * adapter->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + return (ticks * 1000) / adapter->params.vpd.cclk; +} + +int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); + +static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); +} + +static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); +} + +int __devinit t4vf_wait_dev_ready(struct adapter *); +int __devinit t4vf_port_init(struct adapter *, int); + +int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); +int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); + +int t4vf_get_sge_params(struct adapter *); +int t4vf_get_vpd_params(struct adapter *); +int t4vf_get_dev_params(struct adapter *); +int t4vf_get_rss_glb_config(struct adapter *); +int t4vf_get_vfres(struct adapter *); + +int t4vf_read_rss_vi_config(struct adapter *, unsigned int, + union rss_vi_config *); +int t4vf_write_rss_vi_config(struct adapter *, unsigned int, + union rss_vi_config *); +int t4vf_config_rss_range(struct adapter *, unsigned int, int, int, + const u16 *, int); + +int t4vf_alloc_vi(struct adapter *, int); +int t4vf_free_vi(struct adapter *, int); +int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool); +int t4vf_identify_port(struct adapter *, unsigned int, unsigned int); + +int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, + bool); +int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, + const u8 **, u16 *, u64 *, bool); +int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); +int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); +int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); + +int t4vf_iq_free(struct 
adapter *, unsigned int, unsigned int, unsigned int, + unsigned int); +int t4vf_eth_eq_free(struct adapter *, unsigned int); + +int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); + +#endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/cxgb4vf/t4vf_defs.h b/drivers/net/cxgb4vf/t4vf_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..c7b127d937671698a400b5b73187eb28c1a51c2f --- /dev/null +++ b/drivers/net/cxgb4vf/t4vf_defs.h @@ -0,0 +1,121 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4VF_DEFS_H__ +#define __T4VF_DEFS_H__ + +#include "../cxgb4/t4_regs.h" + +/* + * The VF Register Map. + * + * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local + * bus module (PL) and CPU Interface Module (CIM) components are mapped via + * the Slice to Module Map Table (see below) in the Physical Function Register + * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base + * and Offset registers in the PF Register Map. The MBDATA base address is + * quite constrained as it determines the Mailbox Data addresses for both PFs + * and VFs, and therefore must fit in both the VF and PF Register Maps without + * overlapping other registers. + */ +#define T4VF_SGE_BASE_ADDR 0x0000 +#define T4VF_MPS_BASE_ADDR 0x0100 +#define T4VF_PL_BASE_ADDR 0x0200 +#define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T4VF_CIM_BASE_ADDR 0x0300 + +#define T4VF_REGMAP_START 0x0000 +#define T4VF_REGMAP_SIZE 0x0400 + +/* + * There's no hardware limitation which requires that the addresses of the + * Mailbox Data in the fixed CIM PF map and the programmable VF map must + * match. However, it's a useful convention ... + */ +#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA +#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA! +#endif + +/* + * Virtual Function "Slice to Module Map Table" definitions. + * + * This table allows us to map subsets of the various module register sets + * into the T4VF Register Map. 
Each table entry identifies the index of the + * module whose registers are being mapped, the offset within the module's + * register set that the mapping should start at, the limit of the mapping, + * and the offset within the T4VF Register Map to which the module's registers + * are being mapped. All addresses and qualtities are in terms of 32-bit + * words. The "limit" value is also in terms of 32-bit words and is equal to + * the last address mapped in the T4VF Register Map 1 (i.e. it's a "<=" + * relation rather than a "<"). + */ +#define T4VF_MOD_MAP(module, index, first, last) \ + T4VF_MOD_MAP_##module##_INDEX = (index), \ + T4VF_MOD_MAP_##module##_FIRST = (first), \ + T4VF_MOD_MAP_##module##_LAST = (last), \ + T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \ + T4VF_MOD_MAP_##module##_BASE = \ + (T4VF_##module##_BASE_ADDR/4 + (first)/4), \ + T4VF_MOD_MAP_##module##_LIMIT = \ + (T4VF_##module##_BASE_ADDR/4 + (last)/4), + +#define SGE_VF_KDOORBELL 0x0 +#define SGE_VF_GTS 0x4 +#define MPS_VF_CTL 0x0 +#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc +#define PL_VF_WHOAMI 0x0 +#define CIM_VF_EXT_MAILBOX_CTRL 0x0 +#define CIM_VF_EXT_MAILBOX_STATUS 0x4 + +enum { + T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS) + T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H) + T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI) + T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS) +}; + +/* + * There isn't a Slice to Module Map Table entry for the Mailbox Data + * registers, but it's convenient to use similar names as above. There are 8 + * little-endian 64-bit Mailbox Data registers. Note that the "instances" + * value below is in terms of 32-bit words which matches the "word" addressing + * space we use above for the Slice to Module Map Space. + */ +#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16 + +#define T4VF_MBDATA_FIRST 0 +#define T4VF_MBDATA_LAST ((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4) + +#endif /* __T4T4VF_DEFS_H__ */ diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..ea1c123f0cb4b9e44eeac857920622aa9d51e8b5 --- /dev/null +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -0,0 +1,1333 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4fw_api.h" + +/* + * Wait for the device to become ready (signified by our "who am I" register + * returning a value other than all 1's). Return an error if it doesn't + * become ready ... + */ +int __devinit t4vf_wait_dev_ready(struct adapter *adapter) +{ + const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; + const u32 notready1 = 0xffffffff; + const u32 notready2 = 0xeeeeeeee; + u32 val; + + val = t4_read_reg(adapter, whoami); + if (val != notready1 && val != notready2) + return 0; + msleep(500); + val = t4_read_reg(adapter, whoami); + if (val != notready1 && val != notready2) + return 0; + else + return -EIO; +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order + * (since the firmware data structures are specified in a big-endian layout). + */ +static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, + u32 mbox_data) +{ + for ( ; size; size -= 8, mbox_data += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); +} + +/* + * Dump contents of mailbox with a leading tag. + */ +static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data) +{ + dev_err(adapter->pdev_dev, + "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag, + (unsigned long long)t4_read_reg64(adapter, mbox_data + 0), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 8), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 16), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 24), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 32), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 40), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 48), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 56)); +} + +/** + * t4vf_wr_mbox_core - send a command to FW through the mailbox + * @adapter: the adapter + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the mailbox and waits for the + * FW to execute the command. If @rpl is not %NULL it is used to store + * the FW's reply to the command. The command and its optional reply + * are of the same length. FW can take up to 500 ms to respond. + * @sleep_ok determines whether we may sleep while awaiting the response. + * If sleeping is allowed we use progressive backoff otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). 
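+ *
+ *	Most callers in this driver go through the t4vf_wr_mbox() and
+ *	t4vf_wr_mbox_ns() wrappers rather than calling this routine
+ *	directly.  An illustrative sketch of issuing a command (the
+ *	command contents and error handling are the caller's):
+ *
+ *		struct fw_vi_enable_cmd cmd;
+ *
+ *		memset(&cmd, 0, sizeof(cmd));
+ *		... fill in the big-endian command fields ...
+ *		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);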
+ */ +int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100 + }; + + u32 v; + int i, ms, delay_idx; + const __be64 *p; + u32 mbox_data = T4VF_MBDATA_BASE_ADDR; + u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + + /* + * Commands must be multiples of 16 bytes in length and may not be + * larger than the size of the Mailbox Data register array. + */ + if ((size % 16) != 0 || + size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) + return -EINVAL; + + /* + * Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ + v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); + if (v != MBOX_OWNER_DRV) + return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; + + /* + * Write the command array into the Mailbox Data register array and + * transfer ownership of the mailbox to the firmware. + */ + for (i = 0, p = cmd; i < size; i += 8) + t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + t4_write_reg(adapter, mbox_ctl, + MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); + t4_read_reg(adapter, mbox_ctl); /* flush write */ + + /* + * Spin waiting for firmware to acknowledge processing our command. + */ + delay_idx = 0; + ms = delay[0]; + + for (i = 0; i < 500; i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else + mdelay(ms); + + /* + * If we're the owner, see if this is the reply we wanted. + */ + v = t4_read_reg(adapter, mbox_ctl); + if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { + /* + * If the Message Valid bit isn't on, revoke ownership + * of the mailbox and continue waiting for our reply. + */ + if ((v & MBMSGVALID) == 0) { + t4_write_reg(adapter, mbox_ctl, + MBOWNER(MBOX_OWNER_NONE)); + continue; + } + + /* + * We now have our reply. Extract the command return + * value, copy the reply back to our caller's buffer + * (if specified) and revoke ownership of the mailbox. + * We return the (negated) firmware command return + * code (this depends on FW_SUCCESS == 0). + */ + + /* return value in low-order little-endian word */ + v = t4_read_reg(adapter, mbox_data); + if (FW_CMD_RETVAL_GET(v)) + dump_mbox(adapter, "FW Error", mbox_data); + + if (rpl) { + /* request bit in high-order BE word */ + WARN_ON((be32_to_cpu(*(const u32 *)cmd) + & FW_CMD_REQUEST) == 0); + get_mbox_rpl(adapter, rpl, size, mbox_data); + WARN_ON((be32_to_cpu(*(u32 *)rpl) + & FW_CMD_REQUEST) != 0); + } + t4_write_reg(adapter, mbox_ctl, + MBOWNER(MBOX_OWNER_NONE)); + return -FW_CMD_RETVAL_GET(v); + } + } + + /* + * We timed out. Return the error ... + */ + dump_mbox(adapter, "FW Timeout", mbox_data); + return -ETIMEDOUT; +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by hardware + * inexact (hash) address matching. 
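+ *
+ *	The 6-bit result selects one bit of the 64-bit inexact-match hash
+ *	vector programmed via t4vf_set_addr_hash(); for example
+ *	(illustrative), t4vf_alloc_mac_filt() below folds addresses which
+ *	couldn't get an exact-match filter into the vector with:
+ *
+ *		hash |= 1ULL << hash_mac_addr(addr);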
+ */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @caps: link capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. + */ +static void __devinit init_link_config(struct link_config *lc, + unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = 0; + lc->speed = 0; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising = lc->supported; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/** + * t4vf_port_init - initialize port hardware/software state + * @adapter: the adapter + * @pidx: the adapter port index + */ +int __devinit t4vf_port_init(struct adapter *adapter, int pidx) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + struct fw_vi_cmd vi_cmd, vi_rpl; + struct fw_port_cmd port_cmd, port_rpl; + int v; + u32 word; + + /* + * Execute a VI Read command to get our Virtual Interface information + * like MAC address, etc. + */ + memset(&vi_cmd, 0, sizeof(vi_cmd)); + vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); + vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid)); + v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (v) + return v; + + BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd)); + pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd)); + t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); + + /* + * If we don't have read access to our port information, we're done + * now. Otherwise, execute a PORT Read command to get it ... + */ + if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT)) + return 0; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ | + FW_PORT_CMD_PORTID(pi->port_id)); + port_cmd.action_to_len16 = + cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(port_cmd)); + v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); + if (v) + return v; + + v = 0; + word = be16_to_cpu(port_rpl.u.info.pcap); + if (word & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (word & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (word & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + if (word & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + init_link_config(&pi->link_cfg, v); + + return 0; +} + +/** + * t4vf_query_params - query FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Reads the values of firmware or device parameters. Up to 7 parameters + * can be queried at once. 
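+ *
+ *	For example (cf. t4vf_get_dev_params() below), reading the
+ *	firmware revision amounts to:
+ *
+ *		u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);
+ *		u32 val;
+ *		int err = t4vf_query_params(adapter, 1, &param, &val);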
+ */ +int t4vf_query_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, u32 *vals) +{ + int i, ret; + struct fw_params_cmd cmd, rpl; + struct fw_params_param *p; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams].mnem), 16); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) + p->mnem = htonl(*params++); + + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret == 0) + for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++) + *vals++ = be32_to_cpu(p->val); + return ret; +} + +/** + * t4vf_set_params - sets FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Sets the values of firmware or device parameters. Up to 7 parameters + * can be specified at once. + */ +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals) +{ + int i; + struct fw_params_cmd cmd; + struct fw_params_param *p; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams]), 16); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { + p->mnem = cpu_to_be32(*params++); + p->val = cpu_to_be32(*vals++); + } + + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters + * @adapter: the adapter + * + * Retrieves various core SGE parameters in the form of hardware SGE + * register values. The caller is responsible for decoding these as + * needed. The SGE parameters are stored in @adapter->params.sge. 
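+ *
+ *	Note that eight SGE registers are needed here, one more than the
+ *	seven-parameter limit of t4vf_query_params(), so they are
+ *	retrieved with two separate queries below.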
+ */ +int t4vf_get_sge_params(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 params[7], vals[7]; + int v; + + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_CONTROL)); + params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE)); + params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0)); + params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1)); + params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1)); + params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3)); + params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5)); + v = t4vf_query_params(adapter, 7, params, vals); + if (v) + return v; + sge_params->sge_control = vals[0]; + sge_params->sge_host_page_size = vals[1]; + sge_params->sge_fl_buffer_size[0] = vals[2]; + sge_params->sge_fl_buffer_size[1] = vals[3]; + sge_params->sge_timer_value_0_and_1 = vals[4]; + sge_params->sge_timer_value_2_and_3 = vals[5]; + sge_params->sge_timer_value_4_and_5 = vals[6]; + + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v) + return v; + sge_params->sge_ingress_rx_threshold = vals[0]; + + return 0; +} + +/** + * t4vf_get_vpd_params - retrieve device VPD paremeters + * @adapter: the adapter + * + * Retrives various device Vital Product Data parameters. The parameters + * are stored in @adapter->params.vpd. + */ +int t4vf_get_vpd_params(struct adapter *adapter) +{ + struct vpd_params *vpd_params = &adapter->params.vpd; + u32 params[7], vals[7]; + int v; + + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v) + return v; + vpd_params->cclk = vals[0]; + + return 0; +} + +/** + * t4vf_get_dev_params - retrieve device paremeters + * @adapter: the adapter + * + * Retrives various device parameters. The parameters are stored in + * @adapter->params.dev. + */ +int t4vf_get_dev_params(struct adapter *adapter) +{ + struct dev_params *dev_params = &adapter->params.dev; + u32 params[7], vals[7]; + int v; + + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV)); + params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v) + return v; + dev_params->fwrev = vals[0]; + dev_params->tprev = vals[1]; + + return 0; +} + +/** + * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration + * @adapter: the adapter + * + * Retrieves global RSS mode and parameters with which we have to live + * and stores them in the @adapter's RSS parameters. + */ +int t4vf_get_rss_glb_config(struct adapter *adapter) +{ + struct rss_params *rss = &adapter->params.rss; + struct fw_rss_glb_config_cmd cmd, rpl; + int v; + + /* + * Execute an RSS Global Configuration read command to retrieve + * our RSS configuration. 
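+	 * Only the "Basic Virtual" RSS mode is usable by a VF driver;
+	 * any other mode reported by the firmware is rejected further
+	 * down.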
+ */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + /* + * Transate the big-endian RSS Global Configuration into our + * cpu-endian format based on the RSS mode. We also do first level + * filtering at this point to weed out modes which don't support + * VF Drivers ... + */ + rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET( + be32_to_cpu(rpl.u.manual.mode_pkd)); + switch (rss->mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu( + rpl.u.basicvirtual.synmapen_to_hashtoeplitz); + + rss->u.basicvirtual.synmapen = + ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + rss->u.basicvirtual.syn4tupenipv6 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + rss->u.basicvirtual.syn2tupenipv6 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + rss->u.basicvirtual.syn4tupenipv4 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + rss->u.basicvirtual.syn2tupenipv4 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + + rss->u.basicvirtual.ofdmapen = + ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + + rss->u.basicvirtual.tnlmapen = + ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + rss->u.basicvirtual.tnlalllookup = + ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + + rss->u.basicvirtual.hashtoeplitz = + ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + + /* we need at least Tunnel Map Enable to be set */ + if (!rss->u.basicvirtual.tnlmapen) + return -EINVAL; + break; + } + + default: + /* all unknown/unsupported RSS modes result in an error */ + return -EINVAL; + } + + return 0; +} + +/** + * t4vf_get_vfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a virtual + * function. The results are stored in @adapter->vfres. + */ +int t4vf_get_vfres(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct fw_pfvf_cmd cmd, rpl; + int v; + u32 word; + + /* + * Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + /* + * Extract VF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word); + vfres->niq = FW_PFVF_CMD_NIQ_GET(word); + + word = be32_to_cpu(rpl.type_to_neq); + vfres->neq = FW_PFVF_CMD_NEQ_GET(word); + vfres->pmask = FW_PFVF_CMD_PMASK_GET(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + vfres->tc = FW_PFVF_CMD_TC_GET(word); + vfres->nvi = FW_PFVF_CMD_NVI_GET(word); + vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word); + vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word); + vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word); + + return 0; +} + +/** + * t4vf_read_rss_vi_config - read a VI's RSS configuration + * @adapter: the adapter + * @viid: Virtual Interface ID + * @config: pointer to host-native VI RSS Configuration buffer + * + * Reads the Virtual Interface's RSS configuration information and + * translates it into CPU-native format. 
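+ *
+ *	Together with t4vf_write_rss_vi_config() below this allows a
+ *	simple read-modify-write of a VI's RSS settings, e.g.
+ *	(illustrative):
+ *
+ *		union rss_vi_config config;
+ *
+ *		err = t4vf_read_rss_vi_config(adapter, viid, &config);
+ *		config.basicvirtual.udpen = 1;
+ *		err = t4vf_write_rss_vi_config(adapter, viid, &config);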
+ */ +int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, + union rss_vi_config *config) +{ + struct fw_rss_vi_config_cmd cmd, rpl; + int v; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ | + FW_RSS_VI_CONFIG_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); + + config->basicvirtual.ip6fourtupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0); + config->basicvirtual.ip6twotupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0); + config->basicvirtual.ip4fourtupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0); + config->basicvirtual.ip4twotupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0); + config->basicvirtual.udpen = + ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0); + config->basicvirtual.defaultq = + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word); + break; + } + + default: + return -EINVAL; + } + + return 0; +} + +/** + * t4vf_write_rss_vi_config - write a VI's RSS configuration + * @adapter: the adapter + * @viid: Virtual Interface ID + * @config: pointer to host-native VI RSS Configuration buffer + * + * Write the Virtual Interface's RSS configuration information + * (translating it into firmware-native format before writing). + */ +int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, + union rss_vi_config *config) +{ + struct fw_rss_vi_config_cmd cmd, rpl; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_RSS_VI_CONFIG_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = 0; + + if (config->basicvirtual.ip6fourtupen) + word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + if (config->basicvirtual.ip6twotupen) + word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + if (config->basicvirtual.ip4fourtupen) + word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + if (config->basicvirtual.ip4twotupen) + word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + if (config->basicvirtual.udpen) + word |= FW_RSS_VI_CONFIG_CMD_UDPEN; + word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ( + config->basicvirtual.defaultq); + cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); + break; + } + + default: + return -EINVAL; + } + + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); +} + +/** + * t4vf_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @viid: Virtual Interface of RSS Table Slice + * @start: starting entry in the table to write + * @n: how many table entries to write + * @rspq: values for the "Response Queue" (Ingress Queue) lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range 0..1023. 
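+ *
+ *	A typical (illustrative) use is to spread a VI's ingress queues
+ *	across the VI's whole RSS slice:
+ *
+ *		err = t4vf_config_rss_range(adapter, pi->viid, 0,
+ *					    pi->rss_size, rspq, nrspq);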
+ */ +int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, + int start, int n, const u16 *rspq, int nrspq) +{ + const u16 *rsp = rspq; + const u16 *rsp_end = rspq+nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + /* + * Initialize firmware command template to write the RSS table. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + + /* + * Each firmware RSS command can accommodate up to 32 RSS Ingress + * Queue Identifiers. These Ingress Queue IDs are packed three to + * a 32-bit word as 10-bit values with the upper remaining 2 bits + * reserved. + */ + while (n > 0) { + __be32 *qp = &cmd.iq0_to_iq2; + int nq = min(n, 32); + int ret; + + /* + * Set up the firmware RSS command header to send the next + * "nq" Ingress Queue IDs to the firmware. + */ + cmd.niqid = cpu_to_be16(nq); + cmd.startidx = cpu_to_be16(start); + + /* + * "nq" more done for the start of the next loop. + */ + start += nq; + n -= nq; + + /* + * While there are still Ingress Queue IDs to stuff into the + * current firmware RSS command, retrieve them from the + * Ingress Queue ID array and insert them into the command. + */ + while (nq > 0) { + /* + * Grab up to the next 3 Ingress Queue IDs (wrapping + * around the Ingress Queue ID array if necessary) and + * insert them into the firmware RSS command at the + * current 3-tuple position within the commad. + */ + u16 qbuf[3]; + u16 *qbp = qbuf; + int nqbuf = min(3, nq); + + nq -= nqbuf; + qbuf[0] = qbuf[1] = qbuf[2] = 0; + while (nqbuf) { + nqbuf--; + *qbp++ = *rsp++; + if (rsp >= rsp_end) + rsp = rspq; + } + *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | + FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | + FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); + } + + /* + * Send this portion of the RRS table update to the firmware; + * bail out on any errors. + */ + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4vf_alloc_vi - allocate a virtual interface on a port + * @adapter: the adapter + * @port_id: physical port associated with the VI + * + * Allocate a new Virtual Interface and bind it to the indicated + * physical port. Return the new Virtual Interface Identifier on + * success, or a [negative] error number on failure. + */ +int t4vf_alloc_vi(struct adapter *adapter, int port_id) +{ + struct fw_vi_cmd cmd, rpl; + int v; + + /* + * Execute a VI command to allocate Virtual Interface and return its + * VIID. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | + FW_VI_CMD_ALLOC); + cmd.portid_pkd = FW_VI_CMD_PORTID(port_id); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid)); +} + +/** + * t4vf_free_vi -- free a virtual interface + * @adapter: the adapter + * @viid: the virtual interface identifier + * + * Free a previously allocated Virtual Interface. Return an error on + * failure. + */ +int t4vf_free_vi(struct adapter *adapter, int viid) +{ + struct fw_vi_cmd cmd; + + /* + * Execute a VI command to free the Virtual Interface. 
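+	 * The VIID passed in will normally have come from an earlier
+	 * call to t4vf_alloc_vi(), e.g. (illustrative):
+	 *
+	 *	viid = t4vf_alloc_vi(adapter, pi->port_id);
+	 *	...
+	 *	t4vf_free_vi(adapter, viid);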
+ */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | + FW_VI_CMD_FREE); + cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_enable_vi - enable/disable a virtual interface + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. + */ +int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, + bool rx_en, bool tx_en) +{ + struct fw_vi_enable_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC | + FW_VI_ENABLE_CMD_VIID(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) | + FW_VI_ENABLE_CMD_EEN(tx_en) | + FW_LEN16(cmd)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_identify_port - identify a VI's port by blinking its LED + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @nblinks: how many times to blink LED at 2.5 Hz + * + * Identifies a VI's port by blinking its LED. + */ +int t4vf_identify_port(struct adapter *adapter, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC | + FW_VI_ENABLE_CMD_VIID(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED | + FW_LEN16(cmd)); + cmd.blinkdur = cpu_to_be16(nblinks); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_set_rxmode - set Rx properties of a virtual interface + * @adapter: the adapter + * @viid: the VI id + * @mtu: the new MTU or -1 for no change + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, + * -1 no change + * + * Sets Rx properties of a virtual interface. 
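+ *
+ *	Passing -1 leaves the corresponding property unchanged; for
+ *	example, to enable promiscuous mode without touching anything
+ *	else (illustrative):
+ *
+ *		t4vf_set_rxmode(adapter, pi->viid, -1, 1, -1, -1, -1, true);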
+ */ +int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd cmd; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_VI_RXMODE_CMD_MTU_MASK; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + if (vlanex < 0) + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_VI_RXMODE_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + cmd.mtu_to_vlanexen = + cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); +} + +/** + * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adapter: the adapter + * @viid: the Virtual Interface Identifier + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, + unsigned int naddr, const u8 **addr, u16 *idx, + u64 *hash, bool sleep_ok) +{ + int i, ret; + struct fw_vi_mac_cmd cmd, rpl; + struct fw_vi_mac_exact *p; + size_t len16; + + if (naddr > ARRAY_SIZE(cmd.u.exact)) + return -EINVAL; + len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[naddr]), 16); + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + (free ? FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16(len16)); + + for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) { + p->valid_to_idx = + cpu_to_be16(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); + } + + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok); + if (ret) + return ret; + + for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES + ? 
0xffff + : index); + if (index < FW_CLS_TCAM_NUM_ENTRIES) + ret++; + else if (hash) + *hash |= (1 << hash_mac_addr(addr[i])); + } + return ret; +} + +/** + * t4vf_change_mac - modifies the exact-match filter for a MAC address + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: if idx < 0, the new MAC allocation should be persistent + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the + * one being used by the old address value and allocate a new filter for + * the new address value. @idx can be -1 if the address is a new + * addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4vf_change_mac(struct adapter *adapter, unsigned int viid, + int idx, const u8 *addr, bool persist) +{ + int ret; + struct fw_vi_mac_cmd cmd, rpl; + struct fw_vi_mac_exact *p = &cmd.u.exact[0]; + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[1]), 16); + + /* + * If this is a new allocation, determine whether it should be + * persistent (across a "freemacs" operation) or not. + */ + if (idx < 0) + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_VI_MAC_CMD_VIID(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret == 0) { + p = &rpl.u.exact[0]; + ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); + if (ret >= FW_CLS_TCAM_NUM_ENTRIES) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4vf_set_addr_hash - program the MAC inexact-match hash filter + * @adapter: the adapter + * @viid: the Virtual Interface Identifier + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. + */ +int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd cmd; + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[0]), 16); + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_VI_ENABLE_CMD_VIID(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN | + FW_VI_MAC_CMD_HASHUNIEN(ucast) | + FW_CMD_LEN16(len16)); + cmd.u.hash.hashvec = cpu_to_be64(vec); + return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); +} + +/** + * t4vf_get_port_stats - collect "port" statistics + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface. 
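+ *
+ *	A minimal (illustrative) sketch of a caller, e.g. from the
+ *	driver's statistics path:
+ *
+ *		struct t4vf_port_stats stats;
+ *
+ *		err = t4vf_get_port_stats(adapter, pidx, &stats);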
+ */ +int t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct t4vf_port_stats *s) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + struct fw_vi_stats_vf fwstats; + unsigned int rem = VI_VF_NUM_STATS; + __be64 *fwsp = (__be64 *)&fwstats; + + /* + * Grab the Virtual Interface statistics a chunk at a time via mailbox + * commands. We could use a Work Request and get all of them at once + * but that's an asynchronous interface which is awkward to use. + */ + while (rem) { + unsigned int ix = VI_VF_NUM_STATS - rem; + unsigned int nstats = min(6U, rem); + struct fw_vi_stats_cmd cmd, rpl; + size_t len = (offsetof(struct fw_vi_stats_cmd, u) + + sizeof(struct fw_vi_stats_ctl)); + size_t len16 = DIV_ROUND_UP(len, 16); + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) | + FW_VI_STATS_CMD_VIID(pi->viid) | + FW_CMD_REQUEST | + FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.u.ctl.nstats_ix = + cpu_to_be16(FW_VI_STATS_CMD_IX(ix) | + FW_VI_STATS_CMD_NSTATS(nstats)); + ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); + if (ret) + return ret; + + memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); + + rem -= nstats; + fwsp += nstats; + } + + /* + * Translate firmware statistics into host native statistics. + */ + s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); + s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); + s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); + s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); + s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); + s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); + s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); + s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); + s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); + + s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); + s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); + s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); + s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); + s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); + s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); + + s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); + + return 0; +} + +/** + * t4vf_iq_free - free an ingress queue and its free lists + * @adapter: the adapter + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue ID + * @fl0id: FL0 queue ID or 0xffff if no attached FL0 + * @fl1id: FL1 queue ID or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated free lists, if any. + */ +int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, + unsigned int iqid, unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE | + FW_LEN16(cmd)); + cmd.type_to_iqandstindex = + cpu_to_be32(FW_IQ_CMD_TYPE(iqtype)); + + cmd.iqid = cpu_to_be16(iqid); + cmd.fl0id = cpu_to_be16(fl0id); + cmd.fl1id = cpu_to_be16(fl1id); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_eth_eq_free - free an Ethernet egress queue + * @adapter: the adapter + * @eqid: egress queue ID + * + * Frees an Ethernet egress queue. 
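+ *
+ *	Together with t4vf_iq_free() above, this forms the teardown path
+ *	for the queues a VF driver has set up, e.g. (illustrative IDs):
+ *
+ *		t4vf_eth_eq_free(adapter, txq_eqid);
+ *		t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, rspq_iqid,
+ *			     flid, 0xffff);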
+ */ +int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) +{ + struct fw_eq_eth_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE | + FW_LEN16(cmd)); + cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_handle_fw_rpl - process a firmware reply message + * @adapter: the adapter + * @rpl: start of the firmware message + * + * Processes a firmware message, such as link state change messages. + */ +int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) +{ + struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl; + u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); + + switch (opcode) { + case FW_PORT_CMD: { + /* + * Link/module state change message. + */ + const struct fw_port_cmd *port_cmd = (void *)rpl; + u32 word; + int action, port_id, link_ok, speed, fc, pidx; + + /* + * Extract various fields from port status change message. + */ + action = FW_PORT_CMD_ACTION_GET( + be32_to_cpu(port_cmd->action_to_len16)); + if (action != FW_PORT_ACTION_GET_PORT_INFO) { + dev_err(adapter->pdev_dev, + "Unknown firmware PORT reply action %x\n", + action); + break; + } + + port_id = FW_PORT_CMD_PORTID_GET( + be32_to_cpu(port_cmd->op_to_portid)); + + word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); + link_ok = (word & FW_PORT_CMD_LSTATUS) != 0; + speed = 0; + fc = 0; + if (word & FW_PORT_CMD_RXPAUSE) + fc |= PAUSE_RX; + if (word & FW_PORT_CMD_TXPAUSE) + fc |= PAUSE_TX; + if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + speed = SPEED_100; + else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + speed = SPEED_1000; + else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + speed = SPEED_10000; + + /* + * Scan all of our "ports" (Virtual Interfaces) looking for + * those bound to the physical port which has changed. If + * our recorded state doesn't match the current state, + * signal that change to the OS code. 
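+		 * (The notification reaches the OS code via the
+		 * t4vf_os_link_changed() callback invoked below.)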
+ */ + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + struct link_config *lc; + + if (pi->port_id != port_id) + continue; + + lc = &pi->link_cfg; + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { + /* something changed */ + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + t4vf_os_link_changed(adapter, pidx, link_ok); + } + } + break; + } + + default: + dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n", + opcode); + } + return 0; +} diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 08e82b1a0b336219279738984c8511c0fbe50c4e..d0824e322068d4842ebdee10a7cce87a87a7247c 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -298,6 +298,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_CTRL_EWCTL (0x4) #define EMAC_CTRL_EWINTTCNT (0x8) +/* EMAC DM644x control module masks */ +#define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF +#define EMAC_DM644X_INTMIN_INTVL 0x1 +#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK) + /* EMAC MDIO related */ /* Mask & Control defines */ #define MDIO_CONTROL_CLKDIV (0xFF) @@ -318,8 +323,20 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define MDIO_CONTROL (0x04) /* EMAC DM646X control module registers */ -#define EMAC_DM646X_CMRXINTEN (0x14) -#define EMAC_DM646X_CMTXINTEN (0x18) +#define EMAC_DM646X_CMINTCTRL 0x0C +#define EMAC_DM646X_CMRXINTEN 0x14 +#define EMAC_DM646X_CMTXINTEN 0x18 +#define EMAC_DM646X_CMRXINTMAX 0x70 +#define EMAC_DM646X_CMTXINTMAX 0x74 + +/* EMAC DM646X control module masks */ +#define EMAC_DM646X_INTPACEEN (0x3 << 16) +#define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0) +#define EMAC_DM646X_CMINTMAX_CNT 63 +#define EMAC_DM646X_CMINTMIN_CNT 2 +#define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT) +#define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1) + /* EMAC EOI codes for C0 */ #define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) @@ -468,9 +485,10 @@ struct emac_priv { u32 duplex; /* Link duplex: 0=Half, 1=Full */ u32 rx_buf_size; u32 isr_count; + u32 coal_intvl; + u32 bus_freq_mhz; u8 rmii_en; u8 version; - struct net_device_stats net_dev_stats; u32 mac_hash1; u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; @@ -546,9 +564,11 @@ static void emac_dump_regs(struct emac_priv *priv) /* Print important registers in EMAC */ dev_info(emac_dev, "EMAC Basic registers\n"); - dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", - emac_ctrl_read(EMAC_CTRL_EWCTL), - emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); + if (priv->version == EMAC_VERSION_1) { + dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", + emac_ctrl_read(EMAC_CTRL_EWCTL), + emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); + } dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n", emac_read(EMAC_TXIDVER), ((emac_read(EMAC_TXCONTROL)) ? 
"enabled" : "disabled"), @@ -691,6 +711,103 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) } +/** + * emac_get_coalesce : Get interrupt coalesce settings for this device + * @ndev : The DaVinci EMAC network adapter + * @coal : ethtool coalesce settings structure + * + * Fetch the current interrupt coalesce settings + * + */ +static int emac_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct emac_priv *priv = netdev_priv(ndev); + + coal->rx_coalesce_usecs = priv->coal_intvl; + return 0; + +} + +/** + * emac_set_coalesce : Set interrupt coalesce settings for this device + * @ndev : The DaVinci EMAC network adapter + * @coal : ethtool coalesce settings structure + * + * Set interrupt coalesce parameters + * + */ +static int emac_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct emac_priv *priv = netdev_priv(ndev); + u32 int_ctrl, num_interrupts = 0; + u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0; + + if (!coal->rx_coalesce_usecs) + return -EINVAL; + + coal_intvl = coal->rx_coalesce_usecs; + + switch (priv->version) { + case EMAC_VERSION_2: + int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL); + prescale = priv->bus_freq_mhz * 4; + + if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL) + coal_intvl = EMAC_DM646X_CMINTMIN_INTVL; + + if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) { + /* + * Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. + */ + addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL + * addnl_dvdr)) + coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = EMAC_DM646X_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + + int_ctrl |= EMAC_DM646X_INTPACEEN; + int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK); + int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK); + emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl); + + emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts); + emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts); + + break; + default: + int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT); + int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK); + prescale = coal_intvl * priv->bus_freq_mhz; + if (prescale > EMAC_DM644X_EWINTCNT_MASK) { + prescale = EMAC_DM644X_EWINTCNT_MASK; + coal_intvl = prescale / priv->bus_freq_mhz; + } + emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale)); + + break; + } + + printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl); + priv->coal_intvl = coal_intvl; + + return 0; + +} + + /** * ethtool_ops: DaVinci EMAC Ethtool structure * @@ -702,6 +819,8 @@ static const struct ethtool_ops ethtool_ops = { .get_settings = emac_get_settings, .set_settings = emac_set_settings, .get_link = ethtool_op_get_link, + .get_coalesce = emac_get_coalesce, + .set_coalesce = emac_set_coalesce, }; /** @@ -1180,16 +1299,17 @@ static int emac_net_tx_complete(struct emac_priv *priv, void **net_data_tokens, int num_tokens, u32 ch) { + struct net_device *ndev = priv->ndev; u32 cnt; - if (unlikely(num_tokens && netif_queue_stopped(priv->ndev))) - netif_start_queue(priv->ndev); + if (unlikely(num_tokens && netif_queue_stopped(ndev))) + netif_start_queue(ndev); for (cnt = 0; cnt < num_tokens; cnt++) { struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt]; if (skb == NULL) continue; - priv->net_dev_stats.tx_packets++; - priv->net_dev_stats.tx_bytes += skb->len; + 
ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; dev_kfree_skb_any(skb); } return 0; @@ -1476,7 +1596,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) " err. Out of TX BD's"); netif_stop_queue(priv->ndev); } - priv->net_dev_stats.tx_dropped++; + ndev->stats.tx_dropped++; return NETDEV_TX_BUSY; } @@ -1501,7 +1621,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev) if (netif_msg_tx_err(priv)) dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); - priv->net_dev_stats.tx_errors++; + ndev->stats.tx_errors++; emac_int_disable(priv); emac_stop_txch(priv, EMAC_DEF_TX_CH); emac_cleanup_txch(priv, EMAC_DEF_TX_CH); @@ -1926,14 +2046,14 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch, static int emac_net_rx_cb(struct emac_priv *priv, struct emac_netpktobj *net_pkt_list) { - struct sk_buff *p_skb; - p_skb = (struct sk_buff *)net_pkt_list->pkt_token; + struct net_device *ndev = priv->ndev; + struct sk_buff *p_skb = net_pkt_list->pkt_token; /* set length of packet */ skb_put(p_skb, net_pkt_list->pkt_length); p_skb->protocol = eth_type_trans(p_skb, priv->ndev); netif_receive_skb(p_skb); - priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length; - priv->net_dev_stats.rx_packets++; + ndev->stats.rx_bytes += net_pkt_list->pkt_length; + ndev->stats.rx_packets++; return 0; } @@ -2148,7 +2268,7 @@ static int emac_poll(struct napi_struct *napi, int budget) struct net_device *ndev = priv->ndev; struct device *emac_dev = &ndev->dev; u32 status = 0; - u32 num_pkts = 0; + u32 num_tx_pkts = 0, num_rx_pkts = 0; /* Check interrupt vectors and call packet processing */ status = emac_read(EMAC_MACINVECTOR); @@ -2159,27 +2279,19 @@ static int emac_poll(struct napi_struct *napi, int budget) mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC; if (status & mask) { - num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH, + num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH, EMAC_DEF_TX_MAX_SERVICE); } /* TX processing */ - if (num_pkts) - return budget; - mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC; if (priv->version == EMAC_VERSION_2) mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC; if (status & mask) { - num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget); + num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget); } /* RX processing */ - if (num_pkts < budget) { - napi_complete(napi); - emac_int_enable(priv); - } - mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT; if (priv->version == EMAC_VERSION_2) mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT; @@ -2210,9 +2322,12 @@ static int emac_poll(struct napi_struct *napi, int budget) dev_err(emac_dev, "RX Host error %s on ch=%d\n", &emac_rxhost_errcodes[cause][0], ch); } - } /* Host error processing */ + } else if (num_rx_pkts < budget) { + napi_complete(napi); + emac_int_enable(priv); + } - return num_pkts; + return num_rx_pkts; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2437,6 +2552,14 @@ static int emac_dev_open(struct net_device *ndev) /* Start/Enable EMAC hardware */ emac_hw_enable(priv); + /* Enable Interrupt pacing if configured */ + if (priv->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = (priv->coal_intvl << 4); + emac_set_coalesce(ndev, &coal); + } + /* find the first phy */ priv->phydev = NULL; if (priv->phy_mask) { @@ -2570,39 +2693,39 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) else stats_clear_mask = 0; - priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); + ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES); 
emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); - priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + + ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) + emac_read(EMAC_TXSINGLECOLL) + emac_read(EMAC_TXMULTICOLL)); emac_write(EMAC_TXCOLLISION, stats_clear_mask); emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); emac_write(EMAC_TXMULTICOLL, stats_clear_mask); - priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + + ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + emac_read(EMAC_RXJABBER) + emac_read(EMAC_RXUNDERSIZED)); emac_write(EMAC_RXOVERSIZED, stats_clear_mask); emac_write(EMAC_RXJABBER, stats_clear_mask); emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); - priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + + ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + emac_read(EMAC_RXMOFOVERRUNS)); emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); - priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); + ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); - priv->net_dev_stats.tx_carrier_errors += + ndev->stats.tx_carrier_errors += emac_read(EMAC_TXCARRIERSENSE); emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); - priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); + ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); emac_write(EMAC_TXUNDERRUN, stats_clear_mask); - return &priv->net_dev_stats; + return &ndev->stats; } static const struct net_device_ops emac_netdev_ops = { @@ -2677,6 +2800,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) priv->int_enable = pdata->interrupt_enable; priv->int_disable = pdata->interrupt_disable; + priv->coal_intvl = 0; + priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000); + emac_dev = &ndev->dev; /* Get EMAC platform data */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/depca.c b/drivers/net/depca.c index bf66e9b3b19ed1c461ceb124d81f24b110171e54..44c0694c1f4eb7e9614910970c36d288ce63c6fa 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -2061,18 +2061,35 @@ static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int __init depca_module_init (void) { - int err = 0; + int err = 0; #ifdef CONFIG_MCA - err = mca_register_driver (&depca_mca_driver); + err = mca_register_driver(&depca_mca_driver); + if (err) + goto err; #endif #ifdef CONFIG_EISA - err |= eisa_driver_register (&depca_eisa_driver); + err = eisa_driver_register(&depca_eisa_driver); + if (err) + goto err_mca; #endif - err |= platform_driver_register (&depca_isa_driver); - depca_platform_probe (); + err = platform_driver_register(&depca_isa_driver); + if (err) + goto err_eisa; - return err; + depca_platform_probe(); + return 0; + +err_eisa: +#ifdef CONFIG_EISA + eisa_driver_unregister(&depca_eisa_driver); +err_mca: +#endif +#ifdef CONFIG_MCA + mca_unregister_driver(&depca_mca_driver); +err: +#endif + return err; } static void __exit depca_module_exit (void) diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index abcc838e18af1ca6d57f5256a94828a85b9a8c4c..4fd6b2b4554b18e0072048bd2f018b5497072b89 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -961,7 +961,7 @@ struct dm9000_rxhdr { u8 RxPktReady; u8 RxStatus; __le16 RxLen; -} __attribute__((__packed__)); +} __packed; /* * Received a packet and pass to upper layer diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c 
index 8b0f50bbf3e5cd13bfa4b240387071b3d2a939c9..7c075756611ad9d20e752a99105ead383524d342 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c @@ -797,7 +797,7 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, if_mii(rq), cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static void dnet_get_drvinfo(struct net_device *dev, @@ -854,7 +854,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) dev = alloc_etherdev(sizeof(*bp)); if (!dev) { dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); - goto err_out; + goto err_out_release_mem; } /* TODO: Actually, we have some interesting features... */ @@ -911,7 +911,8 @@ static int __devinit dnet_probe(struct platform_device *pdev) if (err) dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); - if (dnet_mii_init(bp) != 0) + err = dnet_mii_init(bp); + if (err) goto err_out_unregister_netdev; dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", @@ -936,6 +937,8 @@ static int __devinit dnet_probe(struct platform_device *pdev) iounmap(bp->regs); err_out_free_dev: free_netdev(dev); +err_out_release_mem: + release_mem_region(mem_base, mem_size); err_out: return err; } diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 40b62b406b08327ace3f83626613120be8399b54..99288b95aead4460edc009434be71015730ba95f 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -86,12 +86,12 @@ struct e1000_adapter; /* TX/RX descriptor defines */ #define E1000_DEFAULT_TXD 256 #define E1000_MAX_TXD 256 -#define E1000_MIN_TXD 80 +#define E1000_MIN_TXD 48 #define E1000_MAX_82544_TXD 4096 #define E1000_DEFAULT_RXD 256 #define E1000_MAX_RXD 256 -#define E1000_MIN_RXD 80 +#define E1000_MIN_RXD 48 #define E1000_MAX_82544_RXD 4096 #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ @@ -324,18 +324,20 @@ enum e1000_state_t { extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); #define e_dbg(format, arg...) \ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) -#define e_err(format, arg...) \ - netdev_err(adapter->netdev, format, ## arg) -#define e_info(format, arg...) \ - netdev_info(adapter->netdev, format, ## arg) -#define e_warn(format, arg...) \ - netdev_warn(adapter->netdev, format, ## arg) -#define e_notice(format, arg...) \ - netdev_notice(adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) #define e_dev_info(format, arg...) \ dev_info(&adapter->pdev->dev, format, ## arg) #define e_dev_warn(format, arg...) \ dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) extern char e1000_driver_name[]; extern const char e1000_driver_version[]; diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index d5ff029aa7b226f6ab60c4d96e46c231f19b9c16..f4d0922ec65b8ee6d7541b2ceee8e46cd85bb715 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data) netdev->features &= ~NETIF_F_TSO6; - e_info("TSO is %s\n", data ? 
"Enabled" : "Disabled"); + e_info(probe, "TSO is %s\n", data ? "Enabled" : "Disabled"); adapter->tso_force = true; return 0; } @@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, writel(write & test[i], address); read = readl(address); if (read != (write & test[i] & mask)) { - e_info("pattern test reg %04X failed: " - "got 0x%08X expected 0x%08X\n", - reg, read, (write & test[i] & mask)); + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); *data = reg; return true; } @@ -734,7 +734,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, writel(write & mask, address); read = readl(address); if ((read & mask) != (write & mask)) { - e_err("set/check reg %04X test failed: " + e_err(drv, "set/check reg %04X test failed: " "got 0x%08X expected 0x%08X\n", reg, (read & mask), (write & mask)); *data = reg; @@ -779,7 +779,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ew32(STATUS, toggle); after = er32(STATUS) & toggle; if (value != after) { - e_err("failed STATUS register test got: " + e_err(drv, "failed STATUS register test got: " "0x%08X expected: 0x%08X\n", after, value); *data = 1; return 1; @@ -894,7 +894,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) *data = 1; return -1; } - e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + e_info(hw, "testing %s interrupt\n", (shared_int ? + "shared" : "unshared")); /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); @@ -1561,7 +1562,7 @@ static void e1000_diag_test(struct net_device *netdev, u8 forced_speed_duplex = hw->forced_speed_duplex; u8 autoneg = hw->autoneg; - e_info("offline testing starting\n"); + e_info(hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ @@ -1601,7 +1602,7 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) dev_open(netdev); } else { - e_info("online testing starting\n"); + e_info(hw, "online testing starting\n"); /* Online tests */ if (e1000_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1694,8 +1695,8 @@ static void e1000_get_wol(struct net_device *netdev, wol->supported &= ~WAKE_UCAST; if (adapter->wol & E1000_WUFC_EX) - e_err("Interface does not support " - "directed (unicast) frame wake-up packets\n"); + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); break; default: break; @@ -1726,8 +1727,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) switch (hw->device_id) { case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: if (wol->wolopts & WAKE_UCAST) { - e_err("Interface does not support " - "directed (unicast) frame wake-up packets\n"); + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); return -EOPNOTSUPP; } break; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 68a80893dce1daa492c23f6a7e6e0dd290645046..02833af8a0b1d8bbb2953cb353581ede85cb3b9b 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -275,7 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, netdev); if (err) { - e_err("Unable to allocate interrupt Error: %d\n", err); + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); } return err; @@ -657,7 
+657,7 @@ void e1000_reset(struct e1000_adapter *adapter) ew32(WUC, 0); if (e1000_init_hw(hw)) - e_err("Hardware Error\n"); + e_dev_err("Hardware Error\n"); e1000_update_mng_vlan(adapter); /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ @@ -925,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* initialize eeprom parameters */ if (e1000_init_eeprom_params(hw)) { - e_err("EEPROM initialization failed\n"); + e_err(probe, "EEPROM initialization failed\n"); goto err_eeprom; } @@ -936,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* make sure the EEPROM is good */ if (e1000_validate_eeprom_checksum(hw) < 0) { - e_err("The EEPROM Checksum Is Not Valid\n"); + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); e1000_dump_eeprom(adapter); /* * set MAC address to all zeroes to invalidate and temporary @@ -950,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } else { /* copy the MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw)) - e_err("EEPROM Read Error\n"); + e_err(probe, "EEPROM Read Error\n"); } /* don't block initalization here due to bad MAC address */ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) - e_err("Invalid MAC Address\n"); + e_err(probe, "Invalid MAC Address\n"); e1000_get_bus_info(hw); @@ -1047,7 +1047,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, goto err_register; /* print bus type/speed/width info */ - e_info("(PCI%s:%dMHz:%d-bit) %pM\n", + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), ((hw->bus_speed == e1000_bus_speed_133) ? 133 : (hw->bus_speed == e1000_bus_speed_120) ? 120 : @@ -1059,7 +1059,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - e_info("Intel(R) PRO/1000 Network Connection\n"); + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); cards_found++; return 0; @@ -1159,7 +1159,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) /* identify the MAC */ if (e1000_set_mac_type(hw)) { - e_err("Unknown MAC Type\n"); + e_err(probe, "Unknown MAC Type\n"); return -EIO; } @@ -1192,7 +1192,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) adapter->num_rx_queues = 1; if (e1000_alloc_queues(adapter)) { - e_err("Unable to allocate memory for queues\n"); + e_err(probe, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -1386,7 +1386,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, size = sizeof(struct e1000_buffer) * txdr->count; txdr->buffer_info = vmalloc(size); if (!txdr->buffer_info) { - e_err("Unable to allocate memory for the Tx descriptor ring\n"); + e_err(probe, "Unable to allocate memory for the Tx descriptor " + "ring\n"); return -ENOMEM; } memset(txdr->buffer_info, 0, size); @@ -1401,7 +1402,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, if (!txdr->desc) { setup_tx_desc_die: vfree(txdr->buffer_info); - e_err("Unable to allocate memory for the Tx descriptor ring\n"); + e_err(probe, "Unable to allocate memory for the Tx descriptor " + "ring\n"); return -ENOMEM; } @@ -1409,7 +1411,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { void *olddesc = txdr->desc; dma_addr_t olddma = txdr->dma; - e_err("txdr align check failed: %u bytes at 
%p\n", + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", txdr->size, txdr->desc); /* Try again, without freeing the previous */ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, @@ -1427,7 +1429,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, txdr->dma); dma_free_coherent(&pdev->dev, txdr->size, olddesc, olddma); - e_err("Unable to allocate aligned memory " + e_err(probe, "Unable to allocate aligned memory " "for the transmit descriptor ring\n"); vfree(txdr->buffer_info); return -ENOMEM; @@ -1460,7 +1462,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); if (err) { - e_err("Allocation for Tx Queue %u failed\n", i); + e_err(probe, "Allocation for Tx Queue %u failed\n", i); for (i-- ; i >= 0; i--) e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); @@ -1580,7 +1582,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, size = sizeof(struct e1000_buffer) * rxdr->count; rxdr->buffer_info = vmalloc(size); if (!rxdr->buffer_info) { - e_err("Unable to allocate memory for the Rx descriptor ring\n"); + e_err(probe, "Unable to allocate memory for the Rx descriptor " + "ring\n"); return -ENOMEM; } memset(rxdr->buffer_info, 0, size); @@ -1596,7 +1599,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, GFP_KERNEL); if (!rxdr->desc) { - e_err("Unable to allocate memory for the Rx descriptor ring\n"); + e_err(probe, "Unable to allocate memory for the Rx descriptor " + "ring\n"); setup_rx_desc_die: vfree(rxdr->buffer_info); return -ENOMEM; @@ -1606,7 +1610,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { void *olddesc = rxdr->desc; dma_addr_t olddma = rxdr->dma; - e_err("rxdr align check failed: %u bytes at %p\n", + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", rxdr->size, rxdr->desc); /* Try again, without freeing the previous */ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, @@ -1615,8 +1619,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, if (!rxdr->desc) { dma_free_coherent(&pdev->dev, rxdr->size, olddesc, olddma); - e_err("Unable to allocate memory for the Rx descriptor " - "ring\n"); + e_err(probe, "Unable to allocate memory for the Rx " + "descriptor ring\n"); goto setup_rx_desc_die; } @@ -1626,8 +1630,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, rxdr->dma); dma_free_coherent(&pdev->dev, rxdr->size, olddesc, olddma); - e_err("Unable to allocate aligned memory for the Rx " - "descriptor ring\n"); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); goto setup_rx_desc_die; } else { /* Free old allocation, new allocation was successful */ @@ -1659,7 +1663,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); if (err) { - e_err("Allocation for Rx Queue %u failed\n", i); + e_err(probe, "Allocation for Rx Queue %u failed\n", i); for (i-- ; i >= 0; i--) e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); @@ -2110,7 +2114,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); if (!mcarray) { - e_err("memory allocation failed\n"); + e_err(probe, "memory allocation failed\n"); return; } @@ -2648,7 +2652,8 @@ static 
bool e1000_tx_csum(struct e1000_adapter *adapter, break; default: if (unlikely(net_ratelimit())) - e_warn("checksum_partial proto=%x!\n", skb->protocol); + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); break; } @@ -2992,7 +2997,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* fall through */ pull_size = min((unsigned int)4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { - e_err("__pskb_pull_tail failed.\n"); + e_err(drv, "__pskb_pull_tail " + "failed.\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3140,7 +3146,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - e_err("Invalid MTU setting\n"); + e_err(probe, "Invalid MTU setting\n"); return -EINVAL; } @@ -3148,7 +3154,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) switch (hw->mac_type) { case e1000_undefined ... e1000_82542_rev2_1: if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { - e_err("Jumbo Frames not supported.\n"); + e_err(probe, "Jumbo Frames not supported.\n"); return -EINVAL; } break; @@ -3500,7 +3506,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, !(er32(STATUS) & E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ - e_err("Detected Tx Unit Hang\n" + e_err(drv, "Detected Tx Unit Hang\n" " Tx Queue <%lu>\n" " TDH <%x>\n" " TDT <%x>\n" @@ -3749,7 +3755,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* eth type trans needs skb->data to point to something */ if (!pskb_may_pull(skb, ETH_HLEN)) { - e_err("pskb_may_pull failed.\n"); + e_err(drv, "pskb_may_pull failed.\n"); dev_kfree_skb(skb); goto next_desc; } @@ -3874,7 +3880,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (adapter->discarding) { /* All receives must fit into a single buffer */ - e_info("Receive packet consumed multiple buffers\n"); + e_dbg("Receive packet consumed multiple buffers\n"); /* recycle */ buffer_info->skb = skb; if (status & E1000_RXD_STAT_EOP) @@ -3986,8 +3992,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, /* Fix for errata 23, can't cross 64kB boundary */ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { struct sk_buff *oldskb = skb; - e_err("skb align check failed: %u bytes at %p\n", - bufsz, skb->data); + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, skb->data); /* Try again, without freeing the previous */ skb = netdev_alloc_skb_ip_align(netdev, bufsz); /* Failed allocation, critical failure */ @@ -4095,8 +4101,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, /* Fix for errata 23, can't cross 64kB boundary */ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { struct sk_buff *oldskb = skb; - e_err("skb align check failed: %u bytes at %p\n", - bufsz, skb->data); + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, skb->data); /* Try again, without freeing the previous */ skb = netdev_alloc_skb_ip_align(netdev, bufsz); /* Failed allocation, critical failure */ @@ -4141,8 +4147,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, if (!e1000_check_64k_bound(adapter, (void *)(unsigned long)buffer_info->dma, adapter->rx_buffer_len)) { - e_err("dma align check failed: %u bytes at %p\n", - adapter->rx_buffer_len, + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, (void *)(unsigned long)buffer_info->dma); dev_kfree_skb(skb); buffer_info->skb = NULL; @@ -4355,7 
+4361,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw) int ret_val = pci_set_mwi(adapter->pdev); if (ret_val) - e_err("Error in setting MWI\n"); + e_err(probe, "Error in setting MWI\n"); } void e1000_pci_clear_mwi(struct e1000_hw *hw) @@ -4486,7 +4492,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) /* Fiber NICs only allow 1000 gbps Full duplex */ if ((hw->media_type == e1000_media_type_fiber) && spddplx != (SPEED_1000 + DUPLEX_FULL)) { - e_err("Unsupported Speed/Duplex configuration\n"); + e_err(probe, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } @@ -4509,7 +4515,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: - e_err("Unsupported Speed/Duplex configuration\n"); + e_err(probe, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } return 0; diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index f654db9121def8a84df0d154d9c5e54cdffa0d32..a4a0d2b6eb1c60e116811b31378a426ad00a44e2 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 4dc02c71ffd6094417f7d742e0c2e768382fddd4..307a72f483ee644fb1199776e4b0521739badb46 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -359,6 +359,7 @@ #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 #define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 @@ -714,6 +715,7 @@ #define BME1000_E_PHY_ID_R2 0x01410CB1 #define I82577_E_PHY_ID 0x01540050 #define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index c0b3db40bd73d1dce3dea5d1fa686adbc45196c7..f9a31c82f87108c3e9ef6974a6c9089a62cdb5fe 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -164,6 +164,7 @@ enum e1000_boards { board_ich9lan, board_ich10lan, board_pchlan, + board_pch2lan, }; struct e1000_queue_stats { @@ -347,6 +348,7 @@ struct e1000_adapter { u32 test_icr; u32 msg_enable; + unsigned int num_vectors; struct msix_entry *msix_entries; int int_mode; u32 eiac_mask; @@ -421,6 +423,8 @@ struct e1000_info { #define FLAG2_HAS_PHY_WAKEUP (1 << 1) #define FLAG2_IS_DISCARDING (1 << 2) #define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) @@ -458,7 +462,6 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); extern void e1000e_update_stats(struct e1000_adapter *adapter); -extern bool e1000e_has_link(struct e1000_adapter *adapter); extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); @@ -476,6 +479,7 @@ extern struct e1000_info e1000_ich8_info; extern struct e1000_info e1000_ich9_info; extern struct e1000_info e1000_ich10_info; extern struct e1000_info e1000_pch_info; +extern struct e1000_info e1000_pch2_info; extern struct e1000_info e1000_es2_info; extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); @@ -494,6 +498,8 @@ extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index 38d79a669059650fa538547af6d240d36679bcaa..45aebb4a6fe1e37f316bac0f0cb656db97829c1b 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 2c521218102b6073e8eee24026daada13028f18a..6355a1b779d3e7f0833c7cd6e96e23b3c9671fc9 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -118,7 +118,6 @@ static int e1000_get_settings(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 status; if (hw->phy.media_type == e1000_media_type_copper) { @@ -156,22 +155,29 @@ static int e1000_get_settings(struct net_device *netdev, ecmd->transceiver = XCVR_EXTERNAL; } - status = er32(STATUS); - if (status & E1000_STATUS_LU) { - if (status & E1000_STATUS_SPEED_1000) - ecmd->speed = 1000; - else if (status & E1000_STATUS_SPEED_100) - ecmd->speed = 100; - else - ecmd->speed = 10; + ecmd->speed = -1; + ecmd->duplex = -1; - if (status & E1000_STATUS_FD) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + ecmd->speed = adapter->link_speed; + ecmd->duplex = adapter->link_duplex - 1; + } } else { - ecmd->speed = -1; - ecmd->duplex = -1; + u32 status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + ecmd->speed = 1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = 100; + else + ecmd->speed = 10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } } ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || @@ -179,7 +185,7 @@ static int e1000_get_settings(struct net_device *netdev, /* MDI-X => 2; MDI =>1; Invalid =>0 */ if ((hw->phy.media_type == e1000_media_type_copper) && - !hw->mac.get_link_status) + netif_carrier_ok(netdev)) ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI; else @@ -191,19 +197,15 @@ static int e1000_get_settings(struct net_device *netdev, static u32 e1000_get_link(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_hw *hw = &adapter->hw; /* - * If the link is not reported up to netdev, interrupts are disabled, - * and so the physical link state may have changed since we last - * looked. Set get_link_status to make sure that the true link - * state is interrogated, rather than pulling a cached and possibly - * stale link state from the driver. + * Avoid touching hardware registers when possible, otherwise + * link negotiation can get messed up when user-level scripts + * are rapidly polling the driver to see if link is up. */ - if (!netif_carrier_ok(netdev)) - mac->get_link_status = 1; - - return e1000e_has_link(adapter); + return netif_running(netdev) ? 
netif_carrier_ok(netdev) : + !!(er32(STATUS) & E1000_STATUS_LU); } static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) @@ -880,6 +882,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) switch (mac->type) { case e1000_ich10lan: case e1000_pchlan: + case e1000_pch2lan: mask |= (1 << 18); break; default: @@ -1263,33 +1266,36 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) hw->mac.autoneg = 0; - /* Workaround: K1 must be disabled for stable 1Gbps operation */ - if (hw->mac.type == e1000_pchlan) - e1000_configure_k1_ich8lan(hw, false); - - if (hw->phy.type == e1000_phy_m88) { - /* Auto-MDI/MDIX Off */ - e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - e1e_wphy(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ - e1e_wphy(hw, PHY_CONTROL, 0x8140); - } else if (hw->phy.type == e1000_phy_gg82563) - e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); - - ctrl_reg = er32(CTRL); - - switch (hw->phy.type) { - case e1000_phy_ife: + if (hw->phy.type == e1000_phy_ife) { /* force 100, set loopback */ e1e_wphy(hw, PHY_CONTROL, 0x6100); /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ E1000_CTRL_SPD_100 |/* Force Speed to 100 */ E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + udelay(500); + + return 0; + } + + /* Specific PHY configuration for loopback */ + switch (hw->phy.type) { + case e1000_phy_m88: + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1e_wphy(hw, PHY_CONTROL, 0x8140); + break; + case e1000_phy_gg82563: + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); break; case e1000_phy_bm: /* Set Default MAC Interface speed to 1GB */ @@ -1312,23 +1318,41 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) /* Set Early Link Enable */ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); - /* fall through */ + break; + case e1000_phy_82577: + case e1000_phy_82578: + /* Workaround: K1 must be disabled for stable 1Gbps operation */ + e1000_configure_k1_ich8lan(hw, false); + break; + case e1000_phy_82579: + /* Disable PHY energy detect power down */ + e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + /* Disable full chip energy detect */ + e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); + /* Enable loopback on the PHY */ +#define I82577_PHY_LBK_CTRL 19 + e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); + break; default: - /* force 1000, set loopback */ - e1e_wphy(hw, PHY_CONTROL, 0x4140); - mdelay(250); + break; + } - /* Now set up the MAC to the same speed/duplex as the PHY. */ - ctrl_reg = er32(CTRL); - ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ - ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ - E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ + /* force 1000, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x4140); + mdelay(250); - if (adapter->flags & FLAG_IS_ICH) - ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ - } + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (adapter->flags & FLAG_IS_ICH) + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ if (hw->phy.media_type == e1000_media_type_copper && hw->phy.type == e1000_phy_m88) { @@ -1868,6 +1892,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data) if ((hw->phy.type == e1000_phy_ife) || (hw->mac.type == e1000_pchlan) || + (hw->mac.type == e1000_pch2lan) || (hw->mac.type == e1000_82583) || (hw->mac.type == e1000_82574)) { INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); @@ -2026,7 +2051,6 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, }; void e1000e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 5d1220d188d46f88e29d6f8c9760a3fc3c92a54d..66ed08f726fb9bcdd13225086d05f3dcf437da6a 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -217,7 +217,10 @@ enum e1e_registers { E1000_SWSM = 0x05B50, /* SW Semaphore */ E1000_FWSM = 0x05B54, /* FW Semaphore */ E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ - E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */ + E1000_FFLT_DBG = 0x05F04, /* Debug Register */ + E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ +#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) +#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE E1000_HICR = 0x08F00, /* Host Interface Control */ }; @@ -303,13 +306,14 @@ enum e1e_registers { #define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 #define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 #define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ #define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ #define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ #define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 -#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E -#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 #define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ @@ -387,6 +391,8 @@ enum e1e_registers { #define E1000_DEV_ID_PCH_M_HV_LC 0x10EB #define E1000_DEV_ID_PCH_D_HV_DM 0x10EF #define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 #define E1000_REVISION_4 4 @@ -406,6 +412,7 @@ enum e1000_mac_type { e1000_ich9lan, e1000_ich10lan, e1000_pchlan, + e1000_pch2lan, }; enum e1000_media_type { @@ -442,6 +449,7 @@ enum e1000_phy_type { e1000_phy_bm, e1000_phy_82578, e1000_phy_82577, + e1000_phy_82579, }; enum 
e1000_bus_width { @@ -929,6 +937,7 @@ struct e1000_dev_spec_ich8lan { bool kmrn_lock_loss_workaround_enabled; struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; bool nvm_k1_enabled; + bool eee_disable; }; struct e1000_hw { diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index b2507d93de99834be065cc638fe105796863e227..63930d12711cf44ceb3334e5334fef5089ecb2e2 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -52,6 +52,8 @@ * 82577LC Gigabit Network Connection * 82578DM Gigabit Network Connection * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection */ #include "e1000.h" @@ -126,6 +128,13 @@ #define HV_SMB_ADDR_PEC_EN 0x0200 #define HV_SMB_ADDR_VALID 0x0080 +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 + /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 @@ -226,6 +235,8 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) { @@ -277,13 +288,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + /* + * The MAC-PHY interconnect may still be in SMBus mode + * after Sx->S0. If the manageability engine (ME) is + * disabled, then toggle the LANPHYPC Value bit to force + * the interconnect to PCIe mode. + */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { - /* - * The MAC-PHY interconnect may still be in SMBus mode - * after Sx->S0. Toggle the LANPHYPC Value bit to force - * the interconnect to PCIe mode, but only if there is no - * firmware present otherwise firmware will have done it. 
- */ ctrl = er32(CTRL); ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; @@ -324,6 +335,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) switch (phy->type) { case e1000_phy_82577: + case e1000_phy_82579: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; @@ -515,6 +527,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; /* ID LED init */ mac->ops.id_led_init = e1000e_id_led_init; /* setup LED */ @@ -526,6 +540,9 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) mac->ops.led_off = e1000_led_off_ich8lan; break; case e1000_pchlan: + case e1000_pch2lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_pchlan; /* setup LED */ @@ -544,9 +561,46 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + /* Disable PHY configuration by hardware, config by software */ + if (mac->type == e1000_pch2lan) { + u32 extcnf_ctrl = er32(EXTCNF_CTRL); + + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } + return 0; } +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. The bits in + * the LPI Control register will remain set only if/when link is up. + **/ +static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_reg; + + if (hw->phy.type != e1000_phy_82579) + goto out; + + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + goto out; + + if (hw->dev_spec.ich8lan.eee_disable) + phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; + else + phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; + + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); +out: + return ret_val; +} + /** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure @@ -604,6 +658,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) */ e1000e_check_downshift(hw); + /* Enable/Disable EEE after link up */ + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + goto out; + /* * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. 
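
For reference, the EEE switch introduced above is a plain read-modify-write of the 82579 LPI Control PHY register: the driver reads I82579_LPI_CTRL, sets or clears the enable mask according to dev_spec.ich8lan.eee_disable, and writes the value back. The standalone sketch below shows only that mask handling; the phy_read()/phy_write() helpers and the initial register value are made up for illustration and are not driver interfaces.

  #include <stdint.h>
  #include <stdio.h>

  #define LPI_CTRL_ENABLE_MASK 0x6000  /* mirrors I82579_LPI_CTRL_ENABLE_MASK */

  static uint16_t lpi_ctrl = 0x0123;   /* simulated PHY register contents */

  static uint16_t phy_read(void) { return lpi_ctrl; }   /* hypothetical helper */
  static void phy_write(uint16_t v) { lpi_ctrl = v; }   /* hypothetical helper */

  /* Toggle only the EEE enable bits, leaving the rest of the register alone */
  static void set_eee(int eee_disable)
  {
          uint16_t reg = phy_read();

          if (eee_disable)
                  reg &= ~LPI_CTRL_ENABLE_MASK;
          else
                  reg |= LPI_CTRL_ENABLE_MASK;

          phy_write(reg);
  }

  int main(void)
  {
          set_eee(1);
          printf("EEE disabled: LPI_CTRL=0x%04x\n", phy_read());
          set_eee(0);
          printf("EEE enabled:  LPI_CTRL=0x%04x\n", phy_read());
          return 0;
  }

As the kernel-doc comment above notes, on real hardware the LPI Control bits only remain set while link is up; the sketch has no such dependency.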
@@ -647,10 +706,19 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) if (rc) return rc; - if (hw->mac.type == e1000_pchlan) - rc = e1000_init_phy_params_pchlan(hw); - else + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } if (rc) return rc; @@ -663,6 +731,10 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) (adapter->hw.phy.type == e1000_phy_igp_3)) adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + /* Disable EEE by default until IEEE802.3az spec is finalized */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->hw.dev_spec.ich8lan.eee_disable = true; + return 0; } @@ -774,7 +846,7 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) * e1000_check_mng_mode_ich8lan - Checks management mode * @hw: pointer to the HW structure * - * This checks if the adapter has manageability enabled. + * This checks if the adapter has any manageability enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts. **/ @@ -783,9 +855,26 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) u32 fwsm; fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; - return (fwsm & E1000_FWSM_MODE_MASK) == - (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** @@ -820,14 +909,6 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) s32 ret_val = 0; u16 word_addr, reg_data, reg_addr, phy_page = 0; - if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) && - !(hw->mac.type == e1000_pchlan)) - return ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - /* * Initialize the PHY from the NVM on ICH platforms. This * is needed due to an issue where the NVM configuration is @@ -835,12 +916,27 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) * Therefore, after each PHY reset, we will load the * configuration data out of the NVM manually. 
*/ - if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || - (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) || - (hw->mac.type == e1000_pchlan)) + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; - else - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; data = er32(FEXTNVM); if (!(data & sw_cfg_mask)) @@ -851,8 +947,10 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) * extended configuration before SW configuration */ data = er32(EXTCNF_CTRL); - if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - goto out; + if (!(hw->mac.type == e1000_pch2lan)) { + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + goto out; + } cnf_size = er32(EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; @@ -864,7 +962,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && - (hw->mac.type == e1000_pchlan)) { + ((hw->mac.type == e1000_pchlan) || + (hw->mac.type == e1000_pch2lan))) { /* * HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. @@ -1071,16 +1170,18 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) u32 mac_reg; u16 oem_reg; - if (hw->mac.type != e1000_pchlan) + if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - mac_reg = er32(EXTCNF_CTRL); - if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) - goto out; + if (!(hw->mac.type == e1000_pch2lan)) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto out; + } mac_reg = er32(FEXTNVM); if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) @@ -1220,6 +1321,243 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) return ret_val; } +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + mac_reg = er32(RAL(i)); + e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); + e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF)); + mac_reg = er32(RAH(i)); + e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); + e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000)); + } +} + +static u32 e1000_calc_rx_da_crc(u8 mac[]) +{ + u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ + u32 i, j, mask, crc; + + crc = 0xffffffff; + for (i = 0; i < 6; i++) { + crc = crc ^ mac[i]; + for (j = 8; j > 0; j--) { + mask = (crc & 1) * (-1); + crc = (crc >> 1) ^ (poly & mask); + } + } + return ~crc; +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + 
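
As an aside on the helper just defined: e1000_calc_rx_da_crc() is the usual bit-reflected CRC-32 (reflected 802.3 polynomial 0xEDB88320) computed over the six bytes of a receive address, and its result is what seeds the PCH_RAICC registers in the jumbo-frame workaround that follows. A self-contained sketch of the same loop is shown below; the sample MAC address is invented for illustration only.

  #include <stdint.h>
  #include <stdio.h>

  /* Bit-reflected CRC-32 over a 6-byte MAC address */
  static uint32_t rx_da_crc(const uint8_t mac[6])
  {
          const uint32_t poly = 0xEDB88320;     /* reflected 802.3 polynomial */
          uint32_t crc = 0xFFFFFFFF;

          for (int i = 0; i < 6; i++) {
                  crc ^= mac[i];
                  for (int j = 0; j < 8; j++) {
                          uint32_t mask = -(crc & 1u);  /* all ones if LSB set */
                          crc = (crc >> 1) ^ (poly & mask);
                  }
          }
          return ~crc;
  }

  int main(void)
  {
          const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 }; /* example address */

          printf("initial CRC: 0x%08x\n", (unsigned int)rx_da_crc(mac));
          return 0;
  }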
+	u16 i;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* disable Rx path while enabling/disabling workaround */
+	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+	if (ret_val)
+		goto out;
+
+	if (enable) {
+		/*
+		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+		 * SHRAL/H) and initial CRC values to the MAC
+		 */
+		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+			u8 mac_addr[ETH_ALEN] = {0};
+			u32 addr_high, addr_low;
+
+			addr_high = er32(RAH(i));
+			if (!(addr_high & E1000_RAH_AV))
+				continue;
+			addr_low = er32(RAL(i));
+			mac_addr[0] = (addr_low & 0xFF);
+			mac_addr[1] = ((addr_low >> 8) & 0xFF);
+			mac_addr[2] = ((addr_low >> 16) & 0xFF);
+			mac_addr[3] = ((addr_low >> 24) & 0xFF);
+			mac_addr[4] = (addr_high & 0xFF);
+			mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+			ew32(PCH_RAICC(i),
+			     e1000_calc_rx_da_crc(mac_addr));
+		}
+
+		/* Write Rx addresses to the PHY */
+		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+		/* Enable jumbo frame workaround in the MAC */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(1 << 14);
+		mac_reg |= (7 << 15);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg |= E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
+					       &data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data | (1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+					       E1000_KMRNCTRLSTA_HD_CTRL,
+					       &data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Enable jumbo frame workaround in the PHY */
+		e1e_rphy(hw, PHY_REG(769, 20), &data);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		data |= (0x37 << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data &= ~(1 << 13);
+		data |= (1 << 12);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x1A << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+		if (ret_val)
+			goto out;
+	} else {
+		/* Write MAC register values back to h/w defaults */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(0xF << 14);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg &= ~E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
+					       &data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data & ~(1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+					       E1000_KMRNCTRLSTA_HD_CTRL,
+					       &data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Write PHY register values back to h/w defaults */
+		e1e_rphy(hw, PHY_REG(769, 20), &data);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		ret_val =
e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 12); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if (ret_val) + goto out; + } + + /* re-enable Rx path after enabling/disabling workaround */ + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); + +out: + return ret_val; +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + +out: + return ret_val; +} + /** * e1000_lan_init_done_ich8lan - Check for PHY config completion * @hw: pointer to the HW structure @@ -1271,12 +1609,17 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) if (ret_val) goto out; break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; default: break; } /* Dummy read to clear the phy wakeup bit after lcd reset */ - if (hw->mac.type == e1000_pchlan) + if (hw->mac.type >= e1000_pchlan) e1e_rphy(hw, BM_WUC, ®); /* Configure the LCD with the extended configuration region in NVM */ @@ -2800,6 +3143,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) ew32(FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_82577)) { ew32(FCRTV_PCH, hw->fc.refresh_time); @@ -2863,6 +3207,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; break; case e1000_phy_82577: + case e1000_phy_82579: ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; @@ -3116,21 +3461,12 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) { u32 phy_ctrl; - switch (hw->mac.type) { - case e1000_ich8lan: - case e1000_ich9lan: - case e1000_ich10lan: - case e1000_pchlan: - phy_ctrl = er32(PHY_CTRL); - phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | - E1000_PHY_CTRL_GBE_DISABLE; - ew32(PHY_CTRL, phy_ctrl); + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; + ew32(PHY_CTRL, phy_ctrl); - if (hw->mac.type == e1000_pchlan) - e1000_phy_hw_reset_ich8lan(hw); - default: - break; - } + if (hw->mac.type >= e1000_pchlan) + e1000_phy_hw_reset_ich8lan(hw); } /** @@ -3370,6 +3706,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) /* Clear PHY statistics registers */ if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_82577)) { hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); @@ -3390,7 +3727,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) static struct e1000_mac_operations ich8_mac_ops = { .id_led_init = e1000e_id_led_init, - .check_mng_mode = e1000_check_mng_mode_ich8lan, + /* check_mng_mode dependent on mac type */ .check_for_link = e1000_check_for_copper_link_ich8lan, /* cleanup_led dependent on mac type */ 
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, @@ -3497,6 +3834,7 @@ struct e1000_info e1000_pch_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, .pba = 26, .max_hw_frame_size = 4096, .get_variants = e1000_get_variants_ich8lan, @@ -3504,3 +3842,23 @@ struct e1000_info e1000_pch_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index a968e3a416ac4609678426ce25cc7ce484d16727..df4a2792293123eba7b1b6f3a589220ce1185ea1 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 9f13b660b8018a04e8c2c30e0b971e255f86e4a3..36d31a41632050f7e75d8f79a4ad16e79a02e59c 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -52,7 +52,9 @@ #include "e1000.h" -#define DRV_VERSION "1.0.2-k4" +#define DRV_EXTRAVERSION "-k2" + +#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -67,6 +69,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_ich9lan] = &e1000_ich9_info, [board_ich10lan] = &e1000_ich10_info, [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, }; struct e1000_reg_info { @@ -221,10 +224,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", 0, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, + (unsigned long long)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, - (u64)buffer_info->time_stamp); + (unsigned long long)buffer_info->time_stamp); /* Print TX Rings */ if (!netif_msg_tx_done(adapter)) @@ -276,9 +279,11 @@ static void e1000e_dump(struct e1000_adapter *adapter) "%04X %3X %016llX %p", (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : ((le64_to_cpu(u0->b) & (1<<20)) ? 
'd' : 'c')), i, - le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, buffer_info->length, - buffer_info->next_to_watch, (u64)buffer_info->time_stamp, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, buffer_info->skb); if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) printk(KERN_CONT " NTC/U\n"); @@ -353,19 +358,19 @@ static void e1000e_dump(struct e1000_adapter *adapter) printk(KERN_INFO "RWB[0x%03X] %016llX " "%016llX %016llX %016llX " "---------------- %p", i, - le64_to_cpu(u1->a), - le64_to_cpu(u1->b), - le64_to_cpu(u1->c), - le64_to_cpu(u1->d), + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), buffer_info->skb); } else { printk(KERN_INFO "R [0x%03X] %016llX " "%016llX %016llX %016llX %016llX %p", i, - le64_to_cpu(u1->a), - le64_to_cpu(u1->b), - le64_to_cpu(u1->c), - le64_to_cpu(u1->d), - (u64)buffer_info->dma, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, buffer_info->skb); if (netif_msg_pktdata(adapter)) @@ -402,9 +407,11 @@ static void e1000e_dump(struct e1000_adapter *adapter) buffer_info = &rx_ring->buffer_info[i]; u0 = (struct my_u0 *)rx_desc; printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " - "%016llX %p", - i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, buffer_info->skb); + "%016llX %p", i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); if (i == rx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == rx_ring->next_to_clean) @@ -1778,25 +1785,25 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) { int err; - int numvecs, i; - + int i; switch (adapter->int_mode) { case E1000E_INT_MODE_MSIX: if (adapter->flags & FLAG_HAS_MSIX) { - numvecs = 3; /* RxQ0, TxQ0 and other */ - adapter->msix_entries = kcalloc(numvecs, + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, sizeof(struct msix_entry), GFP_KERNEL); if (adapter->msix_entries) { - for (i = 0; i < numvecs; i++) + for (i = 0; i < adapter->num_vectors; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - numvecs); - if (err == 0) + adapter->num_vectors); + if (err == 0) { return; + } } /* MSI-X failed, so fall through and try MSI */ e_err("Failed to initialize MSI-X interrupts. 
" @@ -1818,6 +1825,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) /* Don't do anything; this is the system default */ break; } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; } /** @@ -1939,7 +1949,14 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) if (adapter->msix_entries) ew32(EIAC_82574, 0); e1e_flush(); - synchronize_irq(adapter->pdev->irq); + + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } } /** @@ -2723,6 +2740,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) e1e_wphy(hw, 22, phy_data); } + /* Workaround Si errata on 82579 - configure jumbo frame flow */ + if (hw->mac.type == e1000_pch2lan) { + s32 ret_val; + + if (rctl & E1000_RCTL_LPE) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + } + /* Setup buffer sizes */ rctl &= ~E1000_RCTL_SZ_4096; rctl |= E1000_RCTL_BSEX; @@ -2759,7 +2786,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) * per packet. */ pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && + if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) adapter->rx_ps_pages = pages; else @@ -3118,7 +3145,27 @@ void e1000e_reset(struct e1000_adapter *adapter) * with ERT support assuming ERT set to E1000_ERT_2048), or * - the full Rx FIFO size minus one full frame */ - if (hw->mac.type == e1000_pchlan) { + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + default: + if ((adapter->flags & FLAG_HAS_ERT) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - (E1000_ERT_2048 << 3))); + else + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: /* * Workaround PCH LOM adapter hangs with certain network * loads. If hangs persist, try disabling Tx flow control. 
@@ -3131,26 +3178,15 @@ void e1000e_reset(struct e1000_adapter *adapter) fc->low_water = 0x3000; } fc->refresh_time = 0x1000; - } else { - if ((adapter->flags & FLAG_HAS_ERT) && - (adapter->netdev->mtu > ETH_DATA_LEN)) - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - (E1000_ERT_2048 << 3))); - else - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - adapter->max_frame_size)); - - fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ - fc->low_water = fc->high_water - 8; + break; + case e1000_pch2lan: + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + fc->refresh_time = 0x0400; + break; } - if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) - fc->pause_time = 0xFFFF; - else - fc->pause_time = E1000_FC_PAUSE_TIME; - fc->send_xon = 1; - fc->current_mode = fc->requested_mode; - /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); @@ -3162,8 +3198,6 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000_get_hw_control(adapter); ew32(WUC, 0); - if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) - e1e_wphy(&adapter->hw, BM_WUC, 0); if (mac->ops.init_hw(hw)) e_err("Hardware Error\n"); @@ -3194,12 +3228,6 @@ int e1000e_up(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - /* DMA latency requirement to workaround early-receive/jumbo issue */ - if (adapter->flags & FLAG_HAS_ERT) - pm_qos_add_request(&adapter->netdev->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - /* hardware has been reset, we need to reload some things */ e1000_configure(adapter); @@ -3263,9 +3291,6 @@ void e1000e_down(struct e1000_adapter *adapter) e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); - if (adapter->flags & FLAG_HAS_ERT) - pm_qos_remove_request(&adapter->netdev->pm_qos_req); - /* * TODO: for power management, we could drop the link and * pci_disable_device here. @@ -3416,13 +3441,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter) /* disable SERR in case the MSI write causes a master abort */ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); - pci_write_config_word(adapter->pdev, PCI_COMMAND, - pci_cmd & ~PCI_COMMAND_SERR); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); err = e1000_test_msi_interrupt(adapter); - /* restore previous setting of command word */ - pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } /* success ! */ if (!err) @@ -3495,6 +3525,12 @@ static int e1000_open(struct net_device *netdev) E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) e1000_update_mng_vlan(adapter); + /* DMA latency requirement to workaround early-receive/jumbo issue */ + if (adapter->flags & FLAG_HAS_ERT) + pm_qos_add_request(&adapter->netdev->pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + /* * before we allocate an interrupt, we must be ready to handle it. 
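
To make the flow-control arithmetic in e1000e_reset() above concrete: in the default branch, the high water mark is the smaller of 90% of the Rx packet buffer and the buffer minus one maximum frame, rounded down to 8-byte granularity, with the low water mark 8 bytes below it (PCH and PCH2 parts use the fixed values shown instead). The arithmetic in isolation, with example inputs and a plain ~7 mask standing in for E1000_FCRTH_RTH:

  #include <stdio.h>

  int main(void)
  {
          unsigned int pba = 26;            /* packet buffer size in KB, example input */
          unsigned int max_frame = 1522;    /* maximum frame size in bytes, example input */
          unsigned int fifo = pba << 10;    /* Rx packet buffer in bytes */
          unsigned int hwm, high_water, low_water;

          hwm = fifo * 9 / 10;
          if (fifo - max_frame < hwm)
                  hwm = fifo - max_frame;

          high_water = hwm & ~7u;           /* 8-byte granularity */
          low_water = high_water - 8;

          printf("pba=%u KB: high water=%u, low water=%u\n", pba, high_water, low_water);
          return 0;
  }

With these inputs the program prints a high water mark of 23960 bytes and a low water mark of 23952 bytes.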
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt @@ -3599,6 +3635,9 @@ static int e1000_close(struct net_device *netdev) if (adapter->flags & FLAG_HAS_AMT) e1000_release_hw_control(adapter); + if (adapter->flags & FLAG_HAS_ERT) + pm_qos_remove_request(&adapter->netdev->pm_qos_req); + pm_runtime_put_sync(&pdev->dev); return 0; @@ -3668,6 +3707,110 @@ static void e1000_update_phy_info(unsigned long data) schedule_work(&adapter->update_phy_task); } +/** + * e1000e_update_phy_stats - Update the PHY statistics counters + * @adapter: board private structure + **/ +static void e1000e_update_phy_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val; + u16 phy_data; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + hw->phy.addr = 1; + +#define HV_PHY_STATS_PAGE 778 + /* + * A page set is expensive so check if already on desired page. + * If not, set to the page with the PHY status registers. + */ + ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + &phy_data); + if (ret_val) + goto release; + if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) { + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (HV_PHY_STATS_PAGE << + IGP_PAGE_SHIFT)); + if (ret_val) + goto release; + } + + /* Read/clear the upper 16-bit registers and read/accumulate lower */ + + /* Single Collision Count */ + e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_SCC_LOWER & MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + adapter->stats.scc += phy_data; + + /* Excessive Collision Count */ + e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + adapter->stats.ecol += phy_data; + + /* Multiple Collision Count */ + e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_MCC_LOWER & MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + adapter->stats.mcc += phy_data; + + /* Late Collision Count */ + e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_LATECOL_LOWER & + MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + adapter->stats.latecol += phy_data; + + /* Collision Count - also used for adaptive IFS */ + e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_COLC_LOWER & MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + hw->mac.collision_delta = phy_data; + + /* Defer Count */ + e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_DC_LOWER & MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + adapter->stats.dc += phy_data; + + /* Transmit with no CRS */ + e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS, + &phy_data); + ret_val = e1000e_read_phy_reg_mdic(hw, + HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS, + &phy_data); + if (!ret_val) + adapter->stats.tncrs += phy_data; + +release: + hw->phy.ops.release(hw); +} + /** * e1000e_update_stats - Update the board statistics counters * @adapter: board private structure @@ -3677,7 +3820,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - u16 
phy_data; /* * Prevent stats update while adapter is being reset, or if the pci @@ -3697,34 +3839,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.roc += er32(ROC); adapter->stats.mpc += er32(MPC); - if ((hw->phy.type == e1000_phy_82578) || - (hw->phy.type == e1000_phy_82577)) { - e1e_rphy(hw, HV_SCC_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data)) - adapter->stats.scc += phy_data; - - e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data)) - adapter->stats.ecol += phy_data; - - e1e_rphy(hw, HV_MCC_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data)) - adapter->stats.mcc += phy_data; - - e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data)) - adapter->stats.latecol += phy_data; - - e1e_rphy(hw, HV_DC_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data)) - adapter->stats.dc += phy_data; - } else { - adapter->stats.scc += er32(SCC); - adapter->stats.ecol += er32(ECOL); - adapter->stats.mcc += er32(MCC); - adapter->stats.latecol += er32(LATECOL); - adapter->stats.dc += er32(DC); + + /* Half-duplex statistics */ + if (adapter->link_duplex == HALF_DUPLEX) { + if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { + e1000e_update_phy_stats(adapter); + } else { + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + + hw->mac.collision_delta = er32(COLC); + + if ((hw->mac.type != e1000_82574) && + (hw->mac.type != e1000_82583)) + adapter->stats.tncrs += er32(TNCRS); + } + adapter->stats.colc += hw->mac.collision_delta; } + adapter->stats.xonrxc += er32(XONRXC); adapter->stats.xontxc += er32(XONTXC); adapter->stats.xoffrxc += er32(XOFFRXC); @@ -3742,28 +3877,9 @@ void e1000e_update_stats(struct e1000_adapter *adapter) hw->mac.tx_packet_delta = er32(TPT); adapter->stats.tpt += hw->mac.tx_packet_delta; - if ((hw->phy.type == e1000_phy_82578) || - (hw->phy.type == e1000_phy_82577)) { - e1e_rphy(hw, HV_COLC_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data)) - hw->mac.collision_delta = phy_data; - } else { - hw->mac.collision_delta = er32(COLC); - } - adapter->stats.colc += hw->mac.collision_delta; adapter->stats.algnerrc += er32(ALGNERRC); adapter->stats.rxerrc += er32(RXERRC); - if ((hw->phy.type == e1000_phy_82578) || - (hw->phy.type == e1000_phy_82577)) { - e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); - if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data)) - adapter->stats.tncrs += phy_data; - } else { - if ((hw->mac.type != e1000_82574) && - (hw->mac.type != e1000_82583)) - adapter->stats.tncrs += er32(TNCRS); - } adapter->stats.cexterr += er32(CEXTERR); adapter->stats.tsctc += er32(TSCTC); adapter->stats.tsctfc += er32(TSCTFC); @@ -3862,7 +3978,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" ))); } -bool e1000e_has_link(struct e1000_adapter *adapter) +static bool e1000e_has_link(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; bool link_active = 0; @@ -4838,14 +4954,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) int retval = 0; /* copy MAC RARs to PHY RARs */ - for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) { - mac_reg = er32(RAL(i)); - e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); - e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF)); - mac_reg = er32(RAH(i)); - e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); - e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF)); - } + e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* copy MAC MTA to PHY MTA */ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { @@ -5548,8 +5657,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_sw_init; - err = -EIO; - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); @@ -5896,6 +6003,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); @@ -5932,7 +6042,7 @@ static int __init e1000_init_module(void) int ret; pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n"); + pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a150e48a117f2da94ff73727252263672e51bb8f..34aeec13bb168390f869cbb91b30a4c5655b6fa5 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index b4ac82d51b2016809a2aee7e1dd61cb644868d5e..3d3dc0c823554ab6da90d56ec2d5940338113bb7 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2010 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -2319,6 +2319,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case I82577_E_PHY_ID: phy_type = e1000_phy_82577; break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; default: phy_type = e1000_phy_unknown; break; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 8b92acb448c2691e7d2ca4de669c61e4a33677e9..3beba70b7dea5827c45927d2c9b550bb68d148f5 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -335,7 +335,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) memset(stats, 0, sizeof(*stats)); - cb2 = (void *)get_zeroed_page(GFP_ATOMIC); + cb2 = (void *)get_zeroed_page(GFP_KERNEL); if (!cb2) { ehea_error("no mem for cb2"); goto out; diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index 882c50c9c34fdaac9970fdf3d0c65d93da48f4a0..f608a6c54af5845727494c9749e2b786390dfa78 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -126,7 +126,7 @@ struct ehea_swqe { u8 immediate_data[SWQE2_MAX_IMM]; /* 0xd0 */ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; - } immdata_desc __attribute__ ((packed)); + } immdata_desc __packed; /* Send WQE Format 3 */ struct { diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h index 1eb289f773bf1b32880908d8199da790423ea13d..d6dd1b4edf6e3c80cc8e8f8fc94d5da8a89b3b28 100644 --- a/drivers/net/enic/cq_desc.h +++ b/drivers/net/enic/cq_desc.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h index 337d1943af4645f8fe3999c9d6d51494eecf1fc4..c2c0680a1146793c2bf4393dbd746835f6395aa9 100644 --- a/drivers/net/enic/cq_enet_desc.h +++ b/drivers/net/enic/cq_enet_desc.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify @@ -73,7 +73,16 @@ struct cq_enet_rq_desc { #define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) #define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) -#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4 +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12 +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3 +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13 + +#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8 #define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) #define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 @@ -96,7 +105,7 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *type, u8 *color, u16 *q_number, u16 *completed_index, u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, - u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof, + u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof, u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) @@ -136,7 +145,10 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, *vlan_stripped = (bytes_written_flags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; - *vlan = le16_to_cpu(desc->vlan); + /* + * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) + */ + *vlan_tci = le16_to_cpu(desc->vlan); if (*fcoe) { *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index 45e86d1e5b1b29545371a7a1129fbb99f94cfb40..f239aa8c6f4ce6d6e86341ab09ca461fdd76f89e 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify @@ -20,8 +20,6 @@ #ifndef _ENIC_H_ #define _ENIC_H_ -#include - #include "vnic_enet.h" #include "vnic_dev.h" #include "vnic_wq.h" @@ -34,12 +32,8 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "1.3.1.1-pp" -#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" -#define PFX DRV_NAME ": " - -#define ENIC_LRO_MAX_DESC 8 -#define ENIC_LRO_MAX_AGGR 64 +#define DRV_VERSION "1.4.1.1" +#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 @@ -116,6 +110,8 @@ struct enic { spinlock_t wq_lock[ENIC_WQ_MAX]; unsigned int wq_count; struct vlan_group *vlan_group; + u16 loop_enable; + u16 loop_tag; /* receive queue cache line section */ ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; @@ -124,8 +120,6 @@ struct enic { u64 rq_truncated_pkts; u64 rq_bad_fcs; struct napi_struct napi; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC]; /* interrupt resource cache line section */ ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; @@ -137,4 +131,9 @@ struct enic { unsigned int cq_count; }; +static inline struct device *enic_get_dev(struct enic *enic) +{ + return &(enic->pdev->dev); +} + #endif /* _ENIC_H_ */ diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index bc7d6b96de3dec39ca96b369a6656ec22ab04a12..77a7f87d498e5800fe8e9708d54b34a2e43c6bc0 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -29,12 +29,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include "cq_enet_desc.h" @@ -145,15 +145,25 @@ static int enic_get_settings(struct net_device *netdev, return 0; } +static int enic_dev_fw_info(struct enic *enic, + struct vnic_devcmd_fw_info **fw_info) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_fw_info(enic->vdev, fw_info); + spin_unlock(&enic->devcmd_lock); + + return err; +} + static void enic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct enic *enic = netdev_priv(netdev); struct vnic_devcmd_fw_info *fw_info; - spin_lock(&enic->devcmd_lock); - vnic_dev_fw_info(enic->vdev, &fw_info); - spin_unlock(&enic->devcmd_lock); + enic_dev_fw_info(enic, &fw_info); strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); @@ -191,6 +201,17 @@ static int enic_get_sset_count(struct net_device *netdev, int sset) } } +static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_stats_dump(enic->vdev, vstats); + spin_unlock(&enic->devcmd_lock); + + return err; +} + static void enic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { @@ -198,9 +219,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, struct vnic_stats *vstats; unsigned int i; - spin_lock(&enic->devcmd_lock); - vnic_dev_stats_dump(enic->vdev, &vstats); - spin_unlock(&enic->devcmd_lock); + enic_dev_stats_dump(enic, &vstats); for (i = 0; i < enic_n_tx_stats; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; 
@@ -346,7 +365,6 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_coalesce = enic_get_coalesce, .set_coalesce = enic_set_coalesce, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, }; static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) @@ -399,54 +417,55 @@ static void enic_log_q_error(struct enic *enic) for (i = 0; i < enic->wq_count; i++) { error_status = vnic_wq_error_status(&enic->wq[i]); if (error_status) - printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n", - enic->netdev->name, i, error_status); + netdev_err(enic->netdev, "WQ[%d] error_status %d\n", + i, error_status); } for (i = 0; i < enic->rq_count; i++) { error_status = vnic_rq_error_status(&enic->rq[i]); if (error_status) - printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n", - enic->netdev->name, i, error_status); + netdev_err(enic->netdev, "RQ[%d] error_status %d\n", + i, error_status); } } -static void enic_link_check(struct enic *enic) +static void enic_msglvl_check(struct enic *enic) { - int link_status = vnic_dev_link_status(enic->vdev); - int carrier_ok = netif_carrier_ok(enic->netdev); + u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); - if (link_status && !carrier_ok) { - printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name); - netif_carrier_on(enic->netdev); - } else if (!link_status && carrier_ok) { - printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name); - netif_carrier_off(enic->netdev); + if (msg_enable != enic->msg_enable) { + netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", + enic->msg_enable, msg_enable); + enic->msg_enable = msg_enable; } } static void enic_mtu_check(struct enic *enic) { u32 mtu = vnic_dev_mtu(enic->vdev); + struct net_device *netdev = enic->netdev; if (mtu && mtu != enic->port_mtu) { enic->port_mtu = mtu; - if (mtu < enic->netdev->mtu) - printk(KERN_WARNING PFX - "%s: interface MTU (%d) set higher " + if (mtu < netdev->mtu) + netdev_warn(netdev, + "interface MTU (%d) set higher " "than switch port MTU (%d)\n", - enic->netdev->name, enic->netdev->mtu, mtu); + netdev->mtu, mtu); } } -static void enic_msglvl_check(struct enic *enic) +static void enic_link_check(struct enic *enic) { - u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); + int link_status = vnic_dev_link_status(enic->vdev); + int carrier_ok = netif_carrier_ok(enic->netdev); - if (msg_enable != enic->msg_enable) { - printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n", - enic->netdev->name, enic->msg_enable, msg_enable); - enic->msg_enable = msg_enable; + if (link_status && !carrier_ok) { + netdev_info(enic->netdev, "Link UP\n"); + netif_carrier_on(enic->netdev); + } else if (!link_status && carrier_ok) { + netdev_info(enic->netdev, "Link DOWN\n"); + netif_carrier_off(enic->netdev); } } @@ -574,7 +593,7 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data) static inline void enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, - unsigned int len_left) + unsigned int len_left, int loopback) { skb_frag_t *frag; @@ -586,13 +605,14 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic, frag->page_offset, frag->size, PCI_DMA_TODEVICE), frag->size, - (len_left == 0)); /* EOP? */ + (len_left == 0), /* EOP? 
*/ + loopback); } } static inline void enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, - int vlan_tag_insert, unsigned int vlan_tag) + int vlan_tag_insert, unsigned int vlan_tag, int loopback) { unsigned int head_len = skb_headlen(skb); unsigned int len_left = skb->len - head_len; @@ -608,15 +628,15 @@ static inline void enic_queue_wq_skb_vlan(struct enic *enic, head_len, PCI_DMA_TODEVICE), head_len, vlan_tag_insert, vlan_tag, - eop); + eop, loopback); if (!eop) - enic_queue_wq_skb_cont(enic, wq, skb, len_left); + enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); } static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, - int vlan_tag_insert, unsigned int vlan_tag) + int vlan_tag_insert, unsigned int vlan_tag, int loopback) { unsigned int head_len = skb_headlen(skb); unsigned int len_left = skb->len - head_len; @@ -636,15 +656,15 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, csum_offset, hdr_len, vlan_tag_insert, vlan_tag, - eop); + eop, loopback); if (!eop) - enic_queue_wq_skb_cont(enic, wq, skb, len_left); + enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); } static inline void enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, - int vlan_tag_insert, unsigned int vlan_tag) + int vlan_tag_insert, unsigned int vlan_tag, int loopback) { unsigned int frag_len_left = skb_headlen(skb); unsigned int len_left = skb->len - frag_len_left; @@ -681,7 +701,7 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic, len, mss, hdr_len, vlan_tag_insert, vlan_tag, - eop && (len == frag_len_left)); + eop && (len == frag_len_left), loopback); frag_len_left -= len; offset += len; } @@ -707,7 +727,8 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic, dma_addr, len, (len_left == 0) && - (len == frag_len_left)); /* EOP? */ + (len == frag_len_left), /* EOP? */ + loopback); frag_len_left -= len; offset += len; } @@ -720,22 +741,26 @@ static inline void enic_queue_wq_skb(struct enic *enic, unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int vlan_tag = 0; int vlan_tag_insert = 0; + int loopback = 0; if (enic->vlan_group && vlan_tx_tag_present(skb)) { /* VLAN tag from trunking driver */ vlan_tag_insert = 1; vlan_tag = vlan_tx_tag_get(skb); + } else if (enic->loop_enable) { + vlan_tag = enic->loop_tag; + loopback = 1; } if (mss) enic_queue_wq_skb_tso(enic, wq, skb, mss, - vlan_tag_insert, vlan_tag); + vlan_tag_insert, vlan_tag, loopback); else if (skb->ip_summed == CHECKSUM_PARTIAL) enic_queue_wq_skb_csum_l4(enic, wq, skb, - vlan_tag_insert, vlan_tag); + vlan_tag_insert, vlan_tag, loopback); else enic_queue_wq_skb_vlan(enic, wq, skb, - vlan_tag_insert, vlan_tag); + vlan_tag_insert, vlan_tag, loopback); } /* netif_tx_lock held, process context with BHs disabled, or BH */ @@ -769,8 +794,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { netif_stop_queue(netdev); /* This is a hard error, log it */ - printk(KERN_ERR PFX "%s: BUG! Tx ring full when " - "queue awake!\n", netdev->name); + netdev_err(netdev, "BUG! 
Tx ring full when queue awake!\n"); spin_unlock_irqrestore(&enic->wq_lock[0], flags); return NETDEV_TX_BUSY; } @@ -792,9 +816,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev) struct net_device_stats *net_stats = &netdev->stats; struct vnic_stats *stats; - spin_lock(&enic->devcmd_lock); - vnic_dev_stats_dump(enic->vdev, &stats); - spin_unlock(&enic->devcmd_lock); + enic_dev_stats_dump(enic, &stats); net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; @@ -812,9 +834,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev) return net_stats; } -static void enic_reset_mcaddrs(struct enic *enic) +static void enic_reset_multicast_list(struct enic *enic) { enic->mc_count = 0; + enic->flags = 0; } static int enic_set_mac_addr(struct net_device *netdev, char *addr) @@ -891,6 +914,41 @@ static int enic_set_mac_address(struct net_device *netdev, void *p) return -EOPNOTSUPP; } +static int enic_dev_packet_filter(struct enic *enic, int directed, + int multicast, int broadcast, int promisc, int allmulti) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_packet_filter(enic->vdev, directed, + multicast, broadcast, promisc, allmulti); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_add_addr(enic->vdev, addr); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_del_addr(enic->vdev, addr); + spin_unlock(&enic->devcmd_lock); + + return err; +} + /* netif_tx_lock held, BHs disabled */ static void enic_set_multicast_list(struct net_device *netdev) { @@ -910,11 +968,9 @@ static void enic_set_multicast_list(struct net_device *netdev) if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) mc_count = ENIC_MULTICAST_PERFECT_FILTERS; - spin_lock(&enic->devcmd_lock); - if (enic->flags != flags) { enic->flags = flags; - vnic_dev_packet_filter(enic->vdev, directed, + enic_dev_packet_filter(enic, directed, multicast, broadcast, promisc, allmulti); } @@ -937,7 +993,7 @@ static void enic_set_multicast_list(struct net_device *netdev) mc_addr[j]) == 0) break; if (j == mc_count) - enic_del_multicast_addr(enic, enic->mc_addr[i]); + enic_dev_del_multicast_addr(enic, enic->mc_addr[i]); } for (i = 0; i < mc_count; i++) { @@ -946,7 +1002,7 @@ static void enic_set_multicast_list(struct net_device *netdev) enic->mc_addr[j]) == 0) break; if (j == enic->mc_count) - enic_add_multicast_addr(enic, mc_addr[i]); + enic_dev_add_multicast_addr(enic, mc_addr[i]); } /* Save the list to compare against next time @@ -956,8 +1012,6 @@ static void enic_set_multicast_list(struct net_device *netdev) memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); enic->mc_count = mc_count; - - spin_unlock(&enic->devcmd_lock); } /* rtnl lock is held */ @@ -1226,7 +1280,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) struct enic *enic = vnic_dev_priv(rq->vdev); struct net_device *netdev = enic->netdev; struct sk_buff *skb; - unsigned int len = netdev->mtu + ETH_HLEN; + unsigned int len = netdev->mtu + VLAN_ETH_HLEN; unsigned int os_buf_index = 0; dma_addr_t dma_addr; @@ -1263,12 +1317,24 @@ static int enic_rq_alloc_buf_a1(struct vnic_rq *rq) return 0; } +static int enic_dev_hw_version(struct enic *enic, + enum vnic_dev_hw_version *hw_ver) +{ + int err; + + spin_lock(&enic->devcmd_lock); 
+ err = vnic_dev_hw_version(enic->vdev, hw_ver); + spin_unlock(&enic->devcmd_lock); + + return err; +} + static int enic_set_rq_alloc_buf(struct enic *enic) { enum vnic_dev_hw_version hw_ver; int err; - err = vnic_dev_hw_version(enic->vdev, &hw_ver); + err = enic_dev_hw_version(enic, &hw_ver); if (err) return err; @@ -1287,51 +1353,6 @@ static int enic_set_rq_alloc_buf(struct enic *enic) return 0; } -static int enic_get_skb_header(struct sk_buff *skb, void **iphdr, - void **tcph, u64 *hdr_flags, void *priv) -{ - struct cq_enet_rq_desc *cq_desc = priv; - unsigned int ip_len; - struct iphdr *iph; - - u8 type, color, eop, sop, ingress_port, vlan_stripped; - u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; - u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; - u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; - u8 packet_error; - u16 q_number, completed_index, bytes_written, vlan, checksum; - u32 rss_hash; - - cq_enet_rq_desc_dec(cq_desc, - &type, &color, &q_number, &completed_index, - &ingress_port, &fcoe, &eop, &sop, &rss_type, - &csum_not_calc, &rss_hash, &bytes_written, - &packet_error, &vlan_stripped, &vlan, &checksum, - &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, - &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, - &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, - &fcs_ok); - - if (!(ipv4 && tcp && !ipv4_fragment)) - return -1; - - skb_reset_network_header(skb); - iph = ip_hdr(skb); - - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - - /* check if ip header and tcp header are complete */ - if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) - return -1; - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *tcph = tcp_hdr(skb); - *iphdr = iph; - - return 0; -} - static void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque) @@ -1345,7 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; u8 packet_error; - u16 q_number, completed_index, bytes_written, vlan, checksum; + u16 q_number, completed_index, bytes_written, vlan_tci, checksum; u32 rss_hash; if (skipped) @@ -1360,7 +1381,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, &type, &color, &q_number, &completed_index, &ingress_port, &fcoe, &eop, &sop, &rss_type, &csum_not_calc, &rss_hash, &bytes_written, - &packet_error, &vlan_stripped, &vlan, &checksum, + &packet_error, &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, @@ -1395,20 +1416,20 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, skb->dev = netdev; - if (enic->vlan_group && vlan_stripped) { + if (enic->vlan_group && vlan_stripped && + (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) { - if ((netdev->features & NETIF_F_LRO) && ipv4) - lro_vlan_hwaccel_receive_skb(&enic->lro_mgr, - skb, enic->vlan_group, - vlan, cq_desc); + if (netdev->features & NETIF_F_GRO) + vlan_gro_receive(&enic->napi, enic->vlan_group, + vlan_tci, skb); else vlan_hwaccel_receive_skb(skb, - enic->vlan_group, vlan); + enic->vlan_group, vlan_tci); } else { - if ((netdev->features & NETIF_F_LRO) && ipv4) - lro_receive_skb(&enic->lro_mgr, skb, cq_desc); + if (netdev->features & NETIF_F_GRO) + napi_gro_receive(&enic->napi, skb); else netif_receive_skb(skb); @@ -1438,7 +1459,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, static int enic_poll(struct 
napi_struct *napi, int budget) { struct enic *enic = container_of(napi, struct enic, napi); - struct net_device *netdev = enic->netdev; unsigned int rq_work_to_do = budget; unsigned int wq_work_to_do = -1; /* no limit */ unsigned int work_done, rq_work_done, wq_work_done; @@ -1478,12 +1498,9 @@ static int enic_poll(struct napi_struct *napi, int budget) if (rq_work_done < rq_work_to_do) { /* Some work done, but not enough to stay in polling, - * flush all LROs and exit polling + * exit polling */ - if (netdev->features & NETIF_F_LRO) - lro_flush_all(&enic->lro_mgr); - napi_complete(napi); vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); } @@ -1494,7 +1511,6 @@ static int enic_poll(struct napi_struct *napi, int budget) static int enic_poll_msix(struct napi_struct *napi, int budget) { struct enic *enic = container_of(napi, struct enic, napi); - struct net_device *netdev = enic->netdev; unsigned int work_to_do = budget; unsigned int work_done; int err; @@ -1528,12 +1544,9 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, - * flush all LROs and exit polling + * exit polling */ - if (netdev->features & NETIF_F_LRO) - lro_flush_all(&enic->lro_mgr); - napi_complete(napi); vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); } @@ -1655,7 +1668,7 @@ static void enic_synchronize_irqs(struct enic *enic) } } -static int enic_notify_set(struct enic *enic) +static int enic_dev_notify_set(struct enic *enic) { int err; @@ -1676,6 +1689,39 @@ static int enic_notify_set(struct enic *enic) return err; } +static int enic_dev_notify_unset(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_notify_unset(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +static int enic_dev_enable(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_enable(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +static int enic_dev_disable(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_disable(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + static void enic_notify_timer_start(struct enic *enic) { switch (vnic_dev_get_intr_mode(enic->vdev)) { @@ -1697,16 +1743,14 @@ static int enic_open(struct net_device *netdev) err = enic_request_intr(enic); if (err) { - printk(KERN_ERR PFX "%s: Unable to request irq.\n", - netdev->name); + netdev_err(netdev, "Unable to request irq.\n"); return err; } - err = enic_notify_set(enic); + err = enic_dev_notify_set(enic); if (err) { - printk(KERN_ERR PFX - "%s: Failed to alloc notify buffer, aborting.\n", - netdev->name); + netdev_err(netdev, + "Failed to alloc notify buffer, aborting.\n"); goto err_out_free_intr; } @@ -1714,9 +1758,7 @@ static int enic_open(struct net_device *netdev) vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[i]) == 0) { - printk(KERN_ERR PFX - "%s: Unable to alloc receive buffers.\n", - netdev->name); + netdev_err(netdev, "Unable to alloc receive buffers\n"); err = -ENOMEM; goto err_out_notify_unset; } @@ -1732,9 +1774,7 @@ static int enic_open(struct net_device *netdev) netif_wake_queue(netdev); napi_enable(&enic->napi); - spin_lock(&enic->devcmd_lock); - vnic_dev_enable(enic->vdev); - spin_unlock(&enic->devcmd_lock); + enic_dev_enable(enic); for (i = 0; i < enic->intr_count; i++) vnic_intr_unmask(&enic->intr[i]); @@ -1744,9 +1784,7 @@ static int 
enic_open(struct net_device *netdev) return 0; err_out_notify_unset: - spin_lock(&enic->devcmd_lock); - vnic_dev_notify_unset(enic->vdev); - spin_unlock(&enic->devcmd_lock); + enic_dev_notify_unset(enic); err_out_free_intr: enic_free_intr(enic); @@ -1760,20 +1798,19 @@ static int enic_stop(struct net_device *netdev) unsigned int i; int err; - for (i = 0; i < enic->intr_count; i++) + for (i = 0; i < enic->intr_count; i++) { vnic_intr_mask(&enic->intr[i]); + (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ + } enic_synchronize_irqs(enic); del_timer_sync(&enic->notify_timer); - spin_lock(&enic->devcmd_lock); - vnic_dev_disable(enic->vdev); - spin_unlock(&enic->devcmd_lock); + enic_dev_disable(enic); napi_disable(&enic->napi); netif_carrier_off(netdev); netif_tx_disable(netdev); - enic_dev_del_station_addr(enic); for (i = 0; i < enic->wq_count; i++) { @@ -1787,9 +1824,7 @@ static int enic_stop(struct net_device *netdev) return err; } - spin_lock(&enic->devcmd_lock); - vnic_dev_notify_unset(enic->vdev); - spin_unlock(&enic->devcmd_lock); + enic_dev_notify_unset(enic); enic_free_intr(enic); for (i = 0; i < enic->wq_count; i++) @@ -1818,10 +1853,9 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; if (netdev->mtu > enic->port_mtu) - printk(KERN_WARNING PFX - "%s: interface MTU (%d) set higher " - "than port MTU (%d)\n", - netdev->name, netdev->mtu, enic->port_mtu); + netdev_warn(netdev, + "interface MTU (%d) set higher than port MTU (%d)\n", + netdev->mtu, enic->port_mtu); if (running) enic_open(netdev); @@ -1894,21 +1928,21 @@ static int enic_dev_open(struct enic *enic) err = enic_dev_wait(enic->vdev, vnic_dev_open, vnic_dev_open_done, 0); if (err) - printk(KERN_ERR PFX - "vNIC device open failed, err %d.\n", err); + dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", + err); return err; } -static int enic_dev_soft_reset(struct enic *enic) +static int enic_dev_hang_reset(struct enic *enic) { int err; - err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, - vnic_dev_soft_reset_done, 0); + err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, + vnic_dev_hang_reset_done, 0); if (err) - printk(KERN_ERR PFX - "vNIC soft reset failed, err %d.\n", err); + netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", + err); return err; } @@ -1922,15 +1956,43 @@ static int enic_set_niccfg(struct enic *enic) const u8 rss_enable = 0; const u8 tso_ipid_split_en = 0; const u8 ig_vlan_strip_en = 1; + int err; /* Enable VLAN tag stripping. RSS not enabled (yet). 
*/ - return enic_set_nic_cfg(enic, + spin_lock(&enic->devcmd_lock); + err = enic_set_nic_cfg(enic, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +static int enic_dev_hang_notify(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_hang_notify(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, + IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); + spin_unlock(&enic->devcmd_lock); + + return err; } static void enic_reset(struct work_struct *work) @@ -1942,16 +2004,13 @@ static void enic_reset(struct work_struct *work) rtnl_lock(); - spin_lock(&enic->devcmd_lock); - vnic_dev_hang_notify(enic->vdev); - spin_unlock(&enic->devcmd_lock); - + enic_dev_hang_notify(enic); enic_stop(enic->netdev); - enic_dev_soft_reset(enic); - vnic_dev_init(enic->vdev, 0); - enic_reset_mcaddrs(enic); + enic_dev_hang_reset(enic); + enic_reset_multicast_list(enic); enic_init_vnic_resources(enic); enic_set_niccfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); enic_open(enic->netdev); rtnl_unlock(); @@ -2087,8 +2146,8 @@ static const struct net_device_ops enic_netdev_ops = { .ndo_start_xmit = enic_hard_start_xmit, .ndo_get_stats = enic_get_stats, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = enic_set_multicast_list, .ndo_set_mac_address = enic_set_mac_address, + .ndo_set_multicast_list = enic_set_multicast_list, .ndo_change_mtu = enic_change_mtu, .ndo_vlan_rx_register = enic_vlan_rx_register, .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, @@ -2106,8 +2165,20 @@ void enic_dev_deinit(struct enic *enic) enic_clear_intr_mode(enic); } +static int enic_dev_stats_clear(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_stats_clear(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + int enic_dev_init(struct enic *enic) { + struct device *dev = enic_get_dev(enic); struct net_device *netdev = enic->netdev; int err; @@ -2116,8 +2187,7 @@ int enic_dev_init(struct enic *enic) err = enic_get_vnic_config(enic); if (err) { - printk(KERN_ERR PFX - "Get vNIC configuration failed, aborting.\n"); + dev_err(dev, "Get vNIC configuration failed, aborting\n"); return err; } @@ -2132,9 +2202,8 @@ int enic_dev_init(struct enic *enic) err = enic_set_intr_mode(enic); if (err) { - printk(KERN_ERR PFX - "Failed to set intr mode based on resource " - "counts and system capabilities, aborting.\n"); + dev_err(dev, "Failed to set intr mode based on resource " + "counts and system capabilities, aborting\n"); return err; } @@ -2143,24 +2212,32 @@ int enic_dev_init(struct enic *enic) err = enic_alloc_vnic_resources(enic); if (err) { - printk(KERN_ERR PFX - "Failed to alloc vNIC resources, aborting.\n"); + dev_err(dev, "Failed to alloc vNIC resources, aborting\n"); goto err_out_free_vnic_resources; } enic_init_vnic_resources(enic); + /* Clear LIF stats + */ + enic_dev_stats_clear(enic); + err = enic_set_rq_alloc_buf(enic); if (err) { - printk(KERN_ERR PFX - "Failed to set RQ buffer allocator, aborting.\n"); + dev_err(dev, "Failed to set RQ buffer allocator, aborting\n"); goto err_out_free_vnic_resources; } err = enic_set_niccfg(enic); if (err) { - printk(KERN_ERR PFX - "Failed to config nic, aborting.\n"); + dev_err(dev, "Failed to config nic, aborting\n"); + 
goto err_out_free_vnic_resources; + } + + err = enic_dev_set_ig_vlan_rewrite_mode(enic); + if (err) { + netdev_err(netdev, + "Failed to set ingress vlan rewrite mode, aborting.\n"); goto err_out_free_vnic_resources; } @@ -2194,6 +2271,7 @@ static void enic_iounmap(struct enic *enic) static int __devinit enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct device *dev = &pdev->dev; struct net_device *netdev; struct enic *enic; int using_dac = 0; @@ -2206,7 +2284,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, netdev = alloc_etherdev(sizeof(struct enic)); if (!netdev) { - printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); + pr_err("Etherdev alloc failed, aborting\n"); return -ENOMEM; } @@ -2221,17 +2299,15 @@ static int __devinit enic_probe(struct pci_dev *pdev, /* Setup PCI resources */ - err = pci_enable_device(pdev); + err = pci_enable_device_mem(pdev); if (err) { - printk(KERN_ERR PFX - "Cannot enable PCI device, aborting.\n"); + dev_err(dev, "Cannot enable PCI device, aborting\n"); goto err_out_free_netdev; } err = pci_request_regions(pdev, DRV_NAME); if (err) { - printk(KERN_ERR PFX - "Cannot request PCI regions, aborting.\n"); + dev_err(dev, "Cannot request PCI regions, aborting\n"); goto err_out_disable_device; } @@ -2246,23 +2322,20 @@ static int __devinit enic_probe(struct pci_dev *pdev, if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - printk(KERN_ERR PFX - "No usable DMA configuration, aborting.\n"); + dev_err(dev, "No usable DMA configuration, aborting\n"); goto err_out_release_regions; } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - printk(KERN_ERR PFX - "Unable to obtain 32-bit DMA " - "for consistent allocations, aborting.\n"); + dev_err(dev, "Unable to obtain %u-bit DMA " + "for consistent allocations, aborting\n", 32); goto err_out_release_regions; } } else { err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { - printk(KERN_ERR PFX - "Unable to obtain 40-bit DMA " - "for consistent allocations, aborting.\n"); + dev_err(dev, "Unable to obtain %u-bit DMA " + "for consistent allocations, aborting\n", 40); goto err_out_release_regions; } using_dac = 1; @@ -2277,8 +2350,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, enic->bar[i].len = pci_resource_len(pdev, i); enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); if (!enic->bar[i].vaddr) { - printk(KERN_ERR PFX - "Cannot memory-map BAR %d, aborting.\n", i); + dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i); err = -ENODEV; goto err_out_iounmap; } @@ -2291,8 +2363,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, ARRAY_SIZE(enic->bar)); if (!enic->vdev) { - printk(KERN_ERR PFX - "vNIC registration failed, aborting.\n"); + dev_err(dev, "vNIC registration failed, aborting\n"); err = -ENODEV; goto err_out_iounmap; } @@ -2302,8 +2373,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, err = enic_dev_open(enic); if (err) { - printk(KERN_ERR PFX - "vNIC dev open failed, aborting.\n"); + dev_err(dev, "vNIC dev open failed, aborting\n"); goto err_out_vnic_unregister; } @@ -2317,23 +2387,31 @@ static int __devinit enic_probe(struct pci_dev *pdev, netif_carrier_off(netdev); + /* Do not call dev_init for a dynamic vnic. + * For a dynamic vnic, init_prov_info will be + * called later by an upper layer. 
+ */ + if (!enic_is_dynamic(enic)) { err = vnic_dev_init(enic->vdev, 0); if (err) { - printk(KERN_ERR PFX - "vNIC dev init failed, aborting.\n"); + dev_err(dev, "vNIC dev init failed, aborting\n"); goto err_out_dev_close; } } + /* Setup devcmd lock + */ + + spin_lock_init(&enic->devcmd_lock); + err = enic_dev_init(enic); if (err) { - printk(KERN_ERR PFX - "Device initialization failed, aborting.\n"); + dev_err(dev, "Device initialization failed, aborting\n"); goto err_out_dev_close; } - /* Setup notification timer, HW reset task, and locks + /* Setup notification timer, HW reset task, and wq locks */ init_timer(&enic->notify_timer); @@ -2345,8 +2423,6 @@ static int __devinit enic_probe(struct pci_dev *pdev, for (i = 0; i < enic->wq_count; i++) spin_lock_init(&enic->wq_lock[i]); - spin_lock_init(&enic->devcmd_lock); - /* Register net device */ @@ -2355,8 +2431,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, err = enic_set_mac_addr(netdev, enic->mac_addr); if (err) { - printk(KERN_ERR PFX - "Invalid MAC address, aborting.\n"); + dev_err(dev, "Invalid MAC address, aborting\n"); goto err_out_dev_deinit; } @@ -2372,31 +2447,27 @@ static int __devinit enic_probe(struct pci_dev *pdev, netdev->ethtool_ops = &enic_ethtool_ops; netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + if (ENIC_SETTING(enic, LOOP)) { + netdev->features &= ~NETIF_F_HW_VLAN_TX; + enic->loop_enable = 1; + enic->loop_tag = enic->config.loop_tag; + dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); + } if (ENIC_SETTING(enic, TXCSUM)) netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; if (ENIC_SETTING(enic, TSO)) netdev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; if (ENIC_SETTING(enic, LRO)) - netdev->features |= NETIF_F_LRO; + netdev->features |= NETIF_F_GRO; if (using_dac) netdev->features |= NETIF_F_HIGHDMA; enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); - enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR; - enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC; - enic->lro_mgr.lro_arr = enic->lro_desc; - enic->lro_mgr.get_skb_header = enic_get_skb_header; - enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - enic->lro_mgr.dev = netdev; - enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE; - enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; - err = register_netdev(netdev); if (err) { - printk(KERN_ERR PFX - "Cannot register net device, aborting.\n"); + dev_err(dev, "Cannot register net device, aborting\n"); goto err_out_dev_deinit; } @@ -2450,7 +2521,7 @@ static struct pci_driver enic_driver = { static int __init enic_init_module(void) { - printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); return pci_register_driver(&enic_driver); } diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c index 9b18840cba969aa0a3162a79298c4b1713926c11..29ede8a17a2ca5a7d99e140c0a8e0c8f43848314 100644 --- a/drivers/net/enic/enic_res.c +++ b/drivers/net/enic/enic_res.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify @@ -46,7 +46,8 @@ int enic_get_vnic_config(struct enic *enic) err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr); if (err) { - printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err); + dev_err(enic_get_dev(enic), + "Error getting MAC addr, %d\n", err); return err; } @@ -56,7 +57,7 @@ int enic_get_vnic_config(struct enic *enic) offsetof(struct vnic_enet_config, m), \ sizeof(c->m), &c->m); \ if (err) { \ - printk(KERN_ERR PFX \ + dev_err(enic_get_dev(enic), \ "Error getting %s, %d\n", #m, err); \ return err; \ } \ @@ -69,6 +70,7 @@ int enic_get_vnic_config(struct enic *enic) GET_CONFIG(intr_timer_type); GET_CONFIG(intr_mode); GET_CONFIG(intr_timer_usec); + GET_CONFIG(loop_tag); c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, @@ -92,10 +94,10 @@ int enic_get_vnic_config(struct enic *enic) INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), c->intr_timer_usec); - printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n", + dev_info(enic_get_dev(enic), "vNIC MAC addr %pM wq/rq %d/%d\n", enic->mac_addr, c->wq_desc_count, c->rq_desc_count); - printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " - "intr timer %d usec\n", + dev_info(enic_get_dev(enic), "vNIC mtu %d csum tx/rx %d/%d " + "tso/lro %d/%d intr timer %d usec\n", c->mtu, ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), ENIC_SETTING(enic, LRO), c->intr_timer_usec); @@ -103,17 +105,7 @@ int enic_get_vnic_config(struct enic *enic) return 0; } -void enic_add_multicast_addr(struct enic *enic, u8 *addr) -{ - vnic_dev_add_addr(enic->vdev, addr); -} - -void enic_del_multicast_addr(struct enic *enic, u8 *addr) -{ - vnic_dev_del_addr(enic->vdev, addr); -} - -void enic_add_vlan(struct enic *enic, u16 vlanid) +int enic_add_vlan(struct enic *enic, u16 vlanid) { u64 a0 = vlanid, a1 = 0; int wait = 1000; @@ -121,10 +113,12 @@ void enic_add_vlan(struct enic *enic, u16 vlanid) err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); if (err) - printk(KERN_ERR PFX "Can't add vlan id, %d\n", err); + dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err); + + return err; } -void enic_del_vlan(struct enic *enic, u16 vlanid) +int enic_del_vlan(struct enic *enic, u16 vlanid) { u64 a0 = vlanid, a1 = 0; int wait = 1000; @@ -132,7 +126,9 @@ void enic_del_vlan(struct enic *enic, u16 vlanid) err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); if (err) - printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err); + dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err); + + return err; } int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, @@ -198,8 +194,8 @@ void enic_get_res_counts(struct enic *enic) vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL), ENIC_INTR_MAX); - printk(KERN_INFO PFX "vNIC resources avail: " - "wq %d rq %d cq %d intr %d\n", + dev_info(enic_get_dev(enic), + "vNIC resources avail: wq %d rq %d cq %d intr %d\n", enic->wq_count, enic->rq_count, enic->cq_count, enic->intr_count); } @@ -304,11 +300,6 @@ void enic_init_vnic_resources(struct enic *enic) enic->config.intr_timer_type, mask_on_assertion); } - - /* Clear LIF stats - */ - - vnic_dev_stats_clear(enic->vdev); } int enic_alloc_vnic_resources(struct enic *enic) @@ -319,15 +310,14 @@ int enic_alloc_vnic_resources(struct enic *enic) intr_mode = vnic_dev_get_intr_mode(enic->vdev); - printk(KERN_INFO PFX "vNIC resources used: " + dev_info(enic_get_dev(enic), "vNIC resources used: " "wq %d rq %d cq %d intr %d intr mode %s\n", 
enic->wq_count, enic->rq_count, enic->cq_count, enic->intr_count, intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" : - "unknown" - ); + "unknown"); /* Allocate queue resources */ @@ -373,7 +363,8 @@ int enic_alloc_vnic_resources(struct enic *enic) enic->legacy_pba = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_PBA_LEGACY, 0); if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { - printk(KERN_ERR PFX "Failed to hook legacy pba resource\n"); + dev_err(enic_get_dev(enic), + "Failed to hook legacy pba resource\n"); err = -ENODEV; goto err_out_cleanup; } diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h index 494664f7fcccec916e865d7c7e612efd8481d766..83bd172c356cf329db232831d42cd3dd092f6a6d 100644 --- a/drivers/net/enic/enic_res.h +++ b/drivers/net/enic/enic_res.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -43,7 +43,7 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss_or_csum_offset, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, - int offload_mode, int cq_entry, int sop, int eop) + int offload_mode, int cq_entry, int sop, int eop, int loopback) { struct wq_enet_desc *desc = vnic_wq_next_desc(wq); @@ -56,61 +56,62 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, 0, /* fcoe_encap */ (u8)vlan_tag_insert, (u16)vlan_tag, - 0 /* loopback */); + (u8)loopback); vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); } static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop) + void *os_buf, dma_addr_t dma_addr, unsigned int len, + int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 0, 0, 0, 0, 0, - eop, 0 /* !SOP */, eop); + eop, 0 /* !SOP */, eop, loopback); } static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, - unsigned int vlan_tag, int eop) + unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 0, 0, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_CSUM, - eop, 1 /* SOP */, eop); + eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int ip_csum, int tcpudp_csum, int vlan_tag_insert, - unsigned int vlan_tag, int eop) + unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, (ip_csum ? 1 : 0) + (tcpudp_csum ? 
2 : 0), 0, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_CSUM, - eop, 1 /* SOP */, eop); + eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int csum_offset, unsigned int hdr_len, - int vlan_tag_insert, unsigned int vlan_tag, int eop) + int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, csum_offset, hdr_len, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_CSUM_L4, - eop, 1 /* SOP */, eop); + eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, - unsigned int vlan_tag, int eop) + unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, mss, hdr_len, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_TSO, - eop, 1 /* SOP */, eop); + eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_rq_desc(struct vnic_rq *rq, @@ -131,10 +132,8 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq, struct enic; int enic_get_vnic_config(struct enic *); -void enic_add_multicast_addr(struct enic *enic, u8 *addr); -void enic_del_multicast_addr(struct enic *enic, u8 *addr); -void enic_add_vlan(struct enic *enic, u16 vlanid); -void enic_del_vlan(struct enic *enic, u16 vlanid); +int enic_add_vlan(struct enic *enic, u16 vlanid); +int enic_del_vlan(struct enic *enic, u16 vlanid); int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en); diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h index a06e649010cee0d061fcf5f1e3eab8587e36c30d..e6dd30988d6f5752dca6e72bf8ae100f23359d17 100644 --- a/drivers/net/enic/rq_enet_desc.h +++ b/drivers/net/enic/rq_enet_desc.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c index 020ae6c3f3d9586a803fbeea28416a643fca733f..b86d6ef8dad38bf97db0c3cceefc09423d196bf1 100644 --- a/drivers/net/enic/vnic_cq.c +++ b/drivers/net/enic/vnic_cq.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -42,7 +42,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { - printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index); + pr_err("Failed to hook CQ[%d] resource\n", index); return -EINVAL; } diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h index 114763cbc2f8bc12e0c19241ac9569fa62d5b8f2..552d3daf250812bd839f77615e4203660f3eb096 100644 --- a/drivers/net/enic/vnic_cq.h +++ b/drivers/net/enic/vnic_cq.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c index e0d33281ec980b6013dea7c1632b19358af7d30c..6a5b578a69e169501f27eca1ec31262b3fc0d3d2 100644 --- a/drivers/net/enic/vnic_dev.c +++ b/drivers/net/enic/vnic_dev.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -23,21 +23,23 @@ #include #include #include -#include #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_dev.h" #include "vnic_stats.h" +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, +}; + struct vnic_res { void __iomem *vaddr; dma_addr_t bus_addr; unsigned int count; }; -#define VNIC_DEV_CAP_INIT 0x0001 - struct vnic_dev { void *priv; struct pci_dev *pdev; @@ -48,13 +50,14 @@ struct vnic_dev { struct vnic_devcmd_notify notify_copy; dma_addr_t notify_pa; u32 notify_sz; - u32 *linkstatus; dma_addr_t linkstatus_pa; struct vnic_stats *stats; dma_addr_t stats_pa; struct vnic_devcmd_fw_info *fw_info; dma_addr_t fw_info_pa; - u32 cap_flags; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; }; #define VNIC_MAX_RES_HDR_SIZE \ @@ -78,19 +81,19 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, return -EINVAL; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { - printk(KERN_ERR "vNIC BAR0 res hdr length error\n"); + pr_err("vNIC BAR0 res hdr length error\n"); return -EINVAL; } rh = bar->vaddr; if (!rh) { - printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); + pr_err("vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } if (ioread32(&rh->magic) != VNIC_RES_MAGIC || ioread32(&rh->version) != VNIC_RES_VERSION) { - printk(KERN_ERR "vNIC BAR0 res magic/version error " + pr_err("vNIC BAR0 res magic/version error " "exp (%lx/%lx) curr (%x/%x)\n", VNIC_RES_MAGIC, VNIC_RES_VERSION, ioread32(&rh->magic), ioread32(&rh->version)); @@ -122,7 +125,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar[bar_num].len) { - printk(KERN_ERR "vNIC BAR0 resource %d " + pr_err("vNIC BAR0 resource %d " "out-of-bounds, offset 0x%x + " "size 0x%x > bar len 0x%lx\n", type, bar_offset, @@ -229,8 +232,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { - printk(KERN_ERR - "Failed to allocate ring (size=%d), aborting\n", + pr_err("Failed to allocate ring (size=%d), aborting\n", (int)ring->size); return -ENOMEM; } @@ -258,23 +260,28 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) } } -int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, - u64 *a0, u64 *a1, int wait) +static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) { struct vnic_devcmd __iomem *devcmd = vdev->devcmd; + unsigned int i; int delay; u32 status; int err; status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } if (status & STAT_BUSY) { - printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); + pr_err("Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { - writeq(*a0, &devcmd->args[0]); - writeq(*a1, &devcmd->args[1]); + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + 
writeq(vdev->args[i], &devcmd->args[i]); wmb(); } @@ -288,31 +295,110 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, udelay(100); status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = (int)readq(&devcmd->args[0]); if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - printk(KERN_ERR "Error %d devcmd %d\n", + pr_err("Error %d devcmd %d\n", err, _CMD_N(cmd)); return err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); - *a0 = readq(&devcmd->args[0]); - *a1 = readq(&devcmd->args[1]); + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = readq(&devcmd->args[i]); } return 0; } } - printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); + pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; } +static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + u32 status; + int err; + + memset(vdev->args, 0, sizeof(vdev->args)); + + vdev->args[0] = vdev->proxy_index; /* bdf */ + vdev->args[1] = cmd; + vdev->args[2] = *a0; + vdev->args[3] = *a1; + + err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait); + if (err) + return err; + + status = (u32)vdev->args[0]; + if (status & STAT_ERROR) { + err = (int)vdev->args[1]; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); + return err; + } + + *a0 = vdev->args[1]; + *a1 = vdev->args[2]; + + return 0; +} + +static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = _vnic_dev_cmd(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + +void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf) +{ + vdev->proxy = PROXY_BY_BDF; + vdev->proxy_index = bdf; +} + +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev) +{ + vdev->proxy = PROXY_NONE; + vdev->proxy_index = 0; +} + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + memset(vdev->args, 0, sizeof(vdev->args)); + + switch (vdev->proxy) { + case PROXY_BY_BDF: + return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait); + case PROXY_NONE: + default: + return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); + } +} + static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) { u64 a0 = (u32)cmd, a1 = 0; @@ -431,6 +517,19 @@ int vnic_dev_enable(struct vnic_dev *vdev) return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); } +int vnic_dev_enable_wait(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); + if (err == ERR_ECMDUNKNOWN) + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); + + return err; +} + int vnic_dev_disable(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; @@ -486,6 +585,44 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) return 0; } +int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait); + if (err == ERR_ECMDUNKNOWN) { + err = vnic_dev_soft_reset(vdev, arg); + if (err) + return err; + + return vnic_dev_init(vdev, 0); + } + + return err; +} + +int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + 
*done = 0; + + err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait); + if (err) { + if (err == ERR_ECMDUNKNOWN) + return vnic_dev_soft_reset_done(vdev, done); + return err; + } + + *done = (a0 == 0); + + return 0; +} + int vnic_dev_hang_notify(struct vnic_dev *vdev) { u64 a0, a1; @@ -512,7 +649,7 @@ int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) return 0; } -void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti) { u64 a0, a1 = 0; @@ -527,7 +664,29 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) - printk(KERN_ERR "Can't set packet filter\n"); + pr_err("Can't set packet filter\n"); + + return err; +} + +int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed, + int multicast, int broadcast, int promisc, int allmulti) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err; + + a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | + (multicast ? CMD_PFILTER_MULTICAST : 0) | + (broadcast ? CMD_PFILTER_BROADCAST : 0) | + (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | + (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); + + err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait); + if (err) + pr_err("Can't set packet filter\n"); + + return err; } int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) @@ -542,7 +701,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err); + pr_err("Can't add addr [%pM], %d\n", addr, err); return err; } @@ -559,7 +718,21 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err); + pr_err("Can't del addr [%pM], %d\n", addr, err); + + return err; +} + +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + u8 ig_vlan_rewrite_mode) +{ + u64 a0 = ig_vlan_rewrite_mode, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait); + if (err == ERR_ECMDUNKNOWN) + return 0; return err; } @@ -572,8 +745,7 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait); if (err) - printk(KERN_ERR "Failed to raise INTR[%d], err %d\n", - intr, err); + pr_err("Failed to raise INTR[%d], err %d\n", intr, err); return err; } @@ -604,8 +776,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) dma_addr_t notify_pa; if (vdev->notify || vdev->notify_pa) { - printk(KERN_ERR "notify block %p still allocated", - vdev->notify); + pr_err("notify block %p still allocated", vdev->notify); return -EINVAL; } @@ -618,22 +789,25 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); } -void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) +int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) { u64 a0, a1; int wait = 1000; + int err; a0 = 0; /* paddr = 0 to unset notify buffer */ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ a1 += sizeof(struct vnic_devcmd_notify); - vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); vdev->notify = NULL; vdev->notify_pa = 0; vdev->notify_sz = 0; + + return err; } -void vnic_dev_notify_unset(struct 
vnic_dev *vdev) +int vnic_dev_notify_unset(struct vnic_dev *vdev) { if (vdev->notify) { pci_free_consistent(vdev->pdev, @@ -642,7 +816,7 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev) vdev->notify_pa); } - vnic_dev_notify_unsetcmd(vdev); + return vnic_dev_notify_unsetcmd(vdev); } static int vnic_dev_notify_ready(struct vnic_dev *vdev) @@ -672,13 +846,14 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg) int wait = 1000; int r = 0; - if (vdev->cap_flags & VNIC_DEV_CAP_INIT) + if (vnic_dev_capable(vdev, CMD_INIT)) r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); else { vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); if (a0 & CMD_INITF_DEFAULT_MAC) { - // Emulate these for old CMD_INIT_v1 which - // didn't pass a0 so no CMD_INITF_*. + /* Emulate these for old CMD_INIT_v1 which + * didn't pass a0 so no CMD_INITF_*. + */ vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); } @@ -700,7 +875,7 @@ int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err) *done = (a0 == 0); - *err = (a0 == 0) ? a1 : 0; + *err = (a0 == 0) ? (int)a1:0; return 0; } @@ -738,9 +913,6 @@ int vnic_dev_deinit(struct vnic_dev *vdev) int vnic_dev_link_status(struct vnic_dev *vdev) { - if (vdev->linkstatus) - return *vdev->linkstatus; - if (!vnic_dev_notify_ready(vdev)) return 0; @@ -787,6 +959,14 @@ u32 vnic_dev_notify_status(struct vnic_dev *vdev) return vdev->notify_copy.status; } +u32 vnic_dev_uif(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.uif; +} + void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode) { @@ -807,14 +987,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev) sizeof(struct vnic_devcmd_notify), vdev->notify, vdev->notify_pa); - if (vdev->linkstatus) - pci_free_consistent(vdev->pdev, - sizeof(u32), - vdev->linkstatus, - vdev->linkstatus_pa); if (vdev->stats) pci_free_consistent(vdev->pdev, - sizeof(struct vnic_dev), + sizeof(struct vnic_stats), vdev->stats, vdev->stats_pa); if (vdev->fw_info) pci_free_consistent(vdev->pdev, @@ -844,11 +1019,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, if (!vdev->devcmd) goto err_out; - vdev->cap_flags = 0; - - if (vnic_dev_capable(vdev, CMD_INIT)) - vdev->cap_flags |= VNIC_DEV_CAP_INIT; - return vdev; err_out: diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h index caccce36957b4695a0126d2df6050cadb3271249..3a61873138b69112b28f60de2a70fa11f2dc052c 100644 --- a/drivers/net/enic/vnic_dev.h +++ b/drivers/net/enic/vnic_dev.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify @@ -41,6 +41,9 @@ static inline void writeq(u64 val, void __iomem *reg) } #endif +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + enum vnic_dev_hw_version { VNIC_DEV_HW_VER_UNKNOWN, VNIC_DEV_HW_VER_A1, @@ -92,6 +95,8 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring); int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait); +void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf); +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev); int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info); int vnic_dev_hw_version(struct vnic_dev *vdev, @@ -101,8 +106,10 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, int vnic_dev_stats_clear(struct vnic_dev *vdev); int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); int vnic_dev_hang_notify(struct vnic_dev *vdev); -void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti); +int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed, + int multicast, int broadcast, int promisc, int allmulti); int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); @@ -110,16 +117,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); int vnic_dev_notify_setcmd(struct vnic_dev *vdev, void *notify_addr, dma_addr_t notify_pa, u16 intr); int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); -void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); -void vnic_dev_notify_unset(struct vnic_dev *vdev); +int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); +int vnic_dev_notify_unset(struct vnic_dev *vdev); int vnic_dev_link_status(struct vnic_dev *vdev); u32 vnic_dev_port_speed(struct vnic_dev *vdev); u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); u32 vnic_dev_mtu(struct vnic_dev *vdev); u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); u32 vnic_dev_notify_status(struct vnic_dev *vdev); +u32 vnic_dev_uif(struct vnic_dev *vdev); int vnic_dev_close(struct vnic_dev *vdev); int vnic_dev_enable(struct vnic_dev *vdev); +int vnic_dev_enable_wait(struct vnic_dev *vdev); int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); @@ -129,10 +138,14 @@ int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_deinit(struct vnic_dev *vdev); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); +int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode); enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); void vnic_dev_unregister(struct vnic_dev *vdev); +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + u8 ig_vlan_rewrite_mode); struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, unsigned int num_bars); diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h index 
d78bbcc1fdf90b9d822b9ddc1aca92e5c8545599..20661755df6bf4c96f9209efe4767a53c9217cdd 100644 --- a/drivers/net/enic/vnic_devcmd.h +++ b/drivers/net/enic/vnic_devcmd.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -98,6 +98,9 @@ enum vnic_devcmd_cmd { /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), + /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7), + /* hang detection notification */ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), @@ -171,6 +174,9 @@ enum vnic_devcmd_cmd { /* enable virtual link */ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + /* enable virtual link, waiting variant. */ + CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + /* disable virtual link */ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), @@ -211,6 +217,27 @@ enum vnic_devcmd_cmd { * in: (u16)a0=interrupt number to assert */ CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + * a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), }; /* flags for CMD_OPEN */ @@ -226,6 +253,12 @@ enum vnic_devcmd_cmd { #define CMD_PFILTER_PROMISCUOUS 0x08 #define CMD_PFILTER_ALL_MULTICAST 0x10 +/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */ +#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0 +#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1 +#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2 +#define IG_VLAN_REWRITE_MODE_PASS_THRU 3 + enum vnic_devcmd_status { STAT_NONE = 0, STAT_BUSY = 1 << 0, /* cmd in progress */ diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h index 8eeb6758491bcb3e065839ccb3b8f4ea7100e77b..3b3291248956a4d01848b635bf294f2622c175e7 100644 --- a/drivers/net/enic/vnic_enet.h +++ b/drivers/net/enic/vnic_enet.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -35,6 +35,7 @@ struct vnic_enet_config { u8 intr_mode; char devname[16]; u32 intr_timer_usec; + u16 loop_tag; }; #define VENETF_TSO 0x1 /* TSO enabled */ @@ -48,5 +49,6 @@ struct vnic_enet_config { #define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */ #define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ #define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. 
fields */ +#define VENETF_LOOP 0x800 /* Loopback enabled */ #endif /* _VNIC_ENIC_H_ */ diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c index 3934309a9498b69527f8c33068831b123c5efa78..52ab61af2750fa9287d53f3c8e4dcbec84dca0b5 100644 --- a/drivers/net/enic/vnic_intr.c +++ b/drivers/net/enic/vnic_intr.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -39,8 +39,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { - printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", - index); + pr_err("Failed to hook INTR[%d].ctrl resource\n", index); return -EINVAL; } diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h index 2fe6c6339e3c58a54eb9f205ac34ba6d625f4199..09dc0b73ff46aec338fc0478a52d85955ea350ba 100644 --- a/drivers/net/enic/vnic_intr.h +++ b/drivers/net/enic/vnic_intr.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -61,7 +61,11 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr) static inline void vnic_intr_mask(struct vnic_intr *intr) { iowrite32(1, &intr->ctrl->mask); - (void)ioread32(&intr->ctrl->mask); +} + +static inline int vnic_intr_masked(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->mask); } static inline void vnic_intr_return_credits(struct vnic_intr *intr, diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h index cf80ab46d582ac73a94ff59766c6fdf0a379cdef..995a50dd4c99ca926769e8caeb1a5fbd51c22d9b 100644 --- a/drivers/net/enic/vnic_nic.h +++ b/drivers/net/enic/vnic_nic.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h index b61c22aec41a65968c887fa800b7062bf59dd254..810287beff149f2969e0bda67b28754d600555e9 100644 --- a/drivers/net/enic/vnic_resource.h +++ b/drivers/net/enic/vnic_resource.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c index cc580cfec41dd8baae417347d7604380623179a0..dbb2aca258b9087ed1734dd2b7da61e478702303 100644 --- a/drivers/net/enic/vnic_rq.c +++ b/drivers/net/enic/vnic_rq.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify @@ -37,23 +37,23 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq) vdev = rq->vdev; for (i = 0; i < blks; i++) { - rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); + rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); if (!rq->bufs[i]) { - printk(KERN_ERR "Failed to alloc rq_bufs\n"); + pr_err("Failed to alloc rq_bufs\n"); return -ENOMEM; } } for (i = 0; i < blks; i++) { buf = rq->bufs[i]; - for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) { - buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j; + for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) { + buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j; buf->desc = (u8 *)rq->ring.descs + rq->ring.desc_size * buf->index; if (buf->index + 1 == count) { buf->next = rq->bufs[0]; break; - } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) { + } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) { buf->next = rq->bufs[i + 1]; } else { buf->next = buf + 1; @@ -94,7 +94,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); if (!rq->ctrl) { - printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index); + pr_err("Failed to hook RQ[%d] resource\n", index); return -EINVAL; } @@ -119,10 +119,11 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_offset) { u64 paddr; + unsigned int count = rq->ring.desc_count; paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; writeq(paddr, &rq->ctrl->ring_base); - iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); + iowrite32(count, &rq->ctrl->ring_size); iowrite32(cq_index, &rq->ctrl->cq_index); iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); @@ -132,8 +133,8 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, iowrite32(posted_index, &rq->ctrl->posted_index); rq->to_use = rq->to_clean = - &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] - [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; } void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, @@ -145,6 +146,11 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + vnic_rq_init_start(rq, cq_index, fetch_index, fetch_index, error_interrupt_enable, @@ -174,7 +180,7 @@ int vnic_rq_disable(struct vnic_rq *rq) udelay(10); } - printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); + pr_err("Failed to disable RQ[%d]\n", rq->index); return -ETIMEDOUT; } @@ -184,8 +190,7 @@ void vnic_rq_clean(struct vnic_rq *rq, { struct vnic_rq_buf *buf; u32 fetch_index; - - BUG_ON(ioread32(&rq->ctrl->enable)); + unsigned int count = rq->ring.desc_count; buf = rq->to_clean; @@ -199,9 +204,14 @@ void vnic_rq_clean(struct vnic_rq *rq, /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } rq->to_use = rq->to_clean = - &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] - [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; + 
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; iowrite32(fetch_index, &rq->ctrl->posted_index); vnic_dev_clear_desc_ring(&rq->ring); diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h index 35e736cc2d887743c60959076110fa269e7d28ff..2dc48f91abf71dc4da9d00f863f6766dd58b6349 100644 --- a/drivers/net/enic/vnic_rq.h +++ b/drivers/net/enic/vnic_rq.h @@ -1,5 +1,5 @@ /* - * Copyright 2008, 2009 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -52,12 +52,16 @@ struct vnic_rq_ctrl { u32 pad10; }; -/* Break the vnic_rq_buf allocations into blocks of 64 entries */ -#define VNIC_RQ_BUF_BLK_ENTRIES 64 -#define VNIC_RQ_BUF_BLK_SZ \ - (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) +/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */ +#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \ + VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES)) +#define VNIC_RQ_BUF_BLK_SZ(entries) \ + (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf)) #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ - DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) + DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries)) #define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) struct vnic_rq_buf { diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h index 5fbb3c923bcd6a0610bacda69cdc7694d4b507b4..f62d1871962983bfce011fdbb4097a3421daf9a0 100644 --- a/drivers/net/enic/vnic_rss.h +++ b/drivers/net/enic/vnic_rss.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h index 9ff9614d89b122f4f653bcbdbbc4c13549fd2352..77750ec93954c31da439bacfc33870d66134da66 100644 --- a/drivers/net/enic/vnic_stats.h +++ b/drivers/net/enic/vnic_stats.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
* * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c index d769772998c6331e026393279c3d8997347fc8e8..197c9d24af824552a62622ec3aebfc39ec9b02cf 100644 --- a/drivers/net/enic/vnic_vic.c +++ b/drivers/net/enic/vnic_vic.c @@ -25,9 +25,13 @@ struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type) { - struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags); + struct vic_provinfo *vp; - if (!vp || !oui) + if (!oui) + return NULL; + + vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags); + if (!vp) return NULL; memcpy(vp->oui, oui, sizeof(vp->oui)); diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h index 085c2a274cb14162406cbd98a0fe836710c1eaaa..7e46e5e8600fedba6a2d7d6bcbf1276c279846cc 100644 --- a/drivers/net/enic/vnic_vic.h +++ b/drivers/net/enic/vnic_vic.h @@ -44,7 +44,7 @@ struct vic_provinfo { u16 length; u8 value[0]; } tlv[0]; -} __attribute__ ((packed)); +} __packed; #define VIC_PROVINFO_MAX_DATA 1385 #define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c index 1378afbdfe67b150ee172f6216f4af13fbd5c2ae..122e33bcc57826a1ef08abf0337a3b97da014570 100644 --- a/drivers/net/enic/vnic_wq.c +++ b/drivers/net/enic/vnic_wq.c @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -37,23 +37,23 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq) vdev = wq->vdev; for (i = 0; i < blks; i++) { - wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); if (!wq->bufs[i]) { - printk(KERN_ERR "Failed to alloc wq_bufs\n"); + pr_err("Failed to alloc wq_bufs\n"); return -ENOMEM; } } for (i = 0; i < blks; i++) { buf = wq->bufs[i]; - for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) { - buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j; + for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { + buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; buf->desc = (u8 *)wq->ring.descs + wq->ring.desc_size * buf->index; if (buf->index + 1 == count) { buf->next = wq->bufs[0]; break; - } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) { + } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { buf->next = wq->bufs[i + 1]; } else { buf->next = buf + 1; @@ -94,7 +94,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { - printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index); + pr_err("Failed to hook WQ[%d] resource\n", index); return -EINVAL; } @@ -119,10 +119,11 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_offset) { u64 paddr; + unsigned int count = wq->ring.desc_count; paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; writeq(paddr, &wq->ctrl->ring_base); - iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); + iowrite32(count, &wq->ctrl->ring_size); iowrite32(fetch_index, &wq->ctrl->fetch_index); iowrite32(posted_index, &wq->ctrl->posted_index); iowrite32(cq_index, &wq->ctrl->cq_index); @@ -131,8 +132,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, iowrite32(0, &wq->ctrl->error_status); wq->to_use = wq->to_clean = - &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES] - 
[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES]; + &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; } void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, @@ -167,7 +168,7 @@ int vnic_wq_disable(struct vnic_wq *wq) udelay(10); } - printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); + pr_err("Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } @@ -177,8 +178,6 @@ void vnic_wq_clean(struct vnic_wq *wq, { struct vnic_wq_buf *buf; - BUG_ON(ioread32(&wq->ctrl->enable)); - buf = wq->to_clean; while (vnic_wq_desc_used(wq) > 0) { diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h index 9c34d41a887e649a1b17924fa43f43c759b4584f..94ac4621acc5ac945a7df33acc2894906d3f34ce 100644 --- a/drivers/net/enic/vnic_wq.h +++ b/drivers/net/enic/vnic_wq.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify @@ -60,12 +60,16 @@ struct vnic_wq_buf { void *desc; }; -/* Break the vnic_wq_buf allocations into blocks of 64 entries */ -#define VNIC_WQ_BUF_BLK_ENTRIES 64 -#define VNIC_WQ_BUF_BLK_SZ \ - (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) +/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ +#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \ + VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)) +#define VNIC_WQ_BUF_BLK_SZ(entries) \ + (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ - DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) + DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) #define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) struct vnic_wq { diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h index 483596c2d8bf8b41deff7be9a9fefd3c7063b189..c7021e3a631fe2f487c47ba282793481742a9def 100644 --- a/drivers/net/enic/wq_enet_desc.h +++ b/drivers/net/enic/wq_enet_desc.h @@ -1,5 +1,5 @@ /* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 4c274657283c36ad2157d5874447e3c434483869..57c8ac0ef3f1232d41e4f00399eb27b18170dcf6 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -278,7 +278,6 @@ struct epic_private { struct pci_dev *pci_dev; /* PCI bus location. */ int chip_id, chip_flags; - struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ int tx_threshold; unsigned char mc_filter[8]; @@ -770,7 +769,6 @@ static int epic_open(struct net_device *dev) static void epic_pause(struct net_device *dev) { long ioaddr = dev->base_addr; - struct epic_private *ep = netdev_priv(dev); netif_stop_queue (dev); @@ -781,9 +779,9 @@ static void epic_pause(struct net_device *dev) /* Update the error counts. 
*/ if (inw(ioaddr + COMMAND) != 0xffff) { - ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); - ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); - ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); + dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); + dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); } /* Remove the packets on the Rx queue. */ @@ -900,7 +898,7 @@ static void epic_tx_timeout(struct net_device *dev) } } if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ - ep->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; outl(RestartTx, ioaddr + COMMAND); } else { epic_restart(dev); @@ -908,7 +906,7 @@ static void epic_tx_timeout(struct net_device *dev) } dev->trans_start = jiffies; /* prevent tx timeout */ - ep->stats.tx_errors++; + dev->stats.tx_errors++; if (!ep->tx_full) netif_wake_queue(dev); } @@ -1016,7 +1014,7 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) static void epic_tx_error(struct net_device *dev, struct epic_private *ep, int status) { - struct net_device_stats *stats = &ep->stats; + struct net_device_stats *stats = &dev->stats; #ifndef final_version /* There was an major error, log it. */ @@ -1053,9 +1051,9 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) break; /* It still hasn't been Txed */ if (likely(txstatus & 0x0001)) { - ep->stats.collisions += (txstatus >> 8) & 15; - ep->stats.tx_packets++; - ep->stats.tx_bytes += ep->tx_skbuff[entry]->len; + dev->stats.collisions += (txstatus >> 8) & 15; + dev->stats.tx_packets++; + dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; } else epic_tx_error(dev, ep, txstatus); @@ -1125,12 +1123,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) goto out; /* Always update the error counts to avoid overhead later. */ - ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); - ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); - ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); + dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); + dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); if (status & TxUnderrun) { /* Tx FIFO underflow. */ - ep->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; outl(ep->tx_threshold += 128, ioaddr + TxThresh); /* Restart the transmit process. */ outl(RestartTx, ioaddr + COMMAND); @@ -1183,10 +1181,10 @@ static int epic_rx(struct net_device *dev, int budget) if (status & 0x2000) { printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " "multiple buffers, status %4.4x!\n", dev->name, status); - ep->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else if (status & 0x0006) /* Rx Frame errors are counted in hardware. */ - ep->stats.rx_errors++; + dev->stats.rx_errors++; } else { /* Malloc up new buffer, compatible with net-2e. */ /* Omit the four octet CRC from the length. */ @@ -1223,8 +1221,8 @@ static int epic_rx(struct net_device *dev, int budget) } skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); - ep->stats.rx_packets++; - ep->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } work_done++; entry = (++ep->cur_rx) % RX_RING_SIZE; @@ -1259,7 +1257,7 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep) if (status == EpicRemoved) return; if (status & RxOverflow) /* Missed a Rx frame. 
*/ - ep->stats.rx_errors++; + dev->stats.rx_errors++; if (status & (RxOverflow | RxFull)) outw(RxQueued, ioaddr + COMMAND); } @@ -1357,17 +1355,16 @@ static int epic_close(struct net_device *dev) static struct net_device_stats *epic_get_stats(struct net_device *dev) { - struct epic_private *ep = netdev_priv(dev); long ioaddr = dev->base_addr; if (netif_running(dev)) { /* Update the error counts. */ - ep->stats.rx_missed_errors += inb(ioaddr + MPCNT); - ep->stats.rx_frame_errors += inb(ioaddr + ALICNT); - ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); + dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); + dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); } - return &ep->stats; + return &dev->stats; } /* Set or clear the multicast filter for this adaptor. diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 874973f558e92ee1b7ddc4fba185aecdc49f127a..10e39f2b31c3993e7e5bce57be2944702235481a 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -1442,8 +1442,10 @@ int __init init_module(void) dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]); if(io[this_dev] == 0) { - if(this_dev != 0) /* Only autoprobe 1st one */ + if (this_dev != 0) { /* Only autoprobe 1st one */ + free_netdev(dev); break; + } printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n"); } diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 6ed2df14ec8490eba68ed4b7f8af104830fe4949..6d653c459c1f7e817661225619ddb837ddb0622e 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -180,9 +180,9 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * @dty_tx: last buffer actually sent * @num_rx: number of receive buffers * @cur_rx: current receive buffer + * @vma: pointer to array of virtual memory addresses for buffers * @netdev: pointer to network device structure * @napi: NAPI structure - * @stats: network device statistics * @msg_enable: device state flags * @rx_lock: receive lock * @lock: device lock @@ -203,9 +203,10 @@ struct ethoc { unsigned int num_rx; unsigned int cur_rx; + void** vma; + struct net_device *netdev; struct napi_struct napi; - struct net_device_stats stats; u32 msg_enable; spinlock_t rx_lock; @@ -285,18 +286,22 @@ static inline void ethoc_disable_rx_and_tx(struct ethoc *dev) ethoc_write(dev, MODER, mode); } -static int ethoc_init_ring(struct ethoc *dev) +static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start) { struct ethoc_bd bd; int i; + void* vma; dev->cur_tx = 0; dev->dty_tx = 0; dev->cur_rx = 0; + ethoc_write(dev, TX_BD_NUM, dev->num_tx); + /* setup transmission buffers */ - bd.addr = virt_to_phys(dev->membase); + bd.addr = mem_start; bd.stat = TX_BD_IRQ | TX_BD_CRC; + vma = dev->membase; for (i = 0; i < dev->num_tx; i++) { if (i == dev->num_tx - 1) @@ -304,6 +309,9 @@ static int ethoc_init_ring(struct ethoc *dev) ethoc_write_bd(dev, i, &bd); bd.addr += ETHOC_BUFSIZ; + + dev->vma[i] = vma; + vma += ETHOC_BUFSIZ; } bd.stat = RX_BD_EMPTY | RX_BD_IRQ; @@ -314,6 +322,9 @@ static int ethoc_init_ring(struct ethoc *dev) ethoc_write_bd(dev, dev->num_tx + i, &bd); bd.addr += ETHOC_BUFSIZ; + + dev->vma[dev->num_tx + i] = vma; + vma += ETHOC_BUFSIZ; } return 0; @@ -354,39 +365,39 @@ static unsigned int ethoc_update_rx_stats(struct ethoc *dev, if (bd->stat & RX_BD_TL) { dev_err(&netdev->dev, "RX: frame too long\n"); - dev->stats.rx_length_errors++; + netdev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_SF) { dev_err(&netdev->dev, "RX: frame too 
short\n"); - dev->stats.rx_length_errors++; + netdev->stats.rx_length_errors++; ret++; } if (bd->stat & RX_BD_DN) { dev_err(&netdev->dev, "RX: dribble nibble\n"); - dev->stats.rx_frame_errors++; + netdev->stats.rx_frame_errors++; } if (bd->stat & RX_BD_CRC) { dev_err(&netdev->dev, "RX: wrong CRC\n"); - dev->stats.rx_crc_errors++; + netdev->stats.rx_crc_errors++; ret++; } if (bd->stat & RX_BD_OR) { dev_err(&netdev->dev, "RX: overrun\n"); - dev->stats.rx_over_errors++; + netdev->stats.rx_over_errors++; ret++; } if (bd->stat & RX_BD_MISS) - dev->stats.rx_missed_errors++; + netdev->stats.rx_missed_errors++; if (bd->stat & RX_BD_LC) { dev_err(&netdev->dev, "RX: late collision\n"); - dev->stats.collisions++; + netdev->stats.collisions++; ret++; } @@ -415,18 +426,18 @@ static int ethoc_rx(struct net_device *dev, int limit) skb = netdev_alloc_skb_ip_align(dev, size); if (likely(skb)) { - void *src = phys_to_virt(bd.addr); + void *src = priv->vma[entry]; memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); - priv->stats.rx_packets++; - priv->stats.rx_bytes += size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; netif_receive_skb(skb); } else { if (net_ratelimit()) dev_warn(&dev->dev, "low on memory - " "packet dropped\n"); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } } @@ -447,30 +458,30 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) if (bd->stat & TX_BD_LC) { dev_err(&netdev->dev, "TX: late collision\n"); - dev->stats.tx_window_errors++; + netdev->stats.tx_window_errors++; } if (bd->stat & TX_BD_RL) { dev_err(&netdev->dev, "TX: retransmit limit\n"); - dev->stats.tx_aborted_errors++; + netdev->stats.tx_aborted_errors++; } if (bd->stat & TX_BD_UR) { dev_err(&netdev->dev, "TX: underrun\n"); - dev->stats.tx_fifo_errors++; + netdev->stats.tx_fifo_errors++; } if (bd->stat & TX_BD_CS) { dev_err(&netdev->dev, "TX: carrier sense lost\n"); - dev->stats.tx_carrier_errors++; + netdev->stats.tx_carrier_errors++; } if (bd->stat & TX_BD_STATS) - dev->stats.tx_errors++; + netdev->stats.tx_errors++; - dev->stats.collisions += (bd->stat >> 4) & 0xf; - dev->stats.tx_bytes += bd->stat >> 16; - dev->stats.tx_packets++; + netdev->stats.collisions += (bd->stat >> 4) & 0xf; + netdev->stats.tx_bytes += bd->stat >> 16; + netdev->stats.tx_packets++; return 0; } @@ -501,7 +512,7 @@ static void ethoc_tx(struct net_device *dev) static irqreturn_t ethoc_interrupt(int irq, void *dev_id) { - struct net_device *dev = (struct net_device *)dev_id; + struct net_device *dev = dev_id; struct ethoc *priv = netdev_priv(dev); u32 pending; @@ -516,7 +527,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id) if (pending & INT_MASK_BUSY) { dev_err(&dev->dev, "packet dropped\n"); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; } if (pending & INT_MASK_RX) { @@ -600,8 +611,11 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) while (time_before(jiffies, timeout)) { u32 stat = ethoc_read(priv, MIISTATUS); - if (!(stat & MIISTATUS_BUSY)) + if (!(stat & MIISTATUS_BUSY)) { + /* reset MII command register */ + ethoc_write(priv, MIICOMMAND, 0); return 0; + } schedule(); } @@ -618,25 +632,16 @@ static void ethoc_mdio_poll(struct net_device *dev) { } -static int ethoc_mdio_probe(struct net_device *dev) +static int __devinit ethoc_mdio_probe(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); struct phy_device *phy; - int i; + int err; - for (i = 0; i < PHY_MAX_ADDR; i++) { - phy = 
priv->mdio->phy_map[i]; - if (phy) { - if (priv->phy_id != -1) { - /* attach to specified PHY */ - if (priv->phy_id == phy->addr) - break; - } else { - /* autoselect PHY if none was specified */ - if (phy->addr != 0) - break; - } - } + if (priv->phy_id != -1) { + phy = priv->mdio->phy_map[priv->phy_id]; + } else { + phy = phy_find_first(priv->mdio); } if (!phy) { @@ -644,11 +649,11 @@ static int ethoc_mdio_probe(struct net_device *dev) return -ENXIO; } - phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0, + err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0, PHY_INTERFACE_MODE_GMII); - if (IS_ERR(phy)) { + if (err) { dev_err(&dev->dev, "could not attach to PHY\n"); - return PTR_ERR(phy); + return err; } priv->phy = phy; @@ -658,8 +663,6 @@ static int ethoc_mdio_probe(struct net_device *dev) static int ethoc_open(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); - unsigned int min_tx = 2; - unsigned int num_bd; int ret; ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, @@ -667,14 +670,7 @@ static int ethoc_open(struct net_device *dev) if (ret) return ret; - /* calculate the number of TX/RX buffers, maximum 128 supported */ - num_bd = min_t(unsigned int, - 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); - priv->num_tx = max(min_tx, num_bd / 4); - priv->num_rx = num_bd - priv->num_tx; - ethoc_write(priv, TX_BD_NUM, priv->num_tx); - - ethoc_init_ring(priv); + ethoc_init_ring(priv, dev->mem_start); ethoc_reset(priv); if (netif_queue_stopped(dev)) { @@ -734,7 +730,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) phy = priv->phy; } - return phy_mii_ioctl(phy, mdio, cmd); + return phy_mii_ioctl(phy, ifr, cmd); } static int ethoc_config(struct net_device *dev, struct ifmap *map) @@ -812,8 +808,7 @@ static void ethoc_tx_timeout(struct net_device *dev) static struct net_device_stats *ethoc_stats(struct net_device *dev) { - struct ethoc *priv = netdev_priv(dev); - return &priv->stats; + return &dev->stats; } static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -824,7 +819,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) void *dest; if (unlikely(skb->len > ETHOC_BUFSIZ)) { - priv->stats.tx_errors++; + dev->stats.tx_errors++; goto out; } @@ -838,7 +833,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) else bd.stat &= ~TX_BD_PAD; - dest = phys_to_virt(bd.addr); + dest = priv->vma[entry]; memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); @@ -876,7 +871,7 @@ static const struct net_device_ops ethoc_netdev_ops = { * ethoc_probe() - initialize OpenCores ethernet MAC * pdev: platform device */ -static int ethoc_probe(struct platform_device *pdev) +static int __devinit ethoc_probe(struct platform_device *pdev) { struct net_device *netdev = NULL; struct resource *res = NULL; @@ -884,6 +879,7 @@ static int ethoc_probe(struct platform_device *pdev) struct resource *mem = NULL; struct ethoc *priv = NULL; unsigned int phy; + int num_bd; int ret = 0; /* allocate networking device */ @@ -965,7 +961,7 @@ static int ethoc_probe(struct platform_device *pdev) } } else { /* Allocate buffer memory */ - priv->membase = dma_alloc_coherent(NULL, + priv->membase = dmam_alloc_coherent(&pdev->dev, buffer_size, (void *)&netdev->mem_start, GFP_KERNEL); if (!priv->membase) { @@ -978,6 +974,18 @@ static int ethoc_probe(struct platform_device *pdev) priv->dma_alloc = buffer_size; } + /* calculate the number of TX/RX 
buffers, maximum 128 supported */ + num_bd = min_t(unsigned int, + 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); + priv->num_tx = max(2, num_bd / 4); + priv->num_rx = num_bd - priv->num_tx; + + priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); + if (!priv->vma) { + ret = -ENOMEM; + goto error; + } + /* Allow the platform setup code to pass in a MAC address. */ if (pdev->dev.platform_data) { struct ethoc_platform_data *pdata = @@ -1063,21 +1071,6 @@ static int ethoc_probe(struct platform_device *pdev) kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: - if (priv) { - if (priv->dma_alloc) - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, - netdev->mem_start); - else if (priv->membase) - devm_iounmap(&pdev->dev, priv->membase); - if (priv->iobase) - devm_iounmap(&pdev->dev, priv->iobase); - } - if (mem) - devm_release_mem_region(&pdev->dev, mem->start, - mem->end - mem->start + 1); - if (mmio) - devm_release_mem_region(&pdev->dev, mmio->start, - mmio->end - mmio->start + 1); free_netdev(netdev); out: return ret; @@ -1087,7 +1080,7 @@ static int ethoc_probe(struct platform_device *pdev) * ethoc_remove() - shutdown OpenCores ethernet MAC * @pdev: platform device */ -static int ethoc_remove(struct platform_device *pdev) +static int __devexit ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); @@ -1104,17 +1097,6 @@ static int ethoc_remove(struct platform_device *pdev) kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } - if (priv->dma_alloc) - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, - netdev->mem_start); - else { - devm_iounmap(&pdev->dev, priv->membase); - devm_release_mem_region(&pdev->dev, netdev->mem_start, - netdev->mem_end - netdev->mem_start + 1); - } - devm_iounmap(&pdev->dev, priv->iobase); - devm_release_mem_region(&pdev->dev, netdev->base_addr, - priv->io_region_size); unregister_netdev(netdev); free_netdev(netdev); } @@ -1139,7 +1121,7 @@ static int ethoc_resume(struct platform_device *pdev) static struct platform_driver ethoc_driver = { .probe = ethoc_probe, - .remove = ethoc_remove, + .remove = __devexit_p(ethoc_remove), .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 15f4f8d3d46d9e24396f783d79a027f97d90c246..d7e8f6b8f4cf38c54107b5a616d87af7c0b5db3d 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -382,8 +382,6 @@ struct netdev_private { spinlock_t lock; - struct net_device_stats stats; - /* Media monitoring timer. */ struct timer_list timer; @@ -1234,7 +1232,7 @@ static void fealnx_tx_timeout(struct net_device *dev) spin_unlock_irqrestore(&np->lock, flags); dev->trans_start = jiffies; /* prevent tx timeout */ - np->stats.tx_errors++; + dev->stats.tx_errors++; netif_wake_queue(dev); /* or .._start_.. ?? 
*/ } @@ -1479,10 +1477,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) if (intr_status & CNTOVF) { /* missed pkts */ - np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; + dev->stats.rx_missed_errors += + ioread32(ioaddr + TALLY) & 0x7fff; /* crc error */ - np->stats.rx_crc_errors += + dev->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; } @@ -1513,30 +1512,30 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) if (!(np->crvalue & CR_W_ENH)) { if (tx_status & (CSL | LC | EC | UDF | HF)) { - np->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & EC) - np->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (tx_status & CSL) - np->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & LC) - np->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (tx_status & UDF) - np->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if ((tx_status & HF) && np->mii.full_duplex == 0) - np->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; } else { - np->stats.tx_bytes += + dev->stats.tx_bytes += ((tx_control & PKTSMask) >> PKTSShift); - np->stats.collisions += + dev->stats.collisions += ((tx_status & NCRMask) >> NCRShift); - np->stats.tx_packets++; + dev->stats.tx_packets++; } } else { - np->stats.tx_bytes += + dev->stats.tx_bytes += ((tx_control & PKTSMask) >> PKTSShift); - np->stats.tx_packets++; + dev->stats.tx_packets++; } /* Free the original skb. */ @@ -1564,10 +1563,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) long data; data = ioread32(ioaddr + TSR); - np->stats.tx_errors += (data & 0xff000000) >> 24; - np->stats.tx_aborted_errors += (data & 0xff000000) >> 24; - np->stats.tx_window_errors += (data & 0x00ff0000) >> 16; - np->stats.collisions += (data & 0x0000ffff); + dev->stats.tx_errors += (data & 0xff000000) >> 24; + dev->stats.tx_aborted_errors += + (data & 0xff000000) >> 24; + dev->stats.tx_window_errors += + (data & 0x00ff0000) >> 16; + dev->stats.collisions += (data & 0x0000ffff); } if (--boguscnt < 0) { @@ -1593,10 +1594,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) /* read the tally counters */ /* missed pkts */ - np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; + dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; /* crc error */ - np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; + dev->stats.rx_crc_errors += + (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; if (debug) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", @@ -1635,13 +1637,13 @@ static int netdev_rx(struct net_device *dev) "%s: Receive error, Rx status %8.8x.\n", dev->name, rx_status); - np->stats.rx_errors++; /* end of a packet. */ + dev->stats.rx_errors++; /* end of a packet. 
*/ if (rx_status & (LONG | RUNT)) - np->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rx_status & RXER) - np->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & CRC) - np->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else { int need_to_reset = 0; int desno = 0; @@ -1667,7 +1669,7 @@ static int netdev_rx(struct net_device *dev) if (need_to_reset == 0) { int i; - np->stats.rx_length_errors++; + dev->stats.rx_length_errors++; /* free all rx descriptors related this long pkt */ for (i = 0; i < desno; ++i) { @@ -1733,8 +1735,8 @@ static int netdev_rx(struct net_device *dev) } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - np->stats.rx_packets++; - np->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } np->cur_rx = np->cur_rx->next_desc_logical; @@ -1754,11 +1756,13 @@ static struct net_device_stats *get_stats(struct net_device *dev) /* The chip only need report frame silently dropped. */ if (netif_running(dev)) { - np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; - np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; + dev->stats.rx_missed_errors += + ioread32(ioaddr + TALLY) & 0x7fff; + dev->stats.rx_crc_errors += + (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; } - return &np->stats; + return &dev->stats; } diff --git a/drivers/net/fec.c b/drivers/net/fec.c index edfff92a6d8ebd8c141b9697b216efccfbbec226..768b840aeb6b7b0bf2ceec87469bb0b4925b5d20 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -118,6 +118,8 @@ static unsigned char fec_mac_default[] = { #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) + /* The FEC stores dest/src/type, data, and checksum for receive packets. */ #define PKT_MAXBUF_SIZE 1518 @@ -187,6 +189,7 @@ struct fec_enet_private { int index; int link; int full_duplex; + struct completion mdio_done; }; static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); @@ -205,12 +208,12 @@ static void fec_stop(struct net_device *dev); #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) -#define FEC_MII_TIMEOUT 10000 +#define FEC_MII_TIMEOUT 1000 /* us */ /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) -static int +static netdev_tx_t fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); @@ -334,6 +337,11 @@ fec_enet_interrupt(int irq, void * dev_id) ret = IRQ_HANDLED; fec_enet_tx(dev); } + + if (int_events & FEC_ENET_MII) { + ret = IRQ_HANDLED; + complete(&fep->mdio_done); + } } while (int_events); return ret; @@ -608,18 +616,13 @@ static void fec_enet_adjust_link(struct net_device *dev) phy_print_status(phy_dev); } -/* - * NOTE: a MII transaction is during around 25 us, so polling it... 
- */ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; - int timeout = FEC_MII_TIMEOUT; + unsigned long time_left; fep->mii_timeout = 0; - - /* clear MII end of transfer bit*/ - writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + init_completion(&fep->mdio_done); /* start a read op */ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | @@ -627,13 +630,12 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ - while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { - cpu_relax(); - if (timeout-- < 0) { - fep->mii_timeout = 1; - printk(KERN_ERR "FEC: MDIO read timeout\n"); - return -ETIMEDOUT; - } + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + fep->mii_timeout = 1; + printk(KERN_ERR "FEC: MDIO read timeout\n"); + return -ETIMEDOUT; } /* return value */ @@ -644,12 +646,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; - int timeout = FEC_MII_TIMEOUT; + unsigned long time_left; fep->mii_timeout = 0; - - /* clear MII end of transfer bit*/ - writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + init_completion(&fep->mdio_done); /* start a read op */ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | @@ -658,13 +658,12 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ - while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { - cpu_relax(); - if (timeout-- < 0) { - fep->mii_timeout = 1; - printk(KERN_ERR "FEC: MDIO write timeout\n"); - return -ETIMEDOUT; - } + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + fep->mii_timeout = 1; + printk(KERN_ERR "FEC: MDIO write timeout\n"); + return -ETIMEDOUT; } return 0; @@ -679,30 +678,24 @@ static int fec_enet_mii_probe(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); struct phy_device *phy_dev = NULL; - int phy_addr; + int ret; fep->phy_dev = NULL; /* find the first phy */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (fep->mii_bus->phy_map[phy_addr]) { - phy_dev = fep->mii_bus->phy_map[phy_addr]; - break; - } - } - + phy_dev = phy_find_first(fep->mii_bus); if (!phy_dev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } /* attach the mac to the phy */ - phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), + ret = phy_connect_direct(dev, phy_dev, &fec_enet_adjust_link, 0, PHY_INTERFACE_MODE_MII); - if (IS_ERR(phy_dev)) { + if (ret) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); - return PTR_ERR(phy_dev); + return ret; } /* mask with MAC supported features */ @@ -834,7 +827,7 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, if_mii(rq), cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static void fec_enet_free_buffers(struct net_device *dev) @@ -1222,7 +1215,7 @@ fec_restart(struct net_device *dev, int duplex) writel(0, fep->hwp + FEC_R_DES_ACTIVE); /* Enable interrupts we wish to service */ - writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } static void @@ -1241,11 +1234,8 @@ fec_stop(struct net_device *dev) /* Whack a reset. We should wait for this. 
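The fec_enet_mii_probe() rework above replaces the open-coded scan of mii_bus->phy_map with phylib helpers. Reduced to the two calls involved (the signatures mirror the usage in the hunk; example_adjust_link is a stand-in callback):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *dev)
{
	/* react to link/speed/duplex changes reported by phylib */
}

static int example_attach_phy(struct net_device *dev, struct mii_bus *bus)
{
	struct phy_device *phydev = phy_find_first(bus);

	if (!phydev)
		return -ENODEV;

	/* attach directly to the phy_device we already hold */
	return phy_connect_direct(dev, phydev, example_adjust_link,
				  0 /* flags */, PHY_INTERFACE_MODE_MII);
}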
*/ writel(1, fep->hwp + FEC_ECNTRL); udelay(10); - - /* Clear outstanding MII command interrupts. */ - writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); - writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } static int __devinit @@ -1365,10 +1355,11 @@ fec_drv_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int -fec_suspend(struct platform_device *dev, pm_message_t state) +fec_suspend(struct device *dev) { - struct net_device *ndev = platform_get_drvdata(dev); + struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep; if (ndev) { @@ -1381,9 +1372,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state) } static int -fec_resume(struct platform_device *dev) +fec_resume(struct device *dev) { - struct net_device *ndev = platform_get_drvdata(dev); + struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep; if (ndev) { @@ -1395,15 +1386,26 @@ fec_resume(struct platform_device *dev) return 0; } +static const struct dev_pm_ops fec_pm_ops = { + .suspend = fec_suspend, + .resume = fec_resume, + .freeze = fec_suspend, + .thaw = fec_resume, + .poweroff = fec_suspend, + .restore = fec_resume, +}; +#endif + static struct platform_driver fec_driver = { .driver = { - .name = "fec", - .owner = THIS_MODULE, + .name = "fec", + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &fec_pm_ops, +#endif }, - .probe = fec_probe, - .remove = __devexit_p(fec_drv_remove), - .suspend = fec_suspend, - .resume = fec_resume, + .probe = fec_probe, + .remove = __devexit_p(fec_drv_remove), }; static int __init diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 25e6cc6840b1ecf1993630d2f1b5acf19a3675b6..d1a5b17b2a95567b3466674ec61acdc85657a5b2 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -826,7 +826,7 @@ static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!priv->phydev) return -ENOTSUPP; - return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); + return phy_mii_ioctl(priv->phydev, rq, cmd); } static const struct net_device_ops mpc52xx_fec_netdev_ops = { @@ -875,17 +875,21 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) if (rv) { printk(KERN_ERR DRIVER_NAME ": " "Error while parsing device node resource\n" ); - return rv; + goto err_netdev; } if ((mem.end - mem.start + 1) < sizeof(struct mpc52xx_fec)) { printk(KERN_ERR DRIVER_NAME " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n", (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec)); - return -EINVAL; + rv = -EINVAL; + goto err_netdev; } - if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), DRIVER_NAME)) - return -EBUSY; + if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), + DRIVER_NAME)) { + rv = -EBUSY; + goto err_netdev; + } /* Init ether ndev with what we have */ ndev->netdev_ops = &mpc52xx_fec_netdev_ops; @@ -901,7 +905,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) if (!priv->fec) { rv = -ENOMEM; - goto probe_error; + goto err_mem_region; } /* Bestcomm init */ @@ -914,7 +918,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) if (!priv->rx_dmatsk || !priv->tx_dmatsk) { printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" ); rv = -ENOMEM; - goto probe_error; + goto err_rx_tx_dmatsk; } /* Get the IRQ we need one by one */ @@ -966,33 +970,25 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) rv = 
register_netdev(ndev); if (rv < 0) - goto probe_error; + goto err_node; /* We're done ! */ dev_set_drvdata(&op->dev, ndev); return 0; - - /* Error handling - free everything that might be allocated */ -probe_error: - - if (priv->phy_node) - of_node_put(priv->phy_node); - priv->phy_node = NULL; - +err_node: + of_node_put(priv->phy_node); irq_dispose_mapping(ndev->irq); - +err_rx_tx_dmatsk: if (priv->rx_dmatsk) bcom_fec_rx_release(priv->rx_dmatsk); if (priv->tx_dmatsk) bcom_fec_tx_release(priv->tx_dmatsk); - - if (priv->fec) - iounmap(priv->fec); - + iounmap(priv->fec); +err_mem_region: release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); - +err_netdev: free_netdev(ndev); return rv; diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 006f64d9f96a695e6fbde63b0269121da323df0b..dbaf72cbb23383d431a201672d195b73fdc51bc8 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c @@ -29,15 +29,14 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, int reg, u32 value) { struct mpc52xx_fec_mdio_priv *priv = bus->priv; - struct mpc52xx_fec __iomem *fec; + struct mpc52xx_fec __iomem *fec = priv->regs; int tries = 3; value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; - fec = priv->regs; out_be32(&fec->ievent, FEC_IEVENT_MII); - out_be32(&priv->regs->mii_data, value); + out_be32(&fec->mii_data, value); /* wait for it to finish, this takes about 23 us on lite5200b */ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) @@ -47,7 +46,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, return -ETIMEDOUT; return value & FEC_MII_DATA_OP_RD ? - in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0; + in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0; } static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) @@ -69,9 +68,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, struct device_node *np = of->dev.of_node; struct mii_bus *bus; struct mpc52xx_fec_mdio_priv *priv; - struct resource res = {}; + struct resource res; int err; - int i; bus = mdiobus_alloc(); if (bus == NULL) @@ -93,7 +91,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, err = of_address_to_resource(np, 0, &res); if (err) goto out_free; - priv->regs = ioremap(res.start, res.end - res.start + 1); + priv->regs = ioremap(res.start, resource_size(&res)); if (priv->regs == NULL) { err = -ENOMEM; goto out_free; @@ -118,10 +116,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, out_unmap: iounmap(priv->regs); out_free: - for (i=0; iirq[i] != PHY_POLL) - irq_dispose_mapping(bus->irq[i]); - kfree(bus->irq); kfree(priv); mdiobus_free(bus); @@ -133,23 +127,16 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of) struct device *dev = &of->dev; struct mii_bus *bus = dev_get_drvdata(dev); struct mpc52xx_fec_mdio_priv *priv = bus->priv; - int i; mdiobus_unregister(bus); dev_set_drvdata(dev, NULL); - iounmap(priv->regs); - for (i=0; iirq[i] != PHY_POLL) - irq_dispose_mapping(bus->irq[i]); kfree(priv); - kfree(bus->irq); mdiobus_free(bus); return 0; } - static struct of_device_id mpc52xx_fec_mdio_match[] = { { .compatible = "fsl,mpc5200b-mdio", }, { .compatible = "fsl,mpc5200-mdio", }, @@ -171,5 +158,4 @@ struct of_platform_driver mpc52xx_fec_mdio_driver = { /* let fec driver call it, since this has to be registered before it */ EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); - MODULE_LICENSE("Dual BSD/GPL"); diff --git 
a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 268ea4d566d732b1fd0705898d82dd854eb8be8f..4da05b1b445c033a3606bdd0d1cf05ca0675660b 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -89,8 +89,10 @@ #define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ #define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ #define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ -#define DEV_HAS_STATISTICS_V2 0x0000600 /* device supports hw statistics version 2 */ -#define DEV_HAS_STATISTICS_V3 0x0000e00 /* device supports hw statistics version 3 */ +#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */ +#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */ +#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */ +#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */ #define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ #define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ #define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ @@ -2468,7 +2470,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) struct ring_desc_ex* orig_get_tx = np->get_tx.ex; while ((np->get_tx.ex != np->put_tx.ex) && - !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && + !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && (tx_work < limit)) { dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", @@ -6067,111 +6069,111 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { }, { /* MCP55 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0372), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, }, { /* MCP55 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0373), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x03E5), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x03E6), - .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x03EE), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x03EF), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0450), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0451), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0452), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0453), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data 
= DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x054C), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x054D), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x054E), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x054F), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x07DC), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x07DD), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP73 Ethernet 
Controller */ PCI_DEVICE(0x10DE, 0x07DE), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x07DF), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0760), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0761), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0762), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0763), - .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0AB0), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0AB1), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0AB2), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0AB3), - .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, }, { /* MCP89 Ethernet Controller */ PCI_DEVICE(0x10DE, 0x0D7D), - .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, }, {0,}, }; diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 309a0eaddd815739d33c196d94ce3aad770cec34..f08cff9020bdc4972b0f76dbaa2b61eb880d0f95 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -963,12 +963,11 @@ static const struct ethtool_ops fs_ethtool_ops = { static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct fs_enet_private *fep = netdev_priv(dev); - struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; if (!netif_running(dev)) return -EINVAL; - return phy_mii_ioctl(fep->phydev, mii, cmd); + return phy_mii_ioctl(fep->phydev, rq, cmd); } extern int fs_mii_connect(struct net_device *dev); diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h index 1f7d865cedb6791af214de3bd52362b0f906a399..bd17a2a0139bf6dd30e000582c5e8e43826e42ce 100644 --- a/drivers/net/fsl_pq_mdio.h +++ b/drivers/net/fsl_pq_mdio.h @@ -39,7 +39,7 @@ struct fsl_pq_mdio { u8 reserved[28]; /* Space holder */ u32 utbipar; /* TBI phy address reg (only on UCC) */ u8 res4[2728]; -} __attribute__ ((packed)); +} __packed; int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 28b53d1cd4f168701fa4ce41a7c5e0edbaa6f783..27f02970d89874d7876d65366bacd3ac4ef3cf64 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -85,6 +85,7 @@ #include #include +#include #include #include #include @@ -685,8 +686,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) priv->rx_queue[i] = NULL; for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( - sizeof (struct gfar_priv_tx_q), GFP_KERNEL); + priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), + GFP_KERNEL); if (!priv->tx_queue[i]) { err = -ENOMEM; goto tx_alloc_failed; @@ -698,8 +699,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) } for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( - sizeof (struct gfar_priv_rx_q), GFP_KERNEL); + priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), + GFP_KERNEL); if (!priv->rx_queue[i]) { err = -ENOMEM; goto rx_alloc_failed; @@ -846,7 +847,7 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!priv->phydev) return -ENODEV; - return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); + return phy_mii_ioctl(priv->phydev, rq, cmd); } static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) @@ -928,6 +929,34 @@ static void gfar_init_filer_table(struct gfar_private *priv) } } +static void gfar_detect_errata(struct gfar_private *priv) +{ + struct device *dev = &priv->ofdev->dev; + unsigned int pvr = mfspr(SPRN_PVR); + unsigned int svr = mfspr(SPRN_SVR); + unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ + unsigned int rev = 
svr & 0xffff; + + /* MPC8313 Rev 2.0 and higher; All MPC837x */ + if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_74; + + /* MPC8313 and MPC837x all rev */ + if ((pvr == 0x80850010 && mod == 0x80b0) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_76; + + /* MPC8313 and MPC837x all rev */ + if ((pvr == 0x80850010 && mod == 0x80b0) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_A002; + + if (priv->errata) + dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", + priv->errata); +} + /* Set up the ethernet device structure, private data, * and anything else we need before we start */ static int gfar_probe(struct of_device *ofdev, @@ -960,6 +989,8 @@ static int gfar_probe(struct of_device *ofdev, dev_set_drvdata(&ofdev->dev, priv); regs = priv->gfargrp[0].regs; + gfar_detect_errata(priv); + /* Stop the DMA engine now, in case it was running before */ /* (The firmware could have used it, and left it running). */ gfar_halt(dev); @@ -974,7 +1005,10 @@ static int gfar_probe(struct of_device *ofdev, gfar_write(®s->maccfg1, tempval); /* Initialize MACCFG2. */ - gfar_write(®s->maccfg2, MACCFG2_INIT_SETTINGS); + tempval = MACCFG2_INIT_SETTINGS; + if (gfar_has_errata(priv, GFAR_ERRATA_74)) + tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; + gfar_write(®s->maccfg2, tempval); /* Initialize ECNTRL */ gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); @@ -1541,6 +1575,29 @@ static void init_registers(struct net_device *dev) gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); } +static int __gfar_is_rx_idle(struct gfar_private *priv) +{ + u32 res; + + /* + * Normaly TSEC should not hang on GRS commands, so we should + * actually wait for IEVENT_GRSC flag. + */ + if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) + return 0; + + /* + * Read the eTSEC register at offset 0xD1C. If bits 7-14 are + * the same as bits 23-30, the eTSEC Rx is assumed to be idle + * and the Rx can be safely reset. + */ + res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); + res &= 0x7f807f80; + if ((res & 0xffff) == (res >> 16)) + return 1; + + return 0; +} /* Halt the receive and transmit queues */ static void gfar_halt_nodisable(struct net_device *dev) @@ -1564,12 +1621,18 @@ static void gfar_halt_nodisable(struct net_device *dev) tempval = gfar_read(®s->dmactrl); if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != (DMACTRL_GRS | DMACTRL_GTS)) { + int ret; + tempval |= (DMACTRL_GRS | DMACTRL_GTS); gfar_write(®s->dmactrl, tempval); - spin_event_timeout(((gfar_read(®s->ievent) & - (IEVENT_GRSC | IEVENT_GTSC)) == - (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); + do { + ret = spin_event_timeout(((gfar_read(®s->ievent) & + (IEVENT_GRSC | IEVENT_GTSC)) == + (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); + if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) + ret = __gfar_is_rx_idle(priv); + } while (!ret); } } @@ -1987,6 +2050,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int nr_frags, nr_txbds, length; union skb_shared_tx *shtx; + /* + * TOE=1 frames larger than 2500 bytes may see excess delays + * before start of transmission. 
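gfar_detect_errata() above introduces a detect-once, test-cheaply scheme: probe reads the processor and system version registers, folds the result into a bitmask in the private structure, and the data path only tests bits via gfar_has_errata(). A stripped-down sketch of the same idea (powerpc-only; it assumes <asm/reg.h> supplies mfspr() and the SPRN_PVR/SPRN_SVR numbers, and the revision match below is purely illustrative):

#include <asm/reg.h>

enum example_errata {
	EXAMPLE_ERRATA_X = 0x01,
	EXAMPLE_ERRATA_Y = 0x02,
};

static unsigned int example_detect_errata(void)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int errata = 0;

	if (pvr == 0x80850010 && (svr >> 16) == 0x80b0)	/* made-up match */
		errata |= EXAMPLE_ERRATA_X;

	return errata;
}

static inline int example_has_errata(unsigned int errata,
				     enum example_errata e)
{
	return errata & e;
}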
+ */ + if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && + skb->ip_summed == CHECKSUM_PARTIAL && + skb->len > 2500)) { + int ret; + + ret = skb_checksum_help(skb); + if (ret) + return ret; + } + rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; txq = netdev_get_tx_queue(dev, rq); @@ -2300,7 +2377,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) * to allow huge frames, and to check the length */ tempval = gfar_read(®s->maccfg2); - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || + gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); else tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); @@ -2342,6 +2420,15 @@ static void gfar_timeout(struct net_device *dev) schedule_work(&priv->reset_task); } +static void gfar_align_skb(struct sk_buff *skb) +{ + /* We need the data buffer to be aligned properly. We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, RXBUF_ALIGNMENT - + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); +} + /* Interrupt Handler for Transmit complete */ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@ -2426,9 +2513,10 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) */ if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && skb_recycle_check(skb, priv->rx_buffer_size + - RXBUF_ALIGNMENT)) + RXBUF_ALIGNMENT)) { + gfar_align_skb(skb); __skb_queue_head(&priv->rx_recycle, skb); - else + } else dev_kfree_skb_any(skb); tx_queue->tx_skbuff[skb_dirtytx] = NULL; @@ -2491,29 +2579,28 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, gfar_init_rxbdp(rx_queue, bdp, buf); } - -struct sk_buff * gfar_new_skb(struct net_device *dev) +static struct sk_buff * gfar_alloc_skb(struct net_device *dev) { - unsigned int alignamount; struct gfar_private *priv = netdev_priv(dev); struct sk_buff *skb = NULL; - skb = __skb_dequeue(&priv->rx_recycle); - if (!skb) - skb = netdev_alloc_skb(dev, - priv->rx_buffer_size + RXBUF_ALIGNMENT); - + skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); if (!skb) return NULL; - alignamount = RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); + gfar_align_skb(skb); - /* We need the data buffer to be aligned properly. We will reserve - * as many bytes as needed to align the data properly - */ - skb_reserve(skb, alignamount); - GFAR_CB(skb)->alignamount = alignamount; + return skb; +} + +struct sk_buff * gfar_new_skb(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb = NULL; + + skb = __skb_dequeue(&priv->rx_recycle); + if (!skb) + skb = gfar_alloc_skb(dev); return skb; } @@ -2666,17 +2753,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) if (unlikely(!newskb)) newskb = skb; - else if (skb) { - /* - * We need to un-reserve() the skb to what it - * was before gfar_new_skb() re-aligned - * it to an RXBUF_ALIGNMENT boundary - * before we put the skb back on the - * recycle list. 
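gfar_align_skb() and gfar_alloc_skb() above centralise the receive-buffer alignment that used to be open-coded in gfar_new_skb(). The trick is simply over-allocating by the alignment and reserving the remainder, as in this sketch (EXAMPLE_ALIGNMENT stands in for the driver's RXBUF_ALIGNMENT):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_ALIGNMENT 64	/* illustrative; gianfar uses RXBUF_ALIGNMENT */

static struct sk_buff *example_alloc_aligned_skb(struct net_device *dev,
						 unsigned int size)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, size + EXAMPLE_ALIGNMENT);

	if (!skb)
		return NULL;

	/* reserve enough headroom that skb->data is EXAMPLE_ALIGNMENT aligned */
	skb_reserve(skb, EXAMPLE_ALIGNMENT -
		    (((unsigned long)skb->data) & (EXAMPLE_ALIGNMENT - 1)));
	return skb;
}

Because the recycle path now re-aligns a buffer before queueing it back on rx_recycle, the per-skb alignamount bookkeeping removed in the surrounding hunks is no longer needed.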
- */ - skb_reserve(skb, -GFAR_CB(skb)->alignamount); + else if (skb) __skb_queue_head(&priv->rx_recycle, skb); - } } else { /* Increment the number of packets */ rx_queue->stats.rx_packets++; diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index ac4a92e08c09b67b304d3084bffd9a494c80afb5..710810e2adb4aedc86f9c96051a3ee76980970a7 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -1025,6 +1025,12 @@ struct gfar_priv_grp { char int_name_er[GFAR_INT_NAME_MAX]; }; +enum gfar_errata { + GFAR_ERRATA_74 = 0x01, + GFAR_ERRATA_76 = 0x02, + GFAR_ERRATA_A002 = 0x04, +}; + /* Struct stolen almost completely (and shamelessly) from the FCC enet source * (Ok, that's not so true anymore, but there is a family resemblence) * The GFAR buffer descriptors track the ring buffers. The rx_bd_base @@ -1049,6 +1055,7 @@ struct gfar_private { struct device_node *node; struct net_device *ndev; struct of_device *ofdev; + enum gfar_errata errata; struct gfar_priv_grp gfargrp[MAXGROUPS]; struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; @@ -1111,6 +1118,12 @@ struct gfar_private { extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; +static inline int gfar_has_errata(struct gfar_private *priv, + enum gfar_errata err) +{ + return priv->errata & err; +} + static inline u32 gfar_read(volatile unsigned __iomem *addr) { u32 val; diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 3a029d02c2b410405dd37c5e6b1b7e8e4ccb63c9..4d09eab3548e9d6c563ee06356d6b951ee5adb8c 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -1555,7 +1555,6 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev } /* setup NAPI */ - memset(&greth->napi, 0, sizeof(greth->napi)); netif_napi_add(dev, &greth->napi, greth_poll, 64); return 0; diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 61f2b1cfcd46317ada1a0a6333d900810b28d9e0..49aac7027fbb8841f62f11b18492119e02bb0bb1 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -492,7 +492,6 @@ struct hamachi_private { struct sk_buff* tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_ring_dma; dma_addr_t rx_ring_dma; - struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ /* Frequently used and paired value: keep adjacent for cache effect. */ spinlock_t lock; @@ -1036,7 +1035,7 @@ static inline int hamachi_tx(struct net_device *dev) if (entry >= TX_RING_SIZE-1) hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); - hmp->stats.tx_packets++; + dev->stats.tx_packets++; } return 0; @@ -1167,7 +1166,7 @@ static void hamachi_tx_timeout(struct net_device *dev) /* Trigger an immediate transmit demand. */ dev->trans_start = jiffies; /* prevent tx timeout */ - hmp->stats.tx_errors++; + dev->stats.tx_errors++; /* Restart the chip's Tx/Rx processes . 
*/ writew(0x0002, ioaddr + TxCmd); /* STOP Tx */ @@ -1434,7 +1433,7 @@ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance) if (entry >= TX_RING_SIZE-1) hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); - hmp->stats.tx_packets++; + dev->stats.tx_packets++; } if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){ /* The ring is no longer full */ @@ -1525,18 +1524,22 @@ static int hamachi_rx(struct net_device *dev) le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000, le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff, le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length)); - hmp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* else Omit for prototype errata??? */ if (frame_status & 0x00380000) { /* There was an error. */ if (hamachi_debug > 2) printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n", frame_status); - hmp->stats.rx_errors++; - if (frame_status & 0x00600000) hmp->stats.rx_length_errors++; - if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++; - if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++; - if (frame_status < 0) hmp->stats.rx_dropped++; + dev->stats.rx_errors++; + if (frame_status & 0x00600000) + dev->stats.rx_length_errors++; + if (frame_status & 0x00080000) + dev->stats.rx_frame_errors++; + if (frame_status & 0x00100000) + dev->stats.rx_crc_errors++; + if (frame_status < 0) + dev->stats.rx_dropped++; } else { struct sk_buff *skb; /* Omit CRC */ @@ -1654,7 +1657,7 @@ static int hamachi_rx(struct net_device *dev) #endif /* RX_CHECKSUM */ netif_rx(skb); - hmp->stats.rx_packets++; + dev->stats.rx_packets++; } entry = (++hmp->cur_rx) % RX_RING_SIZE; } @@ -1724,9 +1727,9 @@ static void hamachi_error(struct net_device *dev, int intr_status) dev->name, intr_status); /* Hmmmmm, it's not clear how to recover from PCI faults. */ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) - hmp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) - hmp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } static int hamachi_close(struct net_device *dev) @@ -1828,19 +1831,27 @@ static struct net_device_stats *hamachi_get_stats(struct net_device *dev) so I think I'll comment it out here and see if better things happen. 
*/ - /* hmp->stats.tx_packets = readl(ioaddr + 0x000); */ - - hmp->stats.rx_bytes = readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */ - hmp->stats.tx_bytes = readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */ - hmp->stats.multicast = readl(ioaddr + 0x320); /* Multicast Rx */ - - hmp->stats.rx_length_errors = readl(ioaddr + 0x368); /* Over+Undersized */ - hmp->stats.rx_over_errors = readl(ioaddr + 0x35C); /* Jabber */ - hmp->stats.rx_crc_errors = readl(ioaddr + 0x360); /* Jabber */ - hmp->stats.rx_frame_errors = readl(ioaddr + 0x364); /* Symbol Errs */ - hmp->stats.rx_missed_errors = readl(ioaddr + 0x36C); /* Dropped */ - - return &hmp->stats; + /* dev->stats.tx_packets = readl(ioaddr + 0x000); */ + + /* Total Uni+Brd+Multi */ + dev->stats.rx_bytes = readl(ioaddr + 0x330); + /* Total Uni+Brd+Multi */ + dev->stats.tx_bytes = readl(ioaddr + 0x3B0); + /* Multicast Rx */ + dev->stats.multicast = readl(ioaddr + 0x320); + + /* Over+Undersized */ + dev->stats.rx_length_errors = readl(ioaddr + 0x368); + /* Jabber */ + dev->stats.rx_over_errors = readl(ioaddr + 0x35C); + /* Jabber */ + dev->stats.rx_crc_errors = readl(ioaddr + 0x360); + /* Symbol Errs */ + dev->stats.rx_frame_errors = readl(ioaddr + 0x364); + /* Dropped */ + dev->stats.rx_missed_errors = readl(ioaddr + 0x36C); + + return &dev->stats; } static void set_rx_mode(struct net_device *dev) diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 68e5ac8832ad0b8a1678fb50e38d996fbd684524..ce587f4c4203f0d4a7952a70ff664cb07c1e2c21 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -168,7 +168,6 @@ struct hp100_private { u_char mac1_mode; u_char mac2_mode; u_char hash_bytes[8]; - struct net_device_stats stats; /* Rings for busmaster mode: */ hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */ @@ -721,9 +720,10 @@ static int __devinit hp100_probe1(struct net_device *dev, int ioaddr, /* Conversion to new PCI API : * Pages are always aligned and zeroed, no need to it ourself. 
* Doc says should be OK for EISA bus as well - Jean II */ - if ((lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr)) == NULL) { + lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr); + if (!lp->page_vaddr_algn) { err = -ENOMEM; - goto out2; + goto out_mem_ptr; } lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn); @@ -799,6 +799,7 @@ static int __devinit hp100_probe1(struct net_device *dev, int ioaddr, pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f, lp->page_vaddr_algn, virt_to_whatever(dev, lp->page_vaddr_algn)); +out_mem_ptr: if (mem_ptr_virt) iounmap(mem_ptr_virt); out2: @@ -1582,8 +1583,8 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, spin_unlock_irqrestore(&lp->lock, flags); /* Update statistics */ - lp->stats.tx_packets++; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; return NETDEV_TX_OK; @@ -1740,8 +1741,8 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */ - lp->stats.tx_packets++; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; hp100_ints_on(); spin_unlock_irqrestore(&lp->lock, flags); @@ -1822,7 +1823,7 @@ static void hp100_rx(struct net_device *dev) printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n", dev->name, pkt_len); #endif - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { /* skb successfully allocated */ u_char *ptr; @@ -1848,8 +1849,8 @@ static void hp100_rx(struct net_device *dev) ptr[9], ptr[10], ptr[11]); #endif netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } /* Indicate the card that we have got the packet */ @@ -1858,7 +1859,7 @@ static void hp100_rx(struct net_device *dev) switch (header & 0x00070000) { case (HP100_MULTI_ADDR_HASH << 16): case (HP100_MULTI_ADDR_NO_HASH << 16): - lp->stats.multicast++; + dev->stats.multicast++; break; } } /* end of while(there are packets) loop */ @@ -1930,7 +1931,7 @@ static void hp100_rx_bm(struct net_device *dev) if (ptr->skb == NULL) { printk("hp100: %s: rx_bm: skb null\n", dev->name); /* can happen if we only allocated room for the pdh due to memory shortage. */ - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { skb_trim(ptr->skb, pkt_len); /* Shorten it */ ptr->skb->protocol = @@ -1938,14 +1939,14 @@ static void hp100_rx_bm(struct net_device *dev) netif_rx(ptr->skb); /* Up and away... 
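Both the mpc52xx FEC probe earlier in this series and the hp100 probe here are reshaped into the conventional one-label-per-resource unwind: each acquisition failure jumps to the label that releases everything obtained so far, in reverse order. The general shape, with purely illustrative resources rather than either driver's:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_ctx {
	void *a;
	void *b;
};

static int example_probe(struct example_ctx *ctx)
{
	int err;

	ctx->a = kzalloc(64, GFP_KERNEL);
	if (!ctx->a)
		return -ENOMEM;		/* nothing to unwind yet */

	ctx->b = kzalloc(64, GFP_KERNEL);
	if (!ctx->b) {
		err = -ENOMEM;
		goto err_free_a;	/* undo only what was acquired */
	}

	return 0;

err_free_a:
	kfree(ctx->a);
	ctx->a = NULL;
	return err;
}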
*/ - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } switch (header & 0x00070000) { case (HP100_MULTI_ADDR_HASH << 16): case (HP100_MULTI_ADDR_NO_HASH << 16): - lp->stats.multicast++; + dev->stats.multicast++; break; } } else { @@ -1954,7 +1955,7 @@ static void hp100_rx_bm(struct net_device *dev) #endif if (ptr->skb != NULL) dev_kfree_skb_any(ptr->skb); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } lp->rxrhead = lp->rxrhead->next; @@ -1992,14 +1993,13 @@ static struct net_device_stats *hp100_get_stats(struct net_device *dev) hp100_update_stats(dev); hp100_ints_on(); spin_unlock_irqrestore(&lp->lock, flags); - return &(lp->stats); + return &(dev->stats); } static void hp100_update_stats(struct net_device *dev) { int ioaddr = dev->base_addr; u_short val; - struct hp100_private *lp = netdev_priv(dev); #ifdef HP100_DEBUG_B hp100_outw(0x4216, TRACE); @@ -2009,14 +2009,14 @@ static void hp100_update_stats(struct net_device *dev) /* Note: Statistics counters clear when read. */ hp100_page(MAC_CTRL); val = hp100_inw(DROPPED) & 0x0fff; - lp->stats.rx_errors += val; - lp->stats.rx_over_errors += val; + dev->stats.rx_errors += val; + dev->stats.rx_over_errors += val; val = hp100_inb(CRC); - lp->stats.rx_errors += val; - lp->stats.rx_crc_errors += val; + dev->stats.rx_errors += val; + dev->stats.rx_crc_errors += val; val = hp100_inb(ABORT); - lp->stats.tx_errors += val; - lp->stats.tx_aborted_errors += val; + dev->stats.tx_errors += val; + dev->stats.tx_aborted_errors += val; hp100_page(PERFORMANCE); } @@ -2025,7 +2025,6 @@ static void hp100_misc_interrupt(struct net_device *dev) #ifdef HP100_DEBUG_B int ioaddr = dev->base_addr; #endif - struct hp100_private *lp = netdev_priv(dev); #ifdef HP100_DEBUG_B int ioaddr = dev->base_addr; @@ -2034,8 +2033,8 @@ static void hp100_misc_interrupt(struct net_device *dev) #endif /* Note: Statistics counters clear when read. */ - lp->stats.rx_errors++; - lp->stats.tx_errors++; + dev->stats.rx_errors++; + dev->stats.tx_errors++; } static void hp100_clear_stats(struct hp100_private *lp, int ioaddr) diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 86438b59fa214e9f2f5c4c62f71b23f48c1e88d8..187622f1c81611f3b5b269c7c730610460c03210 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -63,6 +63,7 @@ static bool igb_sgmii_active_82575(struct e1000_hw *); static s32 igb_reset_init_script_82575(struct e1000_hw *); static s32 igb_read_mac_addr_82575(struct e1000_hw *); static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, @@ -70,6 +71,35 @@ static const u16 e1000_82580_rxpbs_table[] = #define E1000_82580_RXPBS_TABLE_SIZE \ (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. 
+ **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + static s32 igb_get_invariants_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; @@ -130,27 +160,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { case E1000_CTRL_EXT_LINK_MODE_SGMII: dev_spec->sgmii_active = true; - ctrl_ext |= E1000_CTRL_I2C_ENA; break; case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; - ctrl_ext |= E1000_CTRL_I2C_ENA; break; default: - ctrl_ext &= ~E1000_CTRL_I2C_ENA; break; } - wr32(E1000_CTRL_EXT, ctrl_ext); - - /* - * if using i2c make certain the MDICNFG register is cleared to prevent - * communications from being misrouted to the mdic registers - */ - if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580)) - wr32(E1000_MDICNFG, 0); - /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ @@ -228,19 +246,29 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 100; + ctrl_ext = rd32(E1000_CTRL_EXT); + /* PHY function pointers */ if (igb_sgmii_active_82575(hw)) { - phy->ops.reset = igb_phy_hw_reset_sgmii_82575; - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; } else if (hw->mac.type >= e1000_82580) { - phy->ops.reset = igb_phy_hw_reset; - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; } else { - phy->ops.reset = igb_phy_hw_reset; - phy->ops.read_reg = igb_read_phy_reg_igp; - phy->ops.write_reg = igb_write_phy_reg_igp; + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; } /* set lan id */ @@ -295,6 +323,10 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; return igb_acquire_swfw_sync_82575(hw, mask); } @@ -312,6 +344,10 @@ static void igb_release_phy_82575(struct e1000_hw *hw) if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; igb_release_swfw_sync_82575(hw, mask); } @@ -392,6 +428,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_id; u32 ctrl_ext; + u32 mdic; /* * For SGMII PHYs, we try the list of possible addresses until @@ -406,6 +443,29 @@ 
static s32 igb_get_phy_id_82575(struct e1000_hw *hw) goto out; } + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + /* Power on sgmii phy if it is disabled */ ctrl_ext = rd32(E1000_CTRL_EXT); wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); @@ -1492,6 +1552,43 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) return ret_val; } +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + /** * igb_reset_hw_82580 - Reset hardware * @hw: pointer to the HW structure @@ -1567,6 +1664,10 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) wr32(E1000_IMC, 0xffffffff); icr = rd32(E1000_ICR); + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + /* Install any alternate MAC address into RAR0 */ ret_val = igb_check_alt_mac_addr(hw); diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index 24d9be64342ff91d75119c149618194a9e1485ae..bbd2ec308eb06b07963bfc5d658d73318968c406 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -164,6 +164,8 @@ #define E1000_SWFW_EEP_SM 0x1 #define E1000_SWFW_PHY0_SM 0x2 #define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 /* FACTPS Definitions */ /* Device Control */ @@ -466,6 +468,11 @@ #define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + /* PCI Express Control */ #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 #define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 @@ -563,6 +570,10 @@ #define NVM_82580_LAN_FUNC_OFFSET(a) (a ? 
(0x40 + (0x40 * a)) : 0) +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + /* Mask bits for fields in Word 0x0f of the NVM */ #define NVM_WORD0F_PAUSE_MASK 0x3000 #define NVM_WORD0F_ASM_DIR 0x2000 @@ -696,12 +707,17 @@ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 /* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 #define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 #define E1000_MDIC_PHY_SHIFT 21 #define E1000_MDIC_OP_WRITE 0x04000000 #define E1000_MDIC_OP_READ 0x08000000 #define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 #define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 /* SerDes Control */ #define E1000_GEN_CTL_READY 0x80000000 diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index f2ebf927e4bc0d2b6ab21285f9c54ebd0937829f..26bf6a13d1c1a6e40beb4f1b8ec9c207fb501e1a 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -1823,12 +1823,10 @@ static void igb_diag_test(struct net_device *netdev, dev_info(&adapter->pdev->dev, "online testing starting\n"); /* PHY is powered down when interface is down */ - if (!netif_carrier_ok(netdev)) { + if (if_running && igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else data[4] = 0; - } else { - if (igb_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - } /* Online tests aren't run; pass by default */ data[0] = 0; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index cea37e0837ff64d3ed4ae16ccc78f85557be9700..df5dcd23e4fcb408384c0991057305a23e41a4f7 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -630,9 +630,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) for (; i < adapter->rss_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + Q_IDX_82576(i); - for (; j < adapter->rss_queues; j++) - adapter->tx_ring[j]->reg_idx = rbase_offset + - Q_IDX_82576(j); } case e1000_82575: case e1000_82580: @@ -996,7 +993,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* Number of supported queues. 
*/ adapter->num_rx_queues = adapter->rss_queues; - adapter->num_tx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; /* start with one vector for every rx queue */ numvecs = adapter->num_rx_queues; @@ -1290,7 +1290,13 @@ static void igb_irq_disable(struct igb_adapter *adapter) wr32(E1000_IAM, 0); wr32(E1000_IMC, ~0); wrfl(); - synchronize_irq(adapter->pdev->irq); + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } } /** @@ -2100,9 +2106,6 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; - if (adapter->vfs_allocated_count > 7) - adapter->vfs_allocated_count = 7; - if (adapter->vfs_allocated_count) { adapter->vf_data = kcalloc(adapter->vfs_allocated_count, sizeof(struct vf_data_storage), @@ -2267,7 +2270,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) #ifdef CONFIG_PCI_IOV if (hw->mac.type == e1000_82576) - adapter->vfs_allocated_count = max_vfs; + adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; #endif /* CONFIG_PCI_IOV */ adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); @@ -2729,14 +2732,16 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) } igb_vmm_control(adapter); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | - E1000_MRQC_RSS_FIELD_IPV4_TCP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | - E1000_MRQC_RSS_FIELD_IPV6_TCP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | - E1000_MRQC_RSS_FIELD_IPV6_UDP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | - E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + /* + * Generate RSS hash based on TCP port numbers and/or + * IPv4/v6 src and dst addresses since UDP cannot be + * hashed reliably due to IP fragmentation + */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; wr32(E1000_MRQC, mrqc); } @@ -4986,6 +4991,10 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) { + /* + * The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ unsigned char *addr = (char *)&msg[1]; int err = -1; diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index add6197d3bcb0409e6a5641113d8e45549bdab63..ec808fa8dc213922c1feef96634f76bddca54c9a 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -2751,7 +2751,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "PF still in reset state, assigning new address." 
" Is the PF interface up?\n"); - random_ether_addr(hw->mac.addr); + dev_hw_addr_random(adapter->netdev, hw->mac.addr); } else { err = hw->mac.ops.read_mac_addr(hw); if (err) { diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index e3b5e949060136a619bee08d429a1a7e10c4ef2e..0b3f6df5cff78686d4324626fbc9c1966879e63c 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -82,7 +82,6 @@ struct ioc3_private { struct ioc3_etxd *txr; struct sk_buff *rx_skbs[512]; struct sk_buff *tx_skbs[128]; - struct net_device_stats stats; int rx_ci; /* RX consumer index */ int rx_pi; /* RX producer index */ int tx_ci; /* TX consumer index */ @@ -504,8 +503,8 @@ static struct net_device_stats *ioc3_get_stats(struct net_device *dev) struct ioc3_private *ip = netdev_priv(dev); struct ioc3 *ioc3 = ip->regs; - ip->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK); - return &ip->stats; + dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK); + return &dev->stats; } static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) @@ -576,8 +575,9 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) skb->ip_summed = CHECKSUM_UNNECESSARY; } -static inline void ioc3_rx(struct ioc3_private *ip) +static inline void ioc3_rx(struct net_device *dev) { + struct ioc3_private *ip = netdev_priv(dev); struct sk_buff *skb, *new_skb; struct ioc3 *ioc3 = ip->regs; int rx_entry, n_entry, len; @@ -598,13 +598,13 @@ static inline void ioc3_rx(struct ioc3_private *ip) if (err & ERXBUF_GOODPKT) { len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; skb_trim(skb, len); - skb->protocol = eth_type_trans(skb, priv_netdev(ip)); + skb->protocol = eth_type_trans(skb, dev); new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!new_skb) { /* Ouch, drop packet and just recycle packet to keep the ring filled. */ - ip->stats.rx_dropped++; + dev->stats.rx_dropped++; new_skb = skb; goto next; } @@ -622,19 +622,19 @@ static inline void ioc3_rx(struct ioc3_private *ip) rxb = (struct ioc3_erxbuf *) new_skb->data; skb_reserve(new_skb, RX_OFFSET); - ip->stats.rx_packets++; /* Statistics */ - ip->stats.rx_bytes += len; + dev->stats.rx_packets++; /* Statistics */ + dev->stats.rx_bytes += len; } else { - /* The frame is invalid and the skb never - reached the network layer so we can just - recycle it. */ - new_skb = skb; - ip->stats.rx_errors++; + /* The frame is invalid and the skb never + reached the network layer so we can just + recycle it. 
*/ + new_skb = skb; + dev->stats.rx_errors++; } if (err & ERXBUF_CRCERR) /* Statistics */ - ip->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (err & ERXBUF_FRAMERR) - ip->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; next: ip->rx_skbs[n_entry] = new_skb; rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1)); @@ -652,8 +652,9 @@ static inline void ioc3_rx(struct ioc3_private *ip) ip->rx_ci = rx_entry; } -static inline void ioc3_tx(struct ioc3_private *ip) +static inline void ioc3_tx(struct net_device *dev) { + struct ioc3_private *ip = netdev_priv(dev); unsigned long packets, bytes; struct ioc3 *ioc3 = ip->regs; int tx_entry, o_entry; @@ -681,12 +682,12 @@ static inline void ioc3_tx(struct ioc3_private *ip) tx_entry = (etcir >> 7) & 127; } - ip->stats.tx_packets += packets; - ip->stats.tx_bytes += bytes; + dev->stats.tx_packets += packets; + dev->stats.tx_bytes += bytes; ip->txqlen -= packets; if (ip->txqlen < 128) - netif_wake_queue(priv_netdev(ip)); + netif_wake_queue(dev); ip->tx_ci = o_entry; spin_unlock(&ip->ioc3_lock); @@ -699,9 +700,9 @@ static inline void ioc3_tx(struct ioc3_private *ip) * with such error interrupts if something really goes wrong, so we might * also consider to take the interface down. */ -static void ioc3_error(struct ioc3_private *ip, u32 eisr) +static void ioc3_error(struct net_device *dev, u32 eisr) { - struct net_device *dev = priv_netdev(ip); + struct ioc3_private *ip = netdev_priv(dev); unsigned char *iface = dev->name; spin_lock(&ip->ioc3_lock); @@ -747,11 +748,11 @@ static irqreturn_t ioc3_interrupt(int irq, void *_dev) if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) - ioc3_error(ip, eisr); + ioc3_error(dev, eisr); if (eisr & EISR_RXTIMERINT) - ioc3_rx(ip); + ioc3_rx(dev); if (eisr & EISR_TXEXPLICIT) - ioc3_tx(ip); + ioc3_tx(dev); return IRQ_HANDLED; } diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h index 0dbd1932b72fde8cf15f30f7b3684c077b8a5163..36c3060411d28404de1454f57798979c2fb6e211 100644 --- a/drivers/net/irda/donauboe.h +++ b/drivers/net/irda/donauboe.h @@ -273,7 +273,7 @@ struct OboeSlot __u8 control; /*Slot control/status see below */ __u32 address; /*Slot buffer address */ } -__attribute__ ((packed)); +__packed; #define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h index ac0443d52e500c3294a00f781b2bb53bc45af043..58ddb52149167cc2cb43d1bbbf23d0bf462694ad 100644 --- a/drivers/net/irda/irda-usb.h +++ b/drivers/net/irda/irda-usb.h @@ -125,7 +125,7 @@ struct irda_class_desc { __u8 bmAdditionalBOFs; __u8 bIrdaRateSniff; __u8 bMaxUnicastList; -} __attribute__ ((packed)); +} __packed; /* class specific interface request to get the IrDA-USB class descriptor * (6.2.5, USB-IrDA class spec 1.0) */ diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index b54d3b48045e7860b8a1ec91665e2107f35a85da..1046014dd6c2e1a19fbfe2f23163dbdc8a36c4c0 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c @@ -154,7 +154,7 @@ struct ks959_speedparams { __le32 baudrate; /* baud rate, little endian */ __u8 flags; __u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; #define KS_DATA_5_BITS 0x00 #define KS_DATA_6_BITS 0x01 diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 8d713ebac15ba162e5b7fd65e5438fac27e626fa..9cc142fcc712de9b2ecf53e6983b3d59042d12f5 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c @@ 
-117,7 +117,7 @@ struct ksdazzle_speedparams { __le32 baudrate; /* baud rate, little endian */ __u8 flags; __u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; #define KS_DATA_5_BITS 0x00 #define KS_DATA_6_BITS 0x01 diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index 9a828b06a57e656ae198bcc0d8a7cfcb7121706e..edd5666f0ffbde3e7bdce003278e93b44227ad05 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -749,7 +749,7 @@ static int __devinit sh_irda_probe(struct platform_device *pdev) struct sh_irda_self *self; struct resource *res; char clk_name[8]; - unsigned int irq; + int irq; int err = -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 5c5f99d50341d5e981225d14fa3951fd59a3b596..00b38bccd6d0fd51f19cfe383f71a1f5952d6a43 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -709,7 +709,7 @@ static int __devinit sh_sir_probe(struct platform_device *pdev) struct sh_sir_self *self; struct resource *res; char clk_name[8]; - unsigned int irq; + int irq; int err = -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index d67e48418e55c3a5fa085231c7fe462e491703d7..850ca1c5ee19cc024cb41aff9164b459ab999e46 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -2848,9 +2848,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, unsigned short ss_device = 0x0000; int ret = 0; - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); - - while (dev != NULL) { + for_each_pci_dev(dev) { struct smsc_ircc_subsystem_configuration *conf; /* @@ -2899,7 +2897,6 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, ret = -ENODEV; } } - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); } return ret; diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index 3050d1a0cccf72a9564782aba454dcdc20af98e4..3f24a1f330220aedc7f584265fd3c9fe55cec8d5 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -544,9 +544,9 @@ struct ring_descr_hw { struct { u8 addr_res[3]; volatile u8 status; /* descriptor status */ - } __attribute__((packed)) rd_s; - } __attribute((packed)) rd_u; -} __attribute__ ((packed)); + } __packed rd_s; + } __packed rd_u; +} __packed; #define rd_addr rd_u.addr #define rd_status rd_u.rd_s.status diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index ffae480587ae909fa482dda292f8802ca15565c8..9e15eb93860eb4686ad79ca5058de708ff2bda72 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -44,11 +44,9 @@ #include #endif -#define PFX "ixgbe: " -#define DPRINTK(nlevel, klevel, fmt, args...) \ - ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ - printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ - __func__ , ## args))) +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* TX/RX descriptor defines */ #define IXGBE_DEFAULT_TXD 512 @@ -112,7 +110,6 @@ struct vf_data_storage { u16 vlans_enabled; bool clear_to_send; bool pf_set_mac; - int rar; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; }; diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index a4e2901f2f08e5bc585fb5889c174d754a2672e3..3e06a61da9212bdd61563221f1cd3567fee4d200 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -206,6 +206,14 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, s32 status = 0; u32 autoc = 0; + /* Determine 1G link capabilities off of SFP+ type */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = true; + goto out; + } + /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not been @@ -707,9 +715,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - netif_info(adapter, hw, adapter->netdev, "Smartspeed has" - " downgraded the link speed from the maximum" - " advertised\n"); + e_info(hw, "Smartspeed has downgraded the link speed from " + "the maximum advertised\n"); return status; } @@ -2088,6 +2095,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; u16 ext_ability = 0; u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; hw->phy.ops.identify(hw); @@ -2167,12 +2175,16 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) case ixgbe_phy_sfp_ftl: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; break; default: break; diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 3080afb12bdfef2641170bfe0269dc8210b82772..5cf15aa11cac01cf69c3ef9c76e96efe07c7691b 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -105,12 +105,23 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) -#ifdef DEBUG -extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw); +extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw); #define hw_dbg(hw, format, arg...) \ - printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg) -#else -#define hw_dbg(hw, format, arg...) do {} while (0) -#endif - + netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(&adapter->pdev->dev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) 
\ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) #endif /* IXGBE_COMMON */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 4f7a26ab411e06ae0ecc19c5df963ed02dba0f35..25b02fb425ac3209aa6d4fe6fa9203525da583d0 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -346,7 +346,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, */ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); reg &= ~IXGBE_MFLCN_RFCE; - reg |= IXGBE_MFLCN_RPFCE; + reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); out: return 0; diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 71da325dfa800bc73a17df67cfa3a3c2af3037cd..b53b465e24af471f32e72d2d191d98751d43b6d3 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -121,7 +121,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) goto out; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); + e_err(drv, "Enable failed, needs MSI-X\n"); err = 1; goto out; } diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 3a93a81872b87153404111152bf5860307815772..dcebc82c6f4de3a4ae0134f802cbf5c7f8e8d302 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -54,14 +54,14 @@ struct ixgbe_stats { sizeof(((struct ixgbe_adapter *)0)->m), \ offsetof(struct ixgbe_adapter, m) #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ - sizeof(((struct net_device *)0)->m), \ - offsetof(struct net_device, m) + sizeof(((struct rtnl_link_stats64 *)0)->m), \ + offsetof(struct rtnl_link_stats64, m) static struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)}, - {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)}, - {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)}, - {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)}, + {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, + {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, + {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, @@ -69,27 +69,27 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"lsc_int", IXGBE_STAT(lsc_int)}, {"tx_busy", IXGBE_STAT(tx_busy)}, {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, - {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)}, - {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)}, - {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)}, - {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)}, - {"multicast", IXGBE_NETDEV_STAT(stats.multicast)}, + {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, + {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, + {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, + {"multicast", IXGBE_NETDEV_STAT(multicast)}, {"broadcast", IXGBE_STAT(stats.bprc)}, {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, - {"collisions", IXGBE_NETDEV_STAT(stats.collisions)}, - {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)}, - {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)}, - {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)}, + {"collisions", IXGBE_NETDEV_STAT(collisions)}, + {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", 
IXGBE_NETDEV_STAT(rx_frame_errors)}, {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, - {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)}, - {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)}, - {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)}, - {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)}, - {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)}, - {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)}, + {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, {"tx_restart_queue", IXGBE_STAT(restart_queue)}, {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, @@ -234,6 +234,13 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_sfp_type_not_present: ecmd->port = PORT_NONE; break; + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + ecmd->port = PORT_TP; + ecmd->supported = SUPPORTED_TP; + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + break; case ixgbe_sfp_type_unknown: default: ecmd->port = PORT_OTHER; @@ -294,8 +301,7 @@ static int ixgbe_set_settings(struct net_device *netdev, hw->mac.autotry_restart = true; err = hw->mac.ops.setup_link(hw, advertised, true, true); if (err) { - DPRINTK(PROBE, INFO, - "setup link failed with code %d\n", err); + e_info(probe, "setup link failed with code %d\n", err); hw->mac.ops.setup_link(hw, old, true, true); } } else { @@ -992,16 +998,18 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); u64 *queue_stat; int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; int j, k; int i; char *p = NULL; ixgbe_update_stats(adapter); - dev_get_stats(netdev); + net_stats = dev_get_stats(netdev, &temp); for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { switch (ixgbe_gstrings_stats[i].type) { case NETDEV_STATS: - p = (char *) netdev + + p = (char *) net_stats + ixgbe_gstrings_stats[i].stat_offset; break; case IXGBE_STATS: @@ -1188,9 +1196,9 @@ static struct ixgbe_reg_test reg_test_82598[] = { writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ val = readl(adapter->hw.hw_addr + R); \ if (val != (_test[pat] & W & M)) { \ - DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ - "0x%08X expected 0x%08X\n", \ - R, val, (_test[pat] & W & M)); \ + e_err(drv, "pattern test reg %04X failed: got " \ + "0x%08X expected 0x%08X\n", \ + R, val, (_test[pat] & W & M)); \ *data = R; \ writel(before, adapter->hw.hw_addr + R); \ return 1; \ @@ -1206,8 +1214,8 @@ static struct ixgbe_reg_test reg_test_82598[] = { writel((W & M), (adapter->hw.hw_addr + R)); \ val = readl(adapter->hw.hw_addr + R); \ if ((W & M) != (val & M)) { \ - DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ - "expected 0x%08X\n", R, (val & M), (W & M)); \ + e_err(drv, "set/check reg %04X test failed: got 0x%08X " \ + "expected 0x%08X\n", R, (val & M), (W & M)); \ *data = R; \ writel(before, (adapter->hw.hw_addr + R)); \ 
return 1; \ @@ -1240,8 +1248,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; if (value != after) { - DPRINTK(DRV, ERR, "failed STATUS register test got: " - "0x%08X expected: 0x%08X\n", after, value); + e_err(drv, "failed STATUS register test got: 0x%08X " + "expected: 0x%08X\n", after, value); *data = 1; return 1; } @@ -1341,8 +1349,8 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) *data = 1; return -1; } - DPRINTK(HW, INFO, "testing %s interrupt\n", - (shared_int ? "shared" : "unshared")); + e_info(hw, "testing %s interrupt\n", shared_int ? + "shared" : "unshared"); /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); @@ -1847,7 +1855,7 @@ static void ixgbe_diag_test(struct net_device *netdev, if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ - DPRINTK(HW, INFO, "offline testing starting\n"); + e_info(hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ @@ -1880,17 +1888,17 @@ static void ixgbe_diag_test(struct net_device *netdev, else ixgbe_reset(adapter); - DPRINTK(HW, INFO, "register testing starting\n"); + e_info(hw, "register testing starting\n"); if (ixgbe_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; ixgbe_reset(adapter); - DPRINTK(HW, INFO, "eeprom testing starting\n"); + e_info(hw, "eeprom testing starting\n"); if (ixgbe_eeprom_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; ixgbe_reset(adapter); - DPRINTK(HW, INFO, "interrupt testing starting\n"); + e_info(hw, "interrupt testing starting\n"); if (ixgbe_intr_test(adapter, &data[2])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1898,14 +1906,14 @@ static void ixgbe_diag_test(struct net_device *netdev, * loopback diagnostic. 
*/ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { - DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT " - "mode\n"); + e_info(hw, "Skip MAC loopback diagnostic in VT " + "mode\n"); data[3] = 0; goto skip_loopback; } ixgbe_reset(adapter); - DPRINTK(HW, INFO, "loopback testing starting\n"); + e_info(hw, "loopback testing starting\n"); if (ixgbe_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1916,7 +1924,7 @@ static void ixgbe_diag_test(struct net_device *netdev, if (if_running) dev_open(netdev); } else { - DPRINTK(HW, INFO, "online testing starting\n"); + e_info(hw, "online testing starting\n"); /* Online tests */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -2134,8 +2142,8 @@ static int ixgbe_set_coalesce(struct net_device *netdev, adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; if (netdev->features & NETIF_F_LRO) { netdev->features &= ~NETIF_F_LRO; - DPRINTK(PROBE, INFO, "rx-usecs set to 0, " - "disabling LRO/RSC\n"); + e_info(probe, "rx-usecs set to 0, " + "disabling RSC\n"); } need_reset = true; } @@ -2208,8 +2216,11 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); bool need_reset = false; + int rc; - ethtool_op_set_flags(netdev, data); + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE); + if (rc) + return rc; /* if state changes we need to update adapter->flags and reset */ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { @@ -2230,10 +2241,10 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) break; } } else if (!adapter->rx_itr_setting) { - netdev->features &= ~ETH_FLAG_LRO; + netdev->features &= ~NETIF_F_LRO; if (data & ETH_FLAG_LRO) - DPRINTK(PROBE, INFO, "rx-usecs set to 0, " - "LRO/RSC cannot be enabled.\n"); + e_info(probe, "rx-usecs set to 0, " + "LRO/RSC cannot be enabled.\n"); } } diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 45182ab41d6b19d3acc973275edfbb378b7bd64b..072327c5e41ab287e4ad9598145f6d3f60283f12 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -25,7 +25,6 @@ *******************************************************************************/ - #include "ixgbe.h" #ifdef CONFIG_IXGBE_DCB #include "ixgbe_dcb_82599.h" @@ -165,20 +164,20 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, adapter = netdev_priv(netdev); if (xid >= IXGBE_FCOE_DDP_MAX) { - DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid); + e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } fcoe = &adapter->fcoe; if (!fcoe->pool) { - DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid); + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); return 0; } ddp = &fcoe->ddp[xid]; if (ddp->sgl) { - DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n", - xid, ddp->sgl, ddp->sgc); + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); return 0; } ixgbe_fcoe_clear_ddp(ddp); @@ -186,14 +185,14 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, /* setup dma from scsi command sgl */ dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { - DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid); + e_err(drv, "xid 0x%x DMA map error\n", xid); return 0; } /* alloc the udl from our ddp pool */ - ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { - DPRINTK(DRV, 
ERR, "failed allocated ddp context\n"); + e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; } ddp->sgl = sgl; @@ -206,10 +205,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, while (len) { /* max number of buffers allowed in one DDP context */ if (j >= IXGBE_BUFFCNT_MAX) { - netif_err(adapter, drv, adapter->netdev, - "xid=%x:%d,%d,%d:addr=%llx " - "not enough descriptors\n", - xid, i, j, dmacount, (u64)addr); + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " + "not enough descriptors\n", + xid, i, j, dmacount, (u64)addr); goto out_noddp_free; } @@ -387,8 +385,8 @@ int ixgbe_fso(struct ixgbe_adapter *adapter, struct fc_frame_header *fh; if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { - DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", - skb_shinfo(skb)->gso_type); + e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); return -EINVAL; } @@ -414,7 +412,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter, fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF; break; default: - DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof); + e_warn(drv, "unknown sof = 0x%x\n", sof); return -EINVAL; } @@ -441,7 +439,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter, fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; break; default: - DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof); + e_warn(drv, "unknown eof = 0x%x\n", eof); return -EINVAL; } @@ -517,8 +515,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) adapter->pdev, IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, PAGE_SIZE); if (!fcoe->pool) - DPRINTK(DRV, ERR, - "failed to allocated FCoE DDP pool\n"); + e_err(drv, "failed to allocated FCoE DDP pool\n"); spin_lock_init(&fcoe->lock); } @@ -614,7 +611,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev) if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) goto out_enable; - DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n"); + e_info(drv, "Enabling FCoE offload features.\n"); if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); @@ -625,9 +622,6 @@ int ixgbe_fcoe_enable(struct net_device *netdev) netdev->features |= NETIF_F_FCOE_CRC; netdev->features |= NETIF_F_FSO; netdev->features |= NETIF_F_FCOE_MTU; - netdev->vlan_features |= NETIF_F_FCOE_CRC; - netdev->vlan_features |= NETIF_F_FSO; - netdev->vlan_features |= NETIF_F_FCOE_MTU; netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; ixgbe_init_interrupt_scheme(adapter); @@ -660,25 +654,21 @@ int ixgbe_fcoe_disable(struct net_device *netdev) if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) goto out_disable; - DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n"); + e_info(drv, "Disabling FCoE offload features.\n"); + netdev->features &= ~NETIF_F_FCOE_CRC; + netdev->features &= ~NETIF_F_FSO; + netdev->features &= ~NETIF_F_FCOE_MTU; + netdev->fcoe_ddp_xid = 0; + netdev_features_change(netdev); + if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; adapter->ring_feature[RING_F_FCOE].indices = 0; - netdev->features &= ~NETIF_F_FCOE_CRC; - netdev->features &= ~NETIF_F_FSO; - netdev->features &= ~NETIF_F_FCOE_MTU; - netdev->vlan_features &= ~NETIF_F_FCOE_CRC; - netdev->vlan_features &= ~NETIF_F_FSO; - netdev->vlan_features &= ~NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = 0; - ixgbe_cleanup_fcoe(adapter); ixgbe_init_interrupt_scheme(adapter); - netdev_features_change(netdev); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); diff --git a/drivers/net/ixgbe/ixgbe_main.c 
b/drivers/net/ixgbe/ixgbe_main.c index 74d9b6df3029f3c67e9e85ff5a95a14c5ef6ff0b..7d6a415bcf885633999e10ed795df49876d6c47e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "2.0.62-k2" +#define DRV_VERSION "2.0.84-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; @@ -696,19 +696,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, /* detected Tx unit hang */ union ixgbe_adv_tx_desc *tx_desc; tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); - DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH, TDT <%x>, <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "tx_buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->queue_index, - IXGBE_READ_REG(hw, tx_ring->head), - IXGBE_READ_REG(hw, tx_ring->tail), - tx_ring->next_to_use, eop, - tx_ring->tx_buffer_info[eop].time_stamp, jiffies); + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, tx_ring->head), + IXGBE_READ_REG(hw, tx_ring->tail), + tx_ring->next_to_use, eop, + tx_ring->tx_buffer_info[eop].time_stamp, jiffies); return true; } @@ -812,9 +812,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (adapter->detect_tx_hung) { if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { /* schedule immediate reset if we believe we hung */ - DPRINTK(PROBE, INFO, - "tx hang %d detected, resetting adapter\n", - adapter->tx_timeout_count + 1); + e_info(probe, "tx hang %d detected, resetting " + "adapter\n", adapter->tx_timeout_count + 1); ixgbe_tx_timeout(adapter->netdev); } } @@ -1653,10 +1652,10 @@ static void ixgbe_check_overtemp_task(struct work_struct *work) return; break; } - DPRINTK(DRV, ERR, "Network adapter has been stopped because it " - "has over heated. Restart the computer. If the problem " - "persists, power off the system and replace the " - "adapter\n"); + e_crit(drv, "Network adapter has been stopped because it has " + "over heated. Restart the computer. 
If the problem " + "persists, power off the system and replace the " + "adapter\n"); /* write to clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); } @@ -1668,7 +1667,7 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && (eicr & IXGBE_EICR_GPI_SDP1)) { - DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); + e_crit(probe, "Fan has stopped, replace the adapter\n"); /* write to clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); } @@ -2154,9 +2153,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) handler, 0, adapter->name[vector], adapter->q_vector[vector]); if (err) { - DPRINTK(PROBE, ERR, - "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + e_err(probe, "request_irq failed for MSIX interrupt " + "Error: %d\n", err); goto free_queue_irqs; } } @@ -2165,8 +2163,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, ixgbe_msix_lsc, 0, adapter->name[vector], netdev); if (err) { - DPRINTK(PROBE, ERR, - "request_irq for msix_lsc failed: %d\n", err); + e_err(probe, "request_irq for msix_lsc failed: %d\n", err); goto free_queue_irqs; } @@ -2352,7 +2349,7 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) } if (err) - DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); + e_err(probe, "request_irq failed, Error %d\n", err); return err; } @@ -2423,7 +2420,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) map_vector_to_rxq(adapter, 0, 0); map_vector_to_txq(adapter, 0, 0); - DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); + e_info(hw, "Legacy interrupt IVAR setup done\n"); } /** @@ -2803,10 +2800,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) /* Perform hash on these packet types */ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV4_UDP | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; } IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); @@ -2994,6 +2989,48 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int ixgbe_write_uc_addr_list(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + unsigned int vfn = adapter->num_vfs; + unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + /* return error if we do not support writing to RAR table */ + if (!hw->mac.ops.set_rar) + return -ENOMEM; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, + vfn, IXGBE_RAH_AV); + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) + hw->mac.ops.clear_rar(hw, rar_entries); + + return count; +} + /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -3007,38 +3044,58 @@ void ixgbe_set_rx_mode(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - u32 fctrl; + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + int count; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); /* don't hardware filter vlans in promisc mode */ ixgbe_vlan_filter_disable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; - fctrl &= ~IXGBE_FCTRL_UPE; - } else if (!hw->addr_ctrl.uc_set_promisc) { - fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= IXGBE_VMOLR_MPE; + } else { + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + vmolr |= IXGBE_VMOLR_ROMPE; } ixgbe_vlan_filter_enable(adapter); hw->addr_ctrl.user_set_promisc = false; + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ixgbe_write_uc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; + } } - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - - /* reprogram secondary unicast list */ - hw->mac.ops.update_uc_addr_list(hw, netdev); - - /* reprogram multicast list */ - hw->mac.ops.update_mc_addr_list(hw, netdev); - - if (adapter->num_vfs) + if (adapter->num_vfs) { ixgbe_restore_vf_multicasts(adapter); + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -3257,8 +3314,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, msleep(1); } if (k >= IXGBE_MAX_RX_DESC_POLL) { - DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " "not 
set within the polling period\n", rxr); + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " + "the polling period\n", rxr); } ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], (adapter->rx_ring[rxr]->count - 1)); @@ -3387,8 +3444,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) - DPRINTK(DRV, ERR, "Could not enable " - "Tx Queue %d\n", j); + e_err(drv, "Could not enable Tx Queue %d\n", j); } } @@ -3436,8 +3492,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) - DPRINTK(DRV, CRIT, - "Fan has stopped, replace the adapter\n"); + e_crit(drv, "Fan has stopped, replace the adapter\n"); } /* @@ -3466,7 +3521,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) } else { err = ixgbe_non_sfp_link_config(hw); if (err) - DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); + e_err(probe, "link_config FAILED %d\n", err); } for (i = 0; i < adapter->num_tx_queues; i++) @@ -3527,19 +3582,19 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) case IXGBE_ERR_SFP_NOT_PRESENT: break; case IXGBE_ERR_MASTER_REQUESTS_PENDING: - dev_err(&adapter->pdev->dev, "master disable timed out\n"); + e_dev_err("master disable timed out\n"); break; case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ - dev_warn(&adapter->pdev->dev, "This device is a pre-production " - "adapter/LOM. Please be aware there may be issues " - "associated with your hardware. If you are " - "experiencing problems please contact your Intel or " - "hardware representative who provided you with this " - "hardware.\n"); + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated with " + "your hardware. If you are experiencing problems " + "please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); break; default: - dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); + e_dev_err("Hardware Error: %d\n", err); } /* reprogram the RAR[0] in case user changed it. */ @@ -3920,12 +3975,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) adapter->num_tx_queues = 1; #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); + e_info(probe, "FCoE enabled with DCB\n"); ixgbe_set_dcb_queues(adapter); } #endif if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); + e_info(probe, "FCoE enabled with RSS\n"); if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ixgbe_set_fdir_queues(adapter); @@ -4038,7 +4093,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, * This just means we'll go with either a single MSI * vector or fall back to legacy interrupts. */ - DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI-X interrupts\n"); adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; kfree(adapter->msix_entries); adapter->msix_entries = NULL; @@ -4435,8 +4491,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) if (!err) { adapter->flags |= IXGBE_FLAG_MSI_ENABLED; } else { - DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " - "falling back to legacy. 
Error: %d\n", err); + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI interrupt, " + "falling back to legacy. Error: %d\n", err); /* reset err */ err = 0; } @@ -4557,27 +4614,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) err = ixgbe_set_interrupt_capability(adapter); if (err) { - DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); + e_dev_err("Unable to setup interrupt capabilities\n"); goto err_set_interrupt; } err = ixgbe_alloc_q_vectors(adapter); if (err) { - DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " - "vectors\n"); + e_dev_err("Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } err = ixgbe_alloc_queues(adapter); if (err) { - DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); + e_dev_err("Unable to allocate memory for queues\n"); goto err_alloc_queues; } - DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " - "Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? "Enabled" : - "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); set_bit(__IXGBE_DOWN, &adapter->state); @@ -4648,15 +4703,13 @@ static void ixgbe_sfp_task(struct work_struct *work) goto reschedule; ret = hw->phy.ops.reset(hw); if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { - dev_err(&adapter->pdev->dev, "failed to initialize " - "because an unsupported SFP+ module type " - "was detected.\n" - "Reload the driver after installing a " - "supported module.\n"); + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + e_dev_err("Reload the driver after installing a " + "supported module.\n"); unregister_netdev(adapter->netdev); } else { - DPRINTK(PROBE, INFO, "detected SFP+: %d\n", - hw->phy.sfp_type); + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); } /* don't need this routine any more */ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); @@ -4730,6 +4783,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCB /* Default traffic class to use for FCoE */ adapter->fcoe.tc = IXGBE_FCOE_DEFTC; + adapter->fcoe.up = IXGBE_FCOE_DEFTC; #endif #endif /* IXGBE_FCOE */ } @@ -4783,7 +4837,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* initialize eeprom parameters */ if (ixgbe_init_eeprom_params_generic(hw)) { - dev_err(&pdev->dev, "EEPROM initialization failed\n"); + e_dev_err("EEPROM initialization failed\n"); return -EIO; } @@ -4836,8 +4890,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit " - "descriptor ring\n"); + e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } @@ -4859,7 +4912,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); if (!err) continue; - DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); + e_err(probe, "Allocation for Tx Queue %u failed\n", i); break; } @@ -4884,8 +4937,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) { - DPRINTK(PROBE, ERR, - "vmalloc allocation failed for the rx desc 
ring\n"); + e_err(probe, "vmalloc allocation failed for the Rx " + "descriptor ring\n"); goto alloc_failed; } memset(rx_ring->rx_buffer_info, 0, size); @@ -4898,8 +4951,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { - DPRINTK(PROBE, ERR, - "Memory allocation failed for the rx desc ring\n"); + e_err(probe, "Memory allocation failed for the Rx " + "descriptor ring\n"); vfree(rx_ring->rx_buffer_info); goto alloc_failed; } @@ -4932,7 +4985,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; - DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); + e_err(probe, "Allocation for Rx Queue %u failed\n", i); break; } @@ -5031,8 +5084,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) return -EINVAL; - DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; @@ -5145,8 +5197,7 @@ static int ixgbe_resume(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - printk(KERN_ERR "ixgbe: Cannot enable PCI device from " - "suspend\n"); + e_dev_err("Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); @@ -5155,8 +5206,7 @@ static int ixgbe_resume(struct pci_dev *pdev) err = ixgbe_init_interrupt_scheme(adapter); if (err) { - printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " - "device\n"); + e_dev_err("Cannot initialize interrupts for device\n"); return err; } @@ -5517,10 +5567,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) err = hw->phy.ops.identify_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - dev_err(&adapter->pdev->dev, "failed to initialize because " - "an unsupported SFP+ module type was detected.\n" - "Reload the driver after installing a supported " - "module.\n"); + e_dev_err("failed to initialize because an unsupported SFP+ " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); unregister_netdev(adapter->netdev); return; } @@ -5549,8 +5599,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) set_bit(__IXGBE_FDIR_INIT_DONE, &(adapter->tx_ring[i]->reinit_state)); } else { - DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " - "ignored adding FDIR ATR filters\n"); + e_err(probe, "failed to finish FDIR re-initialization, " + "ignored adding FDIR ATR filters\n"); } /* Done FDIR Re-initialization, enable transmits */ netif_tx_start_all_queues(adapter->netdev); @@ -5621,16 +5671,14 @@ static void ixgbe_watchdog_task(struct work_struct *work) flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); } - printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " - "Flow Control: %s\n", - netdev->name, + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? - "10 Gbps" : - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? - "1 Gbps" : "unknown speed")), + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : "unknown speed")), ((flow_rx && flow_tx) ? "RX/TX" : - (flow_rx ? "RX" : - (flow_tx ? "TX" : "None")))); + (flow_rx ? "RX" : + (flow_tx ? 
"TX" : "None")))); netif_carrier_on(netdev); } else { @@ -5641,8 +5689,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) adapter->link_up = false; adapter->link_speed = 0; if (netif_carrier_ok(netdev)) { - printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", - netdev->name); + e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); } } @@ -5818,9 +5865,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, break; default: if (unlikely(net_ratelimit())) { - DPRINTK(PROBE, WARNING, - "partial checksum but proto=%x!\n", - skb->protocol); + e_warn(probe, "partial checksum " + "but proto=%x!\n", + skb->protocol); } break; } @@ -5931,7 +5978,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, return count; dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); + e_dev_err("TX DMA map failed\n"); /* clear timestamp and dma mappings for failed tx_buffer_info map */ tx_buffer_info->dma = 0; @@ -6101,21 +6148,26 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) struct ixgbe_adapter *adapter = netdev_priv(dev); int txq = smp_processor_id(); +#ifdef IXGBE_FCOE + if ((skb->protocol == htons(ETH_P_FCOE)) || + (skb->protocol == htons(ETH_P_FIP))) { + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); + txq += adapter->ring_feature[RING_F_FCOE].mask; + return txq; + } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + txq = adapter->fcoe.up; + return txq; + } + } +#endif + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { while (unlikely(txq >= dev->real_num_tx_queues)) txq -= dev->real_num_tx_queues; return txq; } -#ifdef IXGBE_FCOE - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - ((skb->protocol == htons(ETH_P_FCOE)) || - (skb->protocol == htons(ETH_P_FIP)))) { - txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); - txq += adapter->ring_feature[RING_F_FCOE].mask; - return txq; - } -#endif if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { if (skb->priority == TC_PRIO_CONTROL) txq = adapter->ring_feature[RING_F_DCB].indices-1; @@ -6159,18 +6211,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, tx_ring = adapter->tx_ring[skb->queue_mapping]; #ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { -#ifdef CONFIG_IXGBE_DCB - /* for FCoE with DCB, we force the priority to what - * was specified by the switch */ - if ((skb->protocol == htons(ETH_P_FCOE)) || - (skb->protocol == htons(ETH_P_FIP))) { - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK - << IXGBE_TX_FLAGS_VLAN_SHIFT); - tx_flags |= ((adapter->fcoe.up << 13) - << IXGBE_TX_FLAGS_VLAN_SHIFT); - } -#endif + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && + (skb->protocol == htons(ETH_P_FCOE) || + skb->protocol == htons(ETH_P_FIP))) { + tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK + << IXGBE_TX_FLAGS_VLAN_SHIFT); + tx_flags |= ((adapter->fcoe.up << 13) + << IXGBE_TX_FLAGS_VLAN_SHIFT); /* flag for FCoE offloads */ if (skb->protocol == htons(ETH_P_FCOE)) tx_flags |= IXGBE_TX_FLAGS_FCOE; @@ -6430,8 +6479,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); if (err) { - DPRINTK(PROBE, ERR, - "Failed to enable PCI sriov: %d\n", err); + e_err(probe, "Failed to enable PCI sriov: %d\n", err); goto err_novfs; } /* If call to enable VFs succeeded then allocate memory @@ -6455,9 +6503,8 @@ static void __devinit 
ixgbe_probe_vf(struct ixgbe_adapter *adapter, } /* Oh oh */ - DPRINTK(PROBE, ERR, - "Unable to allocate memory for VF " - "Data Storage - SRIOV disabled\n"); + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); pci_disable_sriov(adapter->pdev); err_novfs: @@ -6514,8 +6561,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } @@ -6526,7 +6573,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, IORESOURCE_MEM), ixgbe_driver_name); if (err) { dev_err(&pdev->dev, - "pci_request_selected_regions failed 0x%x\n", err); + "pci_request_selected_regions failed 0x%x\n", err); goto err_pci_reg; } @@ -6637,8 +6684,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) - DPRINTK(PROBE, CRIT, - "Fan has stopped, replace the adapter\n"); + e_crit(probe, "Fan has stopped, replace the adapter\n"); } /* reset_hw fills in the perm_addr as well */ @@ -6657,13 +6703,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, round_jiffies(jiffies + (2 * HZ))); err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - dev_err(&adapter->pdev->dev, "failed to initialize because " - "an unsupported SFP+ module type was detected.\n" - "Reload the driver after installing a supported " - "module.\n"); + e_dev_err("failed to initialize because an unsupported SFP+ " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); goto err_sw_init; } else if (err) { - dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); + e_dev_err("HW Init failed: %d\n", err); goto err_sw_init; } @@ -6707,6 +6753,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } } + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + netdev->vlan_features |= NETIF_F_FCOE_CRC; + netdev->vlan_features |= NETIF_F_FSO; + netdev->vlan_features |= NETIF_F_FCOE_MTU; + } #endif /* IXGBE_FCOE */ if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -6716,7 +6767,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { - dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); + e_dev_err("The EEPROM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } @@ -6725,7 +6776,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); if (ixgbe_validate_mac_addr(netdev->perm_addr)) { - dev_err(&pdev->dev, "invalid MAC address\n"); + e_dev_err("invalid MAC address\n"); err = -EIO; goto err_eeprom; } @@ -6760,7 +6811,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, hw->mac.ops.get_bus_info(hw); /* print bus type/speed/width info */ - dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", + e_dev_info("(PCI Express:%s:%s) %pM\n", ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? 
"Width x8" : @@ -6770,20 +6821,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->dev_addr); ixgbe_read_pba_num_generic(hw, &part_num); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, - (part_num >> 8), (part_num & 0xff)); + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " + "PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + (part_num >> 8), (part_num & 0xff)); else - dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", - hw->mac.type, hw->phy.type, - (part_num >> 8), (part_num & 0xff)); + e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, + (part_num >> 8), (part_num & 0xff)); if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { - dev_warn(&pdev->dev, "PCI-Express bandwidth available for " - "this card is not sufficient for optimal " - "performance.\n"); - dev_warn(&pdev->dev, "For optimal performance a x8 " - "PCI-Express slot is required.\n"); + e_dev_warn("PCI-Express bandwidth available for this card is " + "not sufficient for optimal performance.\n"); + e_dev_warn("For optimal performance a x8 PCI-Express slot " + "is required.\n"); } /* save off EEPROM version number */ @@ -6794,12 +6845,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err == IXGBE_ERR_EEPROM_VERSION) { /* We are running on a pre-production device, log a warning */ - dev_warn(&pdev->dev, "This device is a pre-production " - "adapter/LOM. Please be aware there may be issues " - "associated with your hardware. If you are " - "experiencing problems please contact your Intel or " - "hardware representative who provided you with this " - "hardware.\n"); + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); } strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); @@ -6822,8 +6873,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } #endif if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", - adapter->num_vfs); + e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); for (i = 0; i < adapter->num_vfs; i++) ixgbe_vf_configuration(pdev, (i | 0x10000000)); } @@ -6831,7 +6881,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); - dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); + e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; return 0; @@ -6921,7 +6971,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); - DPRINTK(PROBE, INFO, "complete\n"); + e_dev_info("complete\n"); free_netdev(netdev); @@ -6971,8 +7021,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) int err; if (pci_enable_device_mem(pdev)) { - DPRINTK(PROBE, ERR, - "Cannot re-enable PCI device after reset.\n"); + e_err(probe, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); @@ -6988,8 +7037,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) err = pci_cleanup_aer_uncorrect_error_status(pdev); if (err) { - dev_err(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); + e_dev_err("pci_cleanup_aer_uncorrect_error_status " + "failed 0x%0x\n", err); /* non-fatal, continue */ } @@ -7010,7 +7059,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev) if (netif_running(netdev)) { if (ixgbe_up(adapter)) { - DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); + e_info(probe, "ixgbe_up failed after reset\n"); return; } } @@ -7046,10 +7095,9 @@ static struct pci_driver ixgbe_driver = { static int __init ixgbe_init_module(void) { int ret; - printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, - ixgbe_driver_string, ixgbe_driver_version); - - printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); + pr_info("%s - version %s\n", ixgbe_driver_string, + ixgbe_driver_version); + pr_info("%s\n", ixgbe_copyright); #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); @@ -7088,18 +7136,17 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, } #endif /* CONFIG_IXGBE_DCA */ -#ifdef DEBUG + /** - * ixgbe_get_hw_dev_name - return device name string + * ixgbe_get_hw_dev return device * used by hardware layer to print debugging information **/ -char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) +struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; - return adapter->netdev->name; + return adapter->netdev; } -#endif module_exit(ixgbe_exit_module); /* ixgbe_main.c */ diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 48325a5beff2054c59b2aa9f6b333bcdf4d3d0c8..6c0d42e33f21770092668b3c3405223c3e55d09b 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -577,6 +577,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) * 6 SFP_SR/LR_CORE1 - 82599-specific * 7 SFP_act_lmt_DA_CORE0 - 82599-specific * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 
82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific */ if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) @@ -625,6 +627,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; else hw->phy.sfp_type = ixgbe_sfp_type_unknown; } @@ -696,8 +705,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) goto out; } - /* 1G SFP modules are not supported */ - if (comp_codes_10g == 0) { + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; @@ -711,7 +722,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) /* This is guaranteed to be 82599, no need to check for NULL */ hw->mac.ops.get_device_caps(hw, &enforce_sfp); - if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) { status = 0; @@ -742,6 +755,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *data_offset) { u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) return IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -753,6 +767,17 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) return IXGBE_ERR_SFP_NOT_SUPPORTED; + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + /* Read offset to PHY init contents */ hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); @@ -769,7 +794,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, hw->eeprom.ops.read(hw, *list_offset, &sfp_id); while (sfp_id != IXGBE_PHY_INIT_END_NL) { - if (sfp_id == hw->phy.sfp_type) { + if (sfp_id == sfp_type) { (*list_offset)++; hw->eeprom.ops.read(hw, *list_offset, data_offset); if ((!*data_offset) || (*data_offset == 0xFFFF)) { diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index ef4ba834c593e81220dfa78a9174646cb4289502..fb3898f12fc5519b1d01f2705323ef4271ef2730 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -48,6 +48,7 @@ #define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 #define IXGBE_SFF_1GBASESX_CAPABLE 0x1 #define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index f6cee94ec8e8522db891155fb3d131cd8df6dca4..49661a138e227d7caf74e70bc8ad026050fcdc32 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -25,7 +25,6 @@ 
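/*
 * Illustrative sketch only (the helper name is hypothetical, not taken from
 * the driver): the ixgbe_phy.c hunk above keys the EEPROM init-sequence
 * lookup on a normalised SFP type, so the new 1G copper modules reuse the
 * SR/LR entries the same way active-limiting DA cables do. Using only the
 * enum values visible in this patch:
 */
static enum ixgbe_sfp_type ixgbe_sfp_init_lookup_type(enum ixgbe_sfp_type t)
{
	switch (t) {
	case ixgbe_sfp_type_da_act_lmt_core0:
	case ixgbe_sfp_type_1g_cu_core0:
		return ixgbe_sfp_type_srlr_core0;	/* share SR/LR init, core 0 */
	case ixgbe_sfp_type_da_act_lmt_core1:
	case ixgbe_sfp_type_1g_cu_core1:
		return ixgbe_sfp_type_srlr_core1;	/* share SR/LR init, core 1 */
	default:
		return t;				/* all other types keyed as-is */
	}
}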
*******************************************************************************/ - #include #include #include @@ -138,6 +137,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); /* reset offloads to defaults */ if (adapter->vfinfo[vf].pf_vlan) { @@ -159,26 +159,17 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) /* Flush and reset the mta with the new values */ ixgbe_set_rx_mode(adapter->netdev); - if (adapter->vfinfo[vf].rar > 0) { - adapter->hw.mac.ops.clear_rar(&adapter->hw, - adapter->vfinfo[vf].rar); - adapter->vfinfo[vf].rar = -1; - } + hw->mac.ops.clear_rar(hw, rar_entry); } int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, int vf, unsigned char *mac_addr) { struct ixgbe_hw *hw = &adapter->hw; - - adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr, - vf, IXGBE_RAH_AV); - if (adapter->vfinfo[vf].rar < 0) { - DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf); - return -1; - } + int rar_entry = hw->mac.num_rar_entries - (vf + 1); memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); + hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); return 0; } @@ -194,11 +185,8 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) if (enable) { random_ether_addr(vf_mac_addr); - DPRINTK(PROBE, INFO, "IOV: VF %d is enabled " - "mac %02X:%02X:%02X:%02X:%02X:%02X\n", - vfn, - vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2], - vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]); + e_info(probe, "IOV: VF %d is enabled MAC %pM\n", + vfn, vf_mac_addr); /* * Store away the VF "permananet" MAC address, it will ask * for it later. 
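/*
 * Illustrative sketch (hypothetical helper name, derived only from the
 * ixgbe_sriov.c hunk above): rather than storing the index returned by
 * set_rar() in vfinfo[vf].rar, each VF's unicast MAC filter now always
 * occupies a fixed receive-address-register slot counted down from the top
 * of the RAR table, so both the reset path and set_vf_mac can simply
 * recompute it. Note %pM in the e_info() call is the standard kernel printk
 * extension for printing a MAC address.
 */
static inline int ixgbe_vf_rar_entry(struct ixgbe_hw *hw, int vf)
{
	/* VF 0 -> last RAR entry, VF 1 -> the one below it, and so on */
	return hw->mac.num_rar_entries - (vf + 1);
}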
@@ -243,7 +231,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); if (retval) - printk(KERN_ERR "Error receiving message from VF\n"); + pr_err("Error receiving message from VF\n"); /* this is a message we already processed, do nothing */ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) @@ -257,7 +245,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) if (msgbuf[0] == IXGBE_VF_RESET) { unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; u8 *addr = (u8 *)(&msgbuf[1]); - DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf); + e_info(probe, "VF Reset msg received from vf %d\n", vf); adapter->vfinfo[vf].clear_to_send = false; ixgbe_vf_reset_msg(adapter, vf); adapter->vfinfo[vf].clear_to_send = true; @@ -310,7 +298,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); break; default: - DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]); + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; break; } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index cdd1998f18c705348a618098f8e098edda8d7e98..9587d975d66c6eeb31e82f9233ab82f876036134 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -2214,6 +2214,8 @@ enum ixgbe_sfp_type { ixgbe_sfp_type_srlr_core1 = 6, ixgbe_sfp_type_da_act_lmt_core0 = 7, ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, ixgbe_sfp_type_not_present = 0xFFFE, ixgbe_sfp_type_unknown = 0xFFFF }; diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index a16cff7e54a3ec8daea5f499ca40834c5adb3111..3e291ccc629d0971ef9d9ec2f23f2b2b93b65b1b 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -1463,18 +1463,10 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - struct net_device *v_netdev; /* add VID to filter table */ if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, true); - /* - * Copy feature flags from netdev to the vlan netdev for this vid. - * This allows things like TSO to bubble down to our vlan device. 
- */ - v_netdev = vlan_group_get_device(adapter->vlgrp, vid); - v_netdev->features |= adapter->netdev->features; - vlan_group_set_device(adapter->vlgrp, vid, v_netdev); } static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) @@ -2229,7 +2221,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) if (err) { dev_info(&pdev->dev, "PF still in reset state, assigning new address\n"); - random_ether_addr(hw->mac.addr); + dev_hw_addr_random(adapter->netdev, hw->mac.addr); } else { err = hw->mac.ops.init_hw(hw); if (err) { @@ -2935,7 +2927,8 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; - unsigned int offset = 0, size, count = 0; + unsigned int offset = 0, size; + int count = 0; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; int i; @@ -3401,7 +3394,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, /* setup the private structure */ err = ixgbevf_sw_init(adapter); -#ifdef MAX_SKB_FRAGS netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | @@ -3411,16 +3403,16 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_GRO; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; -#endif /* MAX_SKB_FRAGS */ - /* The HW MAC address was set and/or determined in sw_init */ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index 3e6aaf9e5ce7c4060b1bc042cf4d46608549297a..949c1f9336445685998a891905924973c3eb7ef1 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -82,11 +82,20 @@ static unsigned short known_revisions[] = static int jazzsonic_open(struct net_device* dev) { - if (request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, "sonic", dev)) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; + int retval; + + retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, + "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, dev->irq); + return retval; } - return sonic_open(dev); + + retval = sonic_open(dev); + if (retval) + free_irq(dev->irq, dev); + return retval; } static int jazzsonic_close(struct net_device* dev) diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c index f852ab3ae9cf2aaa86ff6ac9718d8b839bea6bdb..928b2b83cef509080c30eccd6c4d1c4cfe76fd58 100644 --- a/drivers/net/ks8842.c +++ b/drivers/net/ks8842.c @@ -18,6 +18,7 @@ /* Supports: * The Micrel KS8842 behind the timberdale FPGA + * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -29,11 +30,19 @@ #include #include #include +#include +#include +#include #define DRV_NAME "ks8842" /* Timberdale specific Registers */ -#define REG_TIMB_RST 0x1c +#define REG_TIMB_RST 0x1c +#define REG_TIMB_FIFO 0x20 +#define REG_TIMB_ISR 0x24 +#define REG_TIMB_IER 0x28 +#define REG_TIMB_IAR 0x2C +#define REQ_TIMB_DMA_RESUME 0x30 /* KS8842 registers */ @@ -76,6 +85,15 @@ #define IRQ_RX_ERROR 0x0080 #define ENABLED_IRQS 
(IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) +/* When running via timberdale in DMA mode, the RX interrupt should be + enabled in the KS8842, but not in the FPGA IP, since the IP handles + RX DMA internally. + TX interrupts are not needed it is handled by the FPGA the driver is + notified via DMA callbacks. +*/ +#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ + IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) +#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) #define REG_ISR 0x02 #define REG_RXSR 0x04 #define RXSR_VALID 0x8000 @@ -114,14 +132,53 @@ #define REG_P1CR4 0x02 #define REG_P1SR 0x04 +/* flags passed by platform_device for configuration */ +#define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ +#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ + +#define DMA_BUFFER_SIZE 2048 + +struct ks8842_tx_dma_ctl { + struct dma_chan *chan; + struct dma_async_tx_descriptor *adesc; + void *buf; + struct scatterlist sg; + int channel; +}; + +struct ks8842_rx_dma_ctl { + struct dma_chan *chan; + struct dma_async_tx_descriptor *adesc; + struct sk_buff *skb; + struct scatterlist sg; + struct tasklet_struct tasklet; + int channel; +}; + +#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ + ((adapter)->dma_rx.channel != -1)) + struct ks8842_adapter { void __iomem *hw_addr; int irq; + unsigned long conf_flags; /* copy of platform_device config */ struct tasklet_struct tasklet; spinlock_t lock; /* spinlock to be interrupt safe */ - struct platform_device *pdev; + struct work_struct timeout_work; + struct net_device *netdev; + struct device *dev; + struct ks8842_tx_dma_ctl dma_tx; + struct ks8842_rx_dma_ctl dma_rx; }; +static void ks8842_dma_rx_cb(void *data); +static void ks8842_dma_tx_cb(void *data); + +static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) +{ + iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); +} + static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) { iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); @@ -191,16 +248,21 @@ static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank, static void ks8842_reset(struct ks8842_adapter *adapter) { - /* The KS8842 goes haywire when doing softare reset - * a work around in the timberdale IP is implemented to - * do a hardware reset instead - ks8842_write16(adapter, 3, 1, REG_GRR); - msleep(10); - iowrite16(0, adapter->hw_addr + REG_GRR); - */ - iowrite16(32, adapter->hw_addr + REG_SELECT_BANK); - iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); - msleep(20); + if (adapter->conf_flags & MICREL_KS884X) { + ks8842_write16(adapter, 3, 1, REG_GRR); + msleep(10); + iowrite16(0, adapter->hw_addr + REG_GRR); + } else { + /* The KS8842 goes haywire when doing softare reset + * a work around in the timberdale IP is implemented to + * do a hardware reset instead + ks8842_write16(adapter, 3, 1, REG_GRR); + msleep(10); + iowrite16(0, adapter->hw_addr + REG_GRR); + */ + iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); + msleep(20); + } } static void ks8842_update_link_status(struct net_device *netdev, @@ -269,8 +331,6 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter) /* restart port auto-negotiation */ ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); - /* only advertise 10Mbps */ - ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4); /* Enable the transmitter */ ks8842_enable_tx(adapter); @@ -282,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter) 
ks8842_write16(adapter, 18, 0xffff, REG_ISR); /* enable interrupts */ - ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); - + if (KS8842_USE_DMA(adapter)) { + /* When running in DMA Mode the RX interrupt is not enabled in + timberdale because RX data is received by DMA callbacks + it must still be enabled in the KS8842 because it indicates + to timberdale when there is RX data for it's DMA FIFOs */ + iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); + ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); + } else { + if (!(adapter->conf_flags & MICREL_KS884X)) + iowrite16(ENABLED_IRQS, + adapter->hw_addr + REG_TIMB_IER); + ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); + } /* enable the switch */ ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); } @@ -296,13 +367,28 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) for (i = 0; i < ETH_ALEN; i++) dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); - /* make sure the switch port uses the same MAC as the QMU */ - mac = ks8842_read16(adapter, 2, REG_MARL); - ks8842_write16(adapter, 39, mac, REG_MACAR1); - mac = ks8842_read16(adapter, 2, REG_MARM); - ks8842_write16(adapter, 39, mac, REG_MACAR2); - mac = ks8842_read16(adapter, 2, REG_MARH); - ks8842_write16(adapter, 39, mac, REG_MACAR3); + if (adapter->conf_flags & MICREL_KS884X) { + /* + the sequence of saving mac addr between MAC and Switch is + different. + */ + + mac = ks8842_read16(adapter, 2, REG_MARL); + ks8842_write16(adapter, 39, mac, REG_MACAR3); + mac = ks8842_read16(adapter, 2, REG_MARM); + ks8842_write16(adapter, 39, mac, REG_MACAR2); + mac = ks8842_read16(adapter, 2, REG_MARH); + ks8842_write16(adapter, 39, mac, REG_MACAR1); + } else { + + /* make sure the switch port uses the same MAC as the QMU */ + mac = ks8842_read16(adapter, 2, REG_MARL); + ks8842_write16(adapter, 39, mac, REG_MACAR1); + mac = ks8842_read16(adapter, 2, REG_MARM); + ks8842_write16(adapter, 39, mac, REG_MACAR2); + mac = ks8842_read16(adapter, 2, REG_MARH); + ks8842_write16(adapter, 39, mac, REG_MACAR3); + } } static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) @@ -313,8 +399,25 @@ static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) spin_lock_irqsave(&adapter->lock, flags); for (i = 0; i < ETH_ALEN; i++) { ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); - ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], - REG_MACAR1 + i); + if (!(adapter->conf_flags & MICREL_KS884X)) + ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], + REG_MACAR1 + i); + } + + if (adapter->conf_flags & MICREL_KS884X) { + /* + the sequence of saving mac addr between MAC and Switch is + different. 
+ */ + + u16 mac; + + mac = ks8842_read16(adapter, 2, REG_MARL); + ks8842_write16(adapter, 39, mac, REG_MACAR3); + mac = ks8842_read16(adapter, 2, REG_MARM); + ks8842_write16(adapter, 39, mac, REG_MACAR2); + mac = ks8842_read16(adapter, 2, REG_MARH); + ks8842_write16(adapter, 39, mac, REG_MACAR1); } spin_unlock_irqrestore(&adapter->lock, flags); } @@ -324,15 +427,59 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; } +static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; + u8 *buf = ctl->buf; + + if (ctl->adesc) { + netdev_dbg(netdev, "%s: TX ongoing\n", __func__); + /* transfer ongoing */ + return NETDEV_TX_BUSY; + } + + sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); + + /* copy data to the TX buffer */ + /* the control word, enable IRQ, port 1 and the length */ + *buf++ = 0x00; + *buf++ = 0x01; /* Port 1 */ + *buf++ = skb->len & 0xff; + *buf++ = (skb->len >> 8) & 0xff; + skb_copy_from_linear_data(skb, buf, skb->len); + + dma_sync_single_range_for_device(adapter->dev, + sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), + DMA_TO_DEVICE); + + /* make sure the length is a multiple of 4 */ + if (sg_dma_len(&ctl->sg) % 4) + sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; + + ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, + &ctl->sg, 1, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + if (!ctl->adesc) + return NETDEV_TX_BUSY; + + ctl->adesc->callback_param = netdev; + ctl->adesc->callback = ks8842_dma_tx_cb; + ctl->adesc->tx_submit(ctl->adesc); + + netdev->stats.tx_bytes += skb->len; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); int len = skb->len; - u32 *ptr = (u32 *)skb->data; - u32 ctrl; - dev_dbg(&adapter->pdev->dev, - "%s: len %u head %p data %p tail %p end %p\n", + netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n", __func__, skb->len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb)); @@ -340,17 +487,34 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) if (ks8842_tx_fifo_space(adapter) < len + 8) return NETDEV_TX_BUSY; - /* the control word, enable IRQ, port 1 and the length */ - ctrl = 0x8000 | 0x100 | (len << 16); - ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); + if (adapter->conf_flags & KS884X_16BIT) { + u16 *ptr16 = (u16 *)skb->data; + ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO); + ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI); + netdev->stats.tx_bytes += len; + + /* copy buffer */ + while (len > 0) { + iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); + iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); + len -= sizeof(u32); + } + } else { - netdev->stats.tx_bytes += len; + u32 *ptr = (u32 *)skb->data; + u32 ctrl; + /* the control word, enable IRQ, port 1 and the length */ + ctrl = 0x8000 | 0x100 | (len << 16); + ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); - /* copy buffer */ - while (len > 0) { - iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); - len -= sizeof(u32); - ptr++; + netdev->stats.tx_bytes += len; + + /* copy buffer */ + while (len > 0) { + iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); + len -= sizeof(u32); + ptr++; + } } /* enqueue packet */ @@ -361,54 +525,174 
@@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } -static void ks8842_rx_frame(struct net_device *netdev, - struct ks8842_adapter *adapter) +static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) +{ + netdev_dbg(netdev, "RX error, status: %x\n", status); + + netdev->stats.rx_errors++; + if (status & RXSR_TOO_LONG) + netdev->stats.rx_length_errors++; + if (status & RXSR_CRC_ERROR) + netdev->stats.rx_crc_errors++; + if (status & RXSR_RUNT) + netdev->stats.rx_frame_errors++; +} + +static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, + int len) +{ + netdev_dbg(netdev, "RX packet, len: %d\n", len); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + if (status & RXSR_MULTICAST) + netdev->stats.multicast++; +} + +static int __ks8842_start_new_rx_dma(struct net_device *netdev) { - u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); - int len = (status >> 16) & 0x7ff; + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; + struct scatterlist *sg = &ctl->sg; + int err; - status &= 0xffff; + ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); + if (ctl->skb) { + sg_init_table(sg, 1); + sg_dma_address(sg) = dma_map_single(adapter->dev, + ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); + if (unlikely(err)) { + sg_dma_address(sg) = 0; + goto out; + } + + sg_dma_len(sg) = DMA_BUFFER_SIZE; + + ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, + sg, 1, DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + + if (!ctl->adesc) + goto out; + + ctl->adesc->callback_param = netdev; + ctl->adesc->callback = ks8842_dma_rx_cb; + ctl->adesc->tx_submit(ctl->adesc); + } else { + err = -ENOMEM; + sg_dma_address(sg) = 0; + goto out; + } - dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n", - __func__, status); + return err; +out: + if (sg_dma_address(sg)) + dma_unmap_single(adapter->dev, sg_dma_address(sg), + DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + sg_dma_address(sg) = 0; + if (ctl->skb) + dev_kfree_skb(ctl->skb); + + ctl->skb = NULL; + + printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); + return err; +} + +static void ks8842_rx_frame_dma_tasklet(unsigned long arg) +{ + struct net_device *netdev = (struct net_device *)arg; + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; + struct sk_buff *skb = ctl->skb; + dma_addr_t addr = sg_dma_address(&ctl->sg); + u32 status; + + ctl->adesc = NULL; + + /* kick next transfer going */ + __ks8842_start_new_rx_dma(netdev); + + /* now handle the data we got */ + dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + + status = *((u32 *)skb->data); + + netdev_dbg(netdev, "%s - rx_data: status: %x\n", + __func__, status & 0xffff); /* check the status */ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { - struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); + int len = (status >> 16) & 0x7ff; - dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", - __func__, len); - if (skb) { - u32 *data; + ks8842_update_rx_counters(netdev, status, len); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += len; - if (status & RXSR_MULTICAST) - netdev->stats.multicast++; + /* reserve 4 bytes which is the status word */ + skb_reserve(skb, 4); + skb_put(skb, len); - data = (u32 *)skb_put(skb, len); + skb->protocol = 
eth_type_trans(skb, netdev); + netif_rx(skb); + } else { + ks8842_update_rx_err_counters(netdev, status); + dev_kfree_skb(skb); + } +} - ks8842_select_bank(adapter, 17); - while (len > 0) { - *data++ = ioread32(adapter->hw_addr + - REG_QMU_DATA_LO); - len -= sizeof(u32); - } +static void ks8842_rx_frame(struct net_device *netdev, + struct ks8842_adapter *adapter) +{ + u32 status; + int len; + + if (adapter->conf_flags & KS884X_16BIT) { + status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO); + len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI); + netdev_dbg(netdev, "%s - rx_data: status: %x\n", + __func__, status); + } else { + status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); + len = (status >> 16) & 0x7ff; + status &= 0xffff; + netdev_dbg(netdev, "%s - rx_data: status: %x\n", + __func__, status); + } + /* check the status */ + if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { + struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); + + if (skb) { + + ks8842_update_rx_counters(netdev, status, len); + + if (adapter->conf_flags & KS884X_16BIT) { + u16 *data16 = (u16 *)skb_put(skb, len); + ks8842_select_bank(adapter, 17); + while (len > 0) { + *data16++ = ioread16(adapter->hw_addr + + REG_QMU_DATA_LO); + *data16++ = ioread16(adapter->hw_addr + + REG_QMU_DATA_HI); + len -= sizeof(u32); + } + } else { + u32 *data = (u32 *)skb_put(skb, len); + + ks8842_select_bank(adapter, 17); + while (len > 0) { + *data++ = ioread32(adapter->hw_addr + + REG_QMU_DATA_LO); + len -= sizeof(u32); + } + } skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); } else netdev->stats.rx_dropped++; - } else { - dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status); - netdev->stats.rx_errors++; - if (status & RXSR_TOO_LONG) - netdev->stats.rx_length_errors++; - if (status & RXSR_CRC_ERROR) - netdev->stats.rx_crc_errors++; - if (status & RXSR_RUNT) - netdev->stats.rx_frame_errors++; - } + } else + ks8842_update_rx_err_counters(netdev, status); /* set high watermark to 3K */ ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); @@ -423,8 +707,7 @@ static void ks8842_rx_frame(struct net_device *netdev, void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) { u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; - dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n", - __func__, rx_data); + netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); while (rx_data) { ks8842_rx_frame(netdev, adapter); rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; @@ -434,7 +717,7 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) { u16 sr = ks8842_read16(adapter, 16, REG_TXSR); - dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr); + netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); netdev->stats.tx_packets++; if (netif_queue_stopped(netdev)) netif_wake_queue(netdev); @@ -443,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) void ks8842_handle_rx_overrun(struct net_device *netdev, struct ks8842_adapter *adapter) { - dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); + netdev_dbg(netdev, "%s: entry\n", __func__); netdev->stats.rx_errors++; netdev->stats.rx_fifo_errors++; } @@ -462,20 +745,32 @@ void ks8842_tasklet(unsigned long arg) spin_unlock_irqrestore(&adapter->lock, flags); isr = ks8842_read16(adapter, 18, REG_ISR); - dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, 
isr); + netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); + + /* when running in DMA mode, do not ack RX interrupts, it is handled + internally by timberdale, otherwise it's DMA FIFO:s would stop + */ + if (KS8842_USE_DMA(adapter)) + isr &= ~IRQ_RX; /* Ack */ ks8842_write16(adapter, 18, isr, REG_ISR); + if (!(adapter->conf_flags & MICREL_KS884X)) + /* Ack in the timberdale IP as well */ + iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); + if (!netif_running(netdev)) return; if (isr & IRQ_LINK_CHANGE) ks8842_update_link_status(netdev, adapter); - if (isr & (IRQ_RX | IRQ_RX_ERROR)) + /* should not get IRQ_RX when running DMA mode */ + if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) ks8842_handle_rx(netdev, adapter); + /* should only happen when in PIO mode */ if (isr & IRQ_TX) ks8842_handle_tx(netdev, adapter); @@ -494,24 +789,38 @@ void ks8842_tasklet(unsigned long arg) /* re-enable interrupts, put back the bank selection register */ spin_lock_irqsave(&adapter->lock, flags); - ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); + if (KS8842_USE_DMA(adapter)) + ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); + else + ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); + + /* Make sure timberdale continues DMA operations, they are stopped while + we are handling the ks8842 because we might change bank */ + if (KS8842_USE_DMA(adapter)) + ks8842_resume_dma(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); } static irqreturn_t ks8842_irq(int irq, void *devid) { - struct ks8842_adapter *adapter = devid; + struct net_device *netdev = devid; + struct ks8842_adapter *adapter = netdev_priv(netdev); u16 isr; u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); irqreturn_t ret = IRQ_NONE; isr = ks8842_read16(adapter, 18, REG_ISR); - dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr); + netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); if (isr) { - /* disable IRQ */ - ks8842_write16(adapter, 18, 0x00, REG_IER); + if (KS8842_USE_DMA(adapter)) + /* disable all but RX IRQ, since the FPGA relies on it*/ + ks8842_write16(adapter, 18, IRQ_RX, REG_IER); + else + /* disable IRQ */ + ks8842_write16(adapter, 18, 0x00, REG_IER); /* schedule tasklet */ tasklet_schedule(&adapter->tasklet); @@ -521,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid) iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); + /* After an interrupt, tell timberdale to continue DMA operations. 
+ DMA is disabled while we are handling the ks8842 because we might + change bank */ + ks8842_resume_dma(adapter); + return ret; } +static void ks8842_dma_rx_cb(void *data) +{ + struct net_device *netdev = data; + struct ks8842_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "RX DMA finished\n"); + /* schedule tasklet */ + if (adapter->dma_rx.adesc) + tasklet_schedule(&adapter->dma_rx.tasklet); +} + +static void ks8842_dma_tx_cb(void *data) +{ + struct net_device *netdev = data; + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; + + netdev_dbg(netdev, "TX DMA finished\n"); + + if (!ctl->adesc) + return; + + netdev->stats.tx_packets++; + ctl->adesc = NULL; + + if (netif_queue_stopped(netdev)) + netif_wake_queue(netdev); +} + +static void ks8842_stop_dma(struct ks8842_adapter *adapter) +{ + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; + + tx_ctl->adesc = NULL; + if (tx_ctl->chan) + tx_ctl->chan->device->device_control(tx_ctl->chan, + DMA_TERMINATE_ALL, 0); + + rx_ctl->adesc = NULL; + if (rx_ctl->chan) + rx_ctl->chan->device->device_control(rx_ctl->chan, + DMA_TERMINATE_ALL, 0); + + if (sg_dma_address(&rx_ctl->sg)) + dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), + DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + sg_dma_address(&rx_ctl->sg) = 0; + + dev_kfree_skb(rx_ctl->skb); + rx_ctl->skb = NULL; +} + +static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) +{ + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; + + ks8842_stop_dma(adapter); + + if (tx_ctl->chan) + dma_release_channel(tx_ctl->chan); + tx_ctl->chan = NULL; + + if (rx_ctl->chan) + dma_release_channel(rx_ctl->chan); + rx_ctl->chan = NULL; + + tasklet_kill(&rx_ctl->tasklet); + + if (sg_dma_address(&tx_ctl->sg)) + dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), + DMA_BUFFER_SIZE, DMA_TO_DEVICE); + sg_dma_address(&tx_ctl->sg) = 0; + + kfree(tx_ctl->buf); + tx_ctl->buf = NULL; +} + +static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) +{ + return chan->chan_id == (long)filter_param; +} + +static int ks8842_alloc_dma_bufs(struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; + int err; + + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_PRIVATE, mask); + + sg_init_table(&tx_ctl->sg, 1); + + tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, + (void *)(long)tx_ctl->channel); + if (!tx_ctl->chan) { + err = -ENODEV; + goto err; + } + + /* allocate DMA buffer */ + tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); + if (!tx_ctl->buf) { + err = -ENOMEM; + goto err; + } + + sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, + tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); + err = dma_mapping_error(adapter->dev, + sg_dma_address(&tx_ctl->sg)); + if (err) { + sg_dma_address(&tx_ctl->sg) = 0; + goto err; + } + + rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, + (void *)(long)rx_ctl->channel); + if (!rx_ctl->chan) { + err = -ENODEV; + goto err; + } + + tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, + (unsigned long)netdev); + + return 0; +err: + ks8842_dealloc_dma_bufs(adapter); + return err; +} /* Netdevice operations */ @@ -532,7 +983,26 @@ static int ks8842_open(struct 
net_device *netdev) struct ks8842_adapter *adapter = netdev_priv(netdev); int err; - dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__); + netdev_dbg(netdev, "%s - entry\n", __func__); + + if (KS8842_USE_DMA(adapter)) { + err = ks8842_alloc_dma_bufs(netdev); + + if (!err) { + /* start RX dma */ + err = __ks8842_start_new_rx_dma(netdev); + if (err) + ks8842_dealloc_dma_bufs(adapter); + } + + if (err) { + printk(KERN_WARNING DRV_NAME + ": Failed to initiate DMA, running PIO\n"); + ks8842_dealloc_dma_bufs(adapter); + adapter->dma_rx.channel = -1; + adapter->dma_tx.channel = -1; + } + } /* reset the HW */ ks8842_reset_hw(adapter); @@ -542,7 +1012,7 @@ static int ks8842_open(struct net_device *netdev) ks8842_update_link_status(netdev, adapter); err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, - adapter); + netdev); if (err) { pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); return err; @@ -555,10 +1025,15 @@ static int ks8842_close(struct net_device *netdev) { struct ks8842_adapter *adapter = netdev_priv(netdev); - dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__); + netdev_dbg(netdev, "%s - entry\n", __func__); + + cancel_work_sync(&adapter->timeout_work); + + if (KS8842_USE_DMA(adapter)) + ks8842_dealloc_dma_bufs(adapter); /* free the irq */ - free_irq(adapter->irq, adapter); + free_irq(adapter->irq, netdev); /* disable the switch */ ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); @@ -572,7 +1047,18 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, int ret; struct ks8842_adapter *adapter = netdev_priv(netdev); - dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); + netdev_dbg(netdev, "%s: entry\n", __func__); + + if (KS8842_USE_DMA(adapter)) { + unsigned long flags; + ret = ks8842_tx_frame_dma(skb, netdev); + /* for now only allow one transfer at the time */ + spin_lock_irqsave(&adapter->lock, flags); + if (adapter->dma_tx.adesc) + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->lock, flags); + return ret; + } ret = ks8842_tx_frame(skb, netdev); @@ -588,7 +1074,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p) struct sockaddr *addr = p; char *mac = (u8 *)addr->sa_data; - dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); + netdev_dbg(netdev, "%s: entry\n", __func__); if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -599,17 +1085,26 @@ static int ks8842_set_mac(struct net_device *netdev, void *p) return 0; } -static void ks8842_tx_timeout(struct net_device *netdev) +static void ks8842_tx_timeout_work(struct work_struct *work) { - struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_adapter *adapter = + container_of(work, struct ks8842_adapter, timeout_work); + struct net_device *netdev = adapter->netdev; unsigned long flags; - dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); + netdev_dbg(netdev, "%s: entry\n", __func__); spin_lock_irqsave(&adapter->lock, flags); + + if (KS8842_USE_DMA(adapter)) + ks8842_stop_dma(adapter); + /* disable interrupts */ ks8842_write16(adapter, 18, 0, REG_IER); ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); + + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->lock, flags); ks8842_reset_hw(adapter); @@ -617,6 +1112,18 @@ static void ks8842_tx_timeout(struct net_device *netdev) ks8842_write_mac_addr(adapter, netdev->dev_addr); ks8842_update_link_status(netdev, adapter); + + if (KS8842_USE_DMA(adapter)) + __ks8842_start_new_rx_dma(netdev); +} + +static void ks8842_tx_timeout(struct net_device *netdev) +{ + struct 
ks8842_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "%s: entry\n", __func__); + + schedule_work(&adapter->timeout_work); } static const struct net_device_ops ks8842_netdev_ops = { @@ -653,7 +1160,11 @@ static int __devinit ks8842_probe(struct platform_device *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); + adapter->netdev = netdev; + INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); + adapter->conf_flags = iomem->flags; + if (!adapter->hw_addr) goto err_ioremap; @@ -663,7 +1174,18 @@ static int __devinit ks8842_probe(struct platform_device *pdev) goto err_get_irq; } - adapter->pdev = pdev; + adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; + + /* DMA is only supported when accessed via timberdale */ + if (!(adapter->conf_flags & MICREL_KS884X) && pdata && + (pdata->tx_dma_channel != -1) && + (pdata->rx_dma_channel != -1)) { + adapter->dma_rx.channel = pdata->rx_dma_channel; + adapter->dma_tx.channel = pdata->tx_dma_channel; + } else { + adapter->dma_rx.channel = -1; + adapter->dma_tx.channel = -1; + } tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); spin_lock_init(&adapter->lock); diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 7805bbf1d53a6ceeaf02f8c68f4e3740e2c4939d..8b32cc107f0f51c02a6116bb24fc4c2cd3bbdc2a 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c @@ -1457,7 +1457,6 @@ struct dev_info { * @adapter: Adapter device information. * @port: Port information. * @monitor_time_info: Timer to monitor ports. - * @stats: Network statistics. * @proc_sem: Semaphore for proc accessing. * @id: Device ID. * @mii_if: MII interface information. @@ -1471,7 +1470,6 @@ struct dev_priv { struct dev_info *adapter; struct ksz_port port; struct ksz_timer_info monitor_timer_info; - struct net_device_stats stats; struct semaphore proc_sem; int id; @@ -4751,8 +4749,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) hw_send_pkt(hw); /* Update transmit statistics. */ - priv->stats.tx_packets++; - priv->stats.tx_bytes += len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; } /** @@ -5030,7 +5028,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, /* skb->data != skb->head */ skb = dev_alloc_skb(packet_len + 2); if (!skb) { - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; return -ENOMEM; } @@ -5050,8 +5048,8 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, csum_verified(skb); /* Update receive statistics. */ - priv->stats.rx_packets++; - priv->stats.rx_bytes += packet_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_len; /* Notify upper layer for received packet. */ rx_status = netif_rx(skb); @@ -5291,7 +5289,7 @@ static irqreturn_t netdev_intr(int irq, void *dev_id) } if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) { - priv->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; hw_resume_rx(hw); } @@ -5522,7 +5520,7 @@ static int netdev_open(struct net_device *dev) priv->promiscuous = 0; /* Reset device statistics. 
*/ - memset(&priv->stats, 0, sizeof(struct net_device_stats)); + memset(&dev->stats, 0, sizeof(struct net_device_stats)); memset((void *) port->counter, 0, (sizeof(u64) * OID_COUNTER_LAST)); @@ -5622,42 +5620,42 @@ static struct net_device_stats *netdev_query_statistics(struct net_device *dev) int i; int p; - priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; - priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; + dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; + dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; /* Reset to zero to add count later. */ - priv->stats.multicast = 0; - priv->stats.collisions = 0; - priv->stats.rx_length_errors = 0; - priv->stats.rx_crc_errors = 0; - priv->stats.rx_frame_errors = 0; - priv->stats.tx_window_errors = 0; + dev->stats.multicast = 0; + dev->stats.collisions = 0; + dev->stats.rx_length_errors = 0; + dev->stats.rx_crc_errors = 0; + dev->stats.rx_frame_errors = 0; + dev->stats.tx_window_errors = 0; for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { mib = &hw->port_mib[p]; - priv->stats.multicast += (unsigned long) + dev->stats.multicast += (unsigned long) mib->counter[MIB_COUNTER_RX_MULTICAST]; - priv->stats.collisions += (unsigned long) + dev->stats.collisions += (unsigned long) mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION]; - priv->stats.rx_length_errors += (unsigned long)( + dev->stats.rx_length_errors += (unsigned long)( mib->counter[MIB_COUNTER_RX_UNDERSIZE] + mib->counter[MIB_COUNTER_RX_FRAGMENT] + mib->counter[MIB_COUNTER_RX_OVERSIZE] + mib->counter[MIB_COUNTER_RX_JABBER]); - priv->stats.rx_crc_errors += (unsigned long) + dev->stats.rx_crc_errors += (unsigned long) mib->counter[MIB_COUNTER_RX_CRC_ERR]; - priv->stats.rx_frame_errors += (unsigned long)( + dev->stats.rx_frame_errors += (unsigned long)( mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] + mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]); - priv->stats.tx_window_errors += (unsigned long) + dev->stats.tx_window_errors += (unsigned long) mib->counter[MIB_COUNTER_TX_LATE_COLLISION]; } - return &priv->stats; + return &dev->stats; } /** @@ -5718,7 +5716,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv, * from the bridge. */ if ((hw->features & STP_SUPPORT) && !promiscuous && - dev->br_port) { + (dev->priv_flags & IFF_BRIDGE_PORT)) { struct ksz_switch *sw = hw->ksz_switch; int port = priv->port.first_port; @@ -6896,13 +6894,12 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) i = j = num = got_num = 0; while (j < MAC_ADDR_LEN) { if (macaddr[i]) { + int digit; + got_num = 1; - if ('0' <= macaddr[i] && macaddr[i] <= '9') - num = num * 16 + macaddr[i] - '0'; - else if ('A' <= macaddr[i] && macaddr[i] <= 'F') - num = num * 16 + 10 + macaddr[i] - 'A'; - else if ('a' <= macaddr[i] && macaddr[i] <= 'f') - num = num * 16 + 10 + macaddr[i] - 'a'; + digit = hex_to_bin(macaddr[i]); + if (digit >= 0) + num = num * 16 + digit; else if (':' == macaddr[i]) got_num = 2; else diff --git a/drivers/net/lance.c b/drivers/net/lance.c index 21f8adaa87c164c8b2f604bf75937f5a072fb773..f06296bfe293fd8ddcce2b4906c897d45472f92b 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -248,7 +248,6 @@ struct lance_private { int cur_rx, cur_tx; /* The next free ring entry */ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ int dma; - struct net_device_stats stats; unsigned char chip_version; /* See lance_chip_type. 
*/ spinlock_t devlock; }; @@ -925,7 +924,7 @@ static void lance_tx_timeout (struct net_device *dev) printk ("%s: transmit timed out, status %4.4x, resetting.\n", dev->name, inw (ioaddr + LANCE_DATA)); outw (0x0004, ioaddr + LANCE_DATA); - lp->stats.tx_errors++; + dev->stats.tx_errors++; #ifndef final_version if (lance_debug > 3) { int i; @@ -989,7 +988,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, lp->tx_ring[entry].misc = 0x0000; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* If any part of this buffer is >16M we must copy it to a low-memory buffer. */ @@ -1062,13 +1061,16 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) if (status & 0x40000000) { /* There was an major error, log it. */ int err_status = lp->tx_ring[entry].misc; - lp->stats.tx_errors++; - if (err_status & 0x0400) lp->stats.tx_aborted_errors++; - if (err_status & 0x0800) lp->stats.tx_carrier_errors++; - if (err_status & 0x1000) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (err_status & 0x0400) + dev->stats.tx_aborted_errors++; + if (err_status & 0x0800) + dev->stats.tx_carrier_errors++; + if (err_status & 0x1000) + dev->stats.tx_window_errors++; if (err_status & 0x4000) { /* Ackk! On FIFO errors the Tx unit is turned off! */ - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; /* Remove this verbosity later! */ printk("%s: Tx FIFO error! Status %4.4x.\n", dev->name, csr0); @@ -1077,8 +1079,8 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) } } else { if (status & 0x18000000) - lp->stats.collisions++; - lp->stats.tx_packets++; + dev->stats.collisions++; + dev->stats.tx_packets++; } /* We must free the original skb if it's not a data-only copy @@ -1108,8 +1110,10 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) } /* Log misc errors. */ - if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */ - if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & 0x4000) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) + dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & 0x0800) { printk("%s: Bus master arbitration failure, status %4.4x.\n", dev->name, csr0); @@ -1155,11 +1159,15 @@ lance_rx(struct net_device *dev) buffers it's possible for a jabber packet to use two buffers, with only the last correctly noting the error. 
*/ if (status & 0x01) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet.*/ - if (status & 0x20) lp->stats.rx_frame_errors++; - if (status & 0x10) lp->stats.rx_over_errors++; - if (status & 0x08) lp->stats.rx_crc_errors++; - if (status & 0x04) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x20) + dev->stats.rx_frame_errors++; + if (status & 0x10) + dev->stats.rx_over_errors++; + if (status & 0x08) + dev->stats.rx_crc_errors++; + if (status & 0x04) + dev->stats.rx_fifo_errors++; lp->rx_ring[entry].base &= 0x03ffffff; } else @@ -1171,7 +1179,7 @@ lance_rx(struct net_device *dev) if(pkt_len<60) { printk("%s: Runt packet!\n",dev->name); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { @@ -1185,7 +1193,7 @@ lance_rx(struct net_device *dev) if (i > RX_RING_SIZE -2) { - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; lp->rx_ring[entry].base |= 0x80000000; lp->cur_rx++; } @@ -1198,8 +1206,8 @@ lance_rx(struct net_device *dev) pkt_len); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } /* The docs say that the buffer length isn't touched, but Andrew Boyd @@ -1225,7 +1233,7 @@ lance_close(struct net_device *dev) if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { outw(112, ioaddr+LANCE_ADDR); - lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); + dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); } outw(0, ioaddr+LANCE_ADDR); @@ -1262,12 +1270,12 @@ static struct net_device_stats *lance_get_stats(struct net_device *dev) spin_lock_irqsave(&lp->devlock, flags); saved_addr = inw(ioaddr+LANCE_ADDR); outw(112, ioaddr+LANCE_ADDR); - lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); + dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); outw(saved_addr, ioaddr+LANCE_ADDR); spin_unlock_irqrestore(&lp->devlock, flags); } - return &lp->stats; + return &dev->stats; } /* Set or clear the multicast filter for this adaptor. diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index 6474c4973d3ad977542ba53529ed9f0165cd21d6..b5c6279cc5a329be6b5dac9e209125e0071b4b6e 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -192,6 +192,35 @@ static int temac_dcr_setup(struct temac_local *lp, struct of_device *op, #endif +/** + * * temac_dma_bd_release - Release buffer descriptor rings + */ +static void temac_dma_bd_release(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + int i; + + for (i = 0; i < RX_BD_NUM; i++) { + if (!lp->rx_skb[i]) + break; + else { + dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, + XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb(lp->rx_skb[i]); + } + } + if (lp->rx_bd_v) + dma_free_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + lp->rx_bd_v, lp->rx_bd_p); + if (lp->tx_bd_v) + dma_free_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + lp->tx_bd_v, lp->tx_bd_p); + if (lp->rx_skb) + kfree(lp->rx_skb); +} + /** * temac_dma_bd_init - Setup buffer descriptor rings */ @@ -202,14 +231,29 @@ static int temac_dma_bd_init(struct net_device *ndev) int i; lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); + if (!lp->rx_skb) { + dev_err(&ndev->dev, + "can't allocate memory for DMA RX buffer\n"); + goto out; + } /* allocate the tx and rx ring buffer descriptors. */ /* returns a virtual addres and a physical address. 
*/ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, &lp->tx_bd_p, GFP_KERNEL); + if (!lp->tx_bd_v) { + dev_err(&ndev->dev, + "unable to allocate DMA TX buffer descriptors"); + goto out; + } lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * RX_BD_NUM, &lp->rx_bd_p, GFP_KERNEL); + if (!lp->rx_bd_v) { + dev_err(&ndev->dev, + "unable to allocate DMA RX buffer descriptors"); + goto out; + } memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); for (i = 0; i < TX_BD_NUM; i++) { @@ -227,7 +271,7 @@ static int temac_dma_bd_init(struct net_device *ndev) if (skb == 0) { dev_err(&ndev->dev, "alloc_skb error %d\n", i); - return -1; + goto out; } lp->rx_skb[i] = skb; /* returns physical address of skb->data */ @@ -258,6 +302,10 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); return 0; + +out: + temac_dma_bd_release(ndev); + return -ENOMEM; } /* --------------------------------------------------------------------- @@ -505,7 +553,10 @@ static void temac_device_reset(struct net_device *ndev) } lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); - temac_dma_bd_init(ndev); + if (temac_dma_bd_init(ndev)) { + dev_err(&ndev->dev, + "temac_device_reset descriptor allocation failed\n"); + } temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0); temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0); @@ -837,6 +888,8 @@ static int temac_stop(struct net_device *ndev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; + temac_dma_bd_release(ndev); + return 0; } @@ -862,6 +915,7 @@ static const struct net_device_ops temac_netdev_ops = { .ndo_stop = temac_stop, .ndo_start_xmit = temac_start_xmit, .ndo_set_mac_address = netdev_set_mac_address, + .ndo_validate_addr = eth_validate_addr, //.ndo_set_multicast_list = temac_set_multicast_list, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = temac_poll_controller, @@ -978,19 +1032,22 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); } else { dev_err(&op->dev, "unable to map DMA registers\n"); + of_node_put(np); goto err_iounmap; } } lp->rx_irq = irq_of_parse_and_map(np, 0); lp->tx_irq = irq_of_parse_and_map(np, 1); + + of_node_put(np); /* Finished with the DMA node; drop the reference */ + if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { dev_err(&op->dev, "could not determine irqs\n"); rc = -ENOMEM; goto err_iounmap_2; } - of_node_put(np); /* Finished with the DMA node; drop the reference */ /* Retrieve the MAC address */ addr = of_get_property(op->dev.of_node, "local-mac-address", &size); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 72b7949c91b15fab1f608302d5d8cc751245d1ba..9a0996795321b327353e7a6d28cf94a308876c01 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -58,11 +58,13 @@ #include #include #include +#include struct pcpu_lstats { - unsigned long packets; - unsigned long bytes; - unsigned long drops; + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; + unsigned long drops; }; /* @@ -86,31 +88,40 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, len = skb->len; if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { + u64_stats_update_begin(&lb_stats->syncp); lb_stats->bytes += len; lb_stats->packets++; + u64_stats_update_end(&lb_stats->syncp); } else lb_stats->drops++; return NETDEV_TX_OK; } -static struct net_device_stats *loopback_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device 
*dev, + struct rtnl_link_stats64 *stats) { const struct pcpu_lstats __percpu *pcpu_lstats; - struct net_device_stats *stats = &dev->stats; - unsigned long bytes = 0; - unsigned long packets = 0; - unsigned long drops = 0; + u64 bytes = 0; + u64 packets = 0; + u64 drops = 0; int i; pcpu_lstats = (void __percpu __force *)dev->ml_priv; for_each_possible_cpu(i) { const struct pcpu_lstats *lb_stats; + u64 tbytes, tpackets; + unsigned int start; lb_stats = per_cpu_ptr(pcpu_lstats, i); - bytes += lb_stats->bytes; - packets += lb_stats->packets; + do { + start = u64_stats_fetch_begin(&lb_stats->syncp); + tbytes = lb_stats->bytes; + tpackets = lb_stats->packets; + } while (u64_stats_fetch_retry(&lb_stats->syncp, start)); drops += lb_stats->drops; + bytes += tbytes; + packets += tpackets; } stats->rx_packets = packets; stats->tx_packets = packets; @@ -158,7 +169,7 @@ static void loopback_dev_free(struct net_device *dev) static const struct net_device_ops loopback_ops = { .ndo_init = loopback_dev_init, .ndo_start_xmit= loopback_xmit, - .ndo_get_stats = loopback_get_stats, + .ndo_get_stats64 = loopback_get_stats64, }; /* diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index 1136c9a22b6772caa234a576eb5793d3b1e4fd78..3832fa4961dd6b3ba2a3806e236a07d0ad9c4f8b 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c @@ -157,6 +157,8 @@ static void dayna_block_output(struct net_device *dev, int count, #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) +#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) + /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); @@ -164,8 +166,8 @@ static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); -static void word_memcpy_tocard(void *tp, const void *fp, int count); -static void word_memcpy_fromcard(void *tp, const void *fp, int count); +static void word_memcpy_tocard(unsigned long tp, const void *fp, int count); +static void word_memcpy_fromcard(void *tp, unsigned long fp, int count); static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) { @@ -245,9 +247,9 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) unsigned long outdata = 0xA5A0B5B0; unsigned long indata = 0x00000000; /* Try writing 32 bits */ - memcpy(membase, &outdata, 4); + memcpy_toio(membase, &outdata, 4); /* Now compare them */ - if (memcmp((char *)&outdata, (char *)membase, 4) == 0) + if (memcmp_withio(&outdata, membase, 4) == 0) return ACCESS_32; /* Write 16 bit output */ word_memcpy_tocard(membase, &outdata, 4); @@ -554,7 +556,7 @@ static int __init mac8390_initdev(struct net_device *dev, case MAC8390_APPLE: switch (mac8390_testio(dev->mem_start)) { case ACCESS_UNKNOWN: - pr_info("Don't know how to access card memory!\n"); + pr_err("Don't know how to access card memory!\n"); return -ENODEV; break; @@ -641,12 +643,13 @@ static int __init mac8390_initdev(struct net_device *dev, static int mac8390_open(struct net_device *dev) { + int err; + __ei_open(dev); - if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { - pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; - } - return 0; + err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", 
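/* mac8390_open() now propagates the request_irq() return code instead
 * of a hard-coded -EAGAIN, and logs with pr_err(). Note that
 * __ei_open() has already run at this point and is not unwound on
 * failure, same as in the code being replaced. */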
dev); + if (err) + pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq); + return err; } static int mac8390_close(struct net_device *dev) @@ -731,7 +734,7 @@ static void sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { unsigned long hdr_start = (ring_page - WD_START_PG)<<8; - memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4); + memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); /* Fix endianness */ hdr->count = swab16(hdr->count); } @@ -745,14 +748,13 @@ static void sane_block_input(struct net_device *dev, int count, if (xfer_start + count > ei_status.rmem_end) { /* We must wrap the input move. */ int semi_count = ei_status.rmem_end - xfer_start; - memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, + memcpy_fromio(skb->data, dev->mem_start + xfer_base, semi_count); count -= semi_count; - memcpy_toio(skb->data + semi_count, - (char *)ei_status.rmem_start, count); - } else { - memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, + memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, count); + } else { + memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); } } @@ -761,7 +763,7 @@ static void sane_block_output(struct net_device *dev, int count, { long shmem = (start_page - WD_START_PG)<<8; - memcpy_toio((char *)dev->mem_start + shmem, buf, count); + memcpy_toio(dev->mem_start + shmem, buf, count); } /* dayna block input/output */ @@ -812,7 +814,7 @@ static void slow_sane_get_8390_hdr(struct net_device *dev, int ring_page) { unsigned long hdr_start = (ring_page - WD_START_PG)<<8; - word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4); + word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4); /* Register endianism - fix here rather than 8390.c */ hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); } @@ -826,15 +828,14 @@ static void slow_sane_block_input(struct net_device *dev, int count, if (xfer_start + count > ei_status.rmem_end) { /* We must wrap the input move. 
*/ int semi_count = ei_status.rmem_end - xfer_start; - word_memcpy_fromcard(skb->data, - (char *)dev->mem_start + xfer_base, + word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, semi_count); count -= semi_count; word_memcpy_fromcard(skb->data + semi_count, - (char *)ei_status.rmem_start, count); + ei_status.rmem_start, count); } else { - word_memcpy_fromcard(skb->data, - (char *)dev->mem_start + xfer_base, count); + word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, + count); } } @@ -843,12 +844,12 @@ static void slow_sane_block_output(struct net_device *dev, int count, { long shmem = (start_page - WD_START_PG)<<8; - word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count); + word_memcpy_tocard(dev->mem_start + shmem, buf, count); } -static void word_memcpy_tocard(void *tp, const void *fp, int count) +static void word_memcpy_tocard(unsigned long tp, const void *fp, int count) { - volatile unsigned short *to = tp; + volatile unsigned short *to = (void *)tp; const unsigned short *from = fp; count++; @@ -858,10 +859,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count) *to++ = *from++; } -static void word_memcpy_fromcard(void *tp, const void *fp, int count) +static void word_memcpy_fromcard(void *tp, unsigned long fp, int count) { unsigned short *to = tp; - const volatile unsigned short *from = fp; + const volatile unsigned short *from = (const void *)fp; count++; count /= 2; diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 69fa4ef64dd24d99f6b7981735578d1f7aae65c5..669b317974a88a74a65d76865316d78cbfd17140 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -110,7 +110,6 @@ static unsigned int net_debug = NET_DEBUG; /* Information that need to be kept for each board. */ struct net_local { - struct net_device_stats stats; int chip_type; /* one of: CS8900, CS8920, CS8920M */ char chip_revision; /* revision letter of the chip ('A'...) */ int send_cmd; /* the propercommand used to send a packet. 
*/ @@ -444,13 +443,18 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) net_rx(dev); break; case ISQ_TRANSMITTER_EVENT: - lp->stats.tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); - if ((status & TX_OK) == 0) lp->stats.tx_errors++; - if (status & TX_LOST_CRS) lp->stats.tx_carrier_errors++; - if (status & TX_SQE_ERROR) lp->stats.tx_heartbeat_errors++; - if (status & TX_LATE_COL) lp->stats.tx_window_errors++; - if (status & TX_16_COL) lp->stats.tx_aborted_errors++; + if ((status & TX_OK) == 0) + dev->stats.tx_errors++; + if (status & TX_LOST_CRS) + dev->stats.tx_carrier_errors++; + if (status & TX_SQE_ERROR) + dev->stats.tx_heartbeat_errors++; + if (status & TX_LATE_COL) + dev->stats.tx_window_errors++; + if (status & TX_16_COL) + dev->stats.tx_aborted_errors++; break; case ISQ_BUFFER_EVENT: if (status & READY_FOR_TX) { @@ -469,10 +473,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) } break; case ISQ_RX_MISS_EVENT: - lp->stats.rx_missed_errors += (status >>6); + dev->stats.rx_missed_errors += (status >> 6); break; case ISQ_TX_COL_EVENT: - lp->stats.collisions += (status >>6); + dev->stats.collisions += (status >> 6); break; } } @@ -483,19 +487,22 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) static void net_rx(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; int status, length; status = readreg(dev, PP_RxStatus); if ((status & RX_OK) == 0) { - lp->stats.rx_errors++; - if (status & RX_RUNT) lp->stats.rx_length_errors++; - if (status & RX_EXTRA_DATA) lp->stats.rx_length_errors++; - if (status & RX_CRC_ERROR) if (!(status & (RX_EXTRA_DATA|RX_RUNT))) + dev->stats.rx_errors++; + if (status & RX_RUNT) + dev->stats.rx_length_errors++; + if (status & RX_EXTRA_DATA) + dev->stats.rx_length_errors++; + if ((status & RX_CRC_ERROR) && + !(status & (RX_EXTRA_DATA|RX_RUNT))) /* per str 172 */ - lp->stats.rx_crc_errors++; - if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++; + dev->stats.rx_crc_errors++; + if (status & RX_DRIBBLE) + dev->stats.rx_frame_errors++; return; } @@ -504,7 +511,7 @@ net_rx(struct net_device *dev) skb = alloc_skb(length, GFP_ATOMIC); if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_put(skb, length); @@ -519,8 +526,8 @@ net_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } /* The inverse routine to net_open(). */ @@ -548,16 +555,15 @@ net_close(struct net_device *dev) static struct net_device_stats * net_get_stats(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); unsigned long flags; local_irq_save(flags); /* Update the statistics from the device registers. 
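(This mac89x0.c patch also drops the private net_local.stats copy in favor of the generic dev->stats fields, and folds the nested RX_CRC_ERROR ifs in net_rx() into a single && test.)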
*/ - lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); - lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6); + dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); + dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6); local_irq_restore(flags); - return &lp->stats; + return &dev->stats; } static void set_multicast_list(struct net_device *dev) diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 40797fbdca9f774798668dba14381a96f9b68511..ff2f158ab0b92d88597bf68d1b1a13cf89ca4e41 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -1082,7 +1082,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, if_mii(rq), cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static const struct net_device_ops macb_netdev_ops = { diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index adb54fe2d82a3e024880c64657ffb88253a40d2b..c93679ee6994a7ff5851e507974565a5a3495f1b 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -140,21 +140,40 @@ static irqreturn_t macsonic_interrupt(int irq, void *dev_id) static int macsonic_open(struct net_device* dev) { - if (request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; + int retval; + + retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST, + "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, dev->irq); + goto err; } /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes * in at priority level 3. However, we sometimes get the level 2 inter- * rupt as well, which must prevent re-entrance of the sonic handler. */ - if (dev->irq == IRQ_AUTO_3) - if (request_irq(IRQ_NUBUS_9, macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9); - free_irq(dev->irq, dev); - return -EAGAIN; + if (dev->irq == IRQ_AUTO_3) { + retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, + IRQ_FLG_FAST, "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, IRQ_NUBUS_9); + goto err_irq; } - return sonic_open(dev); + } + retval = sonic_open(dev); + if (retval) + goto err_irq_nubus; + return 0; + +err_irq_nubus: + if (dev->irq == IRQ_AUTO_3) + free_irq(IRQ_NUBUS_9, dev); +err_irq: + free_irq(dev->irq, dev); +err: + return retval; } static int macsonic_close(struct net_device* dev) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index f15fe2cf72ae1261fe6d0cde6ca263c2d0c54809..0ef0eb0db94564dda7d0c7f28c7581db799e0e7a 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -37,8 +37,14 @@ struct macvlan_port { struct net_device *dev; struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; struct list_head vlans; + struct rcu_head rcu; }; +#define macvlan_port_get_rcu(dev) \ + ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) +#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) +#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT) + static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { @@ -145,15 +151,17 @@ static void macvlan_broadcast(struct sk_buff *skb, } /* called under rcu_read_lock() from netif_receive_skb */ -static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port, - struct sk_buff *skb) +static struct sk_buff *macvlan_handle_frame(struct sk_buff 
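/* macvlan moves from the old macvlan_handle_frame_hook to the generic
 * rx_handler infrastructure: the handler now receives only the skb and
 * derives the port via macvlan_port_get_rcu(skb->dev), i.e. an
 * rcu_dereference() of dev->rx_handler_data registered in
 * macvlan_port_create(). RX accounting is also deferred until
 * vlan->receive() returns, so only NET_RX_SUCCESS counts as accepted. */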
*skb) { + struct macvlan_port *port; const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; const struct macvlan_dev *src; struct net_device *dev; - unsigned int len; + unsigned int len = 0; + int ret = NET_RX_DROP; + port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { src = macvlan_hash_lookup(port, eth->h_source); if (!src) @@ -188,14 +196,16 @@ static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port, } len = skb->len + ETH_HLEN; skb = skb_share_check(skb, GFP_ATOMIC); - macvlan_count_rx(vlan, len, skb != NULL, 0); if (!skb) - return NULL; + goto out; skb->dev = dev; skb->pkt_type = PACKET_HOST; - vlan->receive(skb); + ret = vlan->receive(skb); + +out: + macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); return NULL; } @@ -424,29 +434,38 @@ static void macvlan_uninit(struct net_device *dev) free_percpu(vlan->rx_stats); } -static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { - struct net_device_stats *stats = &dev->stats; struct macvlan_dev *vlan = netdev_priv(dev); dev_txq_stats_fold(dev, stats); if (vlan->rx_stats) { - struct macvlan_rx_stats *p, rx = {0}; + struct macvlan_rx_stats *p, accum = {0}; + u64 rx_packets, rx_bytes, rx_multicast; + unsigned int start; int i; for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->rx_stats, i); - rx.rx_packets += p->rx_packets; - rx.rx_bytes += p->rx_bytes; - rx.rx_errors += p->rx_errors; - rx.multicast += p->multicast; + do { + start = u64_stats_fetch_begin_bh(&p->syncp); + rx_packets = p->rx_packets; + rx_bytes = p->rx_bytes; + rx_multicast = p->rx_multicast; + } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + accum.rx_packets += rx_packets; + accum.rx_bytes += rx_bytes; + accum.rx_multicast += rx_multicast; + /* rx_errors is an ulong, updated without syncp protection */ + accum.rx_errors += p->rx_errors; } - stats->rx_packets = rx.rx_packets; - stats->rx_bytes = rx.rx_bytes; - stats->rx_errors = rx.rx_errors; - stats->rx_dropped = rx.rx_errors; - stats->multicast = rx.multicast; + stats->rx_packets = accum.rx_packets; + stats->rx_bytes = accum.rx_bytes; + stats->rx_errors = accum.rx_errors; + stats->rx_dropped = accum.rx_errors; + stats->multicast = accum.rx_multicast; } return stats; } @@ -495,7 +514,7 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_multicast_list = macvlan_set_multicast_list, - .ndo_get_stats = macvlan_dev_get_stats, + .ndo_get_stats64 = macvlan_dev_get_stats64, .ndo_validate_addr = eth_validate_addr, }; @@ -521,6 +540,7 @@ static int macvlan_port_create(struct net_device *dev) { struct macvlan_port *port; unsigned int i; + int err; if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; @@ -533,19 +553,32 @@ static int macvlan_port_create(struct net_device *dev) INIT_LIST_HEAD(&port->vlans); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); - rcu_assign_pointer(dev->macvlan_port, port); - return 0; + + err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); + if (err) + kfree(port); + + dev->priv_flags |= IFF_MACVLAN_PORT; + return err; } -static void macvlan_port_destroy(struct net_device *dev) +static void macvlan_port_rcu_free(struct rcu_head *head) { - struct macvlan_port *port = dev->macvlan_port; + struct macvlan_port *port; - 
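/* Port teardown switches from rcu_assign_pointer()+synchronize_rcu()
 * to call_rcu() with macvlan_port_rcu_free(), so macvlan_port_destroy()
 * no longer blocks waiting for a grace period before freeing the port. */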
rcu_assign_pointer(dev->macvlan_port, NULL); - synchronize_rcu(); + port = container_of(head, struct macvlan_port, rcu); kfree(port); } +static void macvlan_port_destroy(struct net_device *dev) +{ + struct macvlan_port *port = macvlan_port_get(dev); + + dev->priv_flags &= ~IFF_MACVLAN_PORT; + netdev_rx_handler_unregister(dev); + call_rcu(&port->rcu, macvlan_port_rcu_free); +} + static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { @@ -621,12 +654,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (!tb[IFLA_ADDRESS]) random_ether_addr(dev->dev_addr); - if (lowerdev->macvlan_port == NULL) { + if (!macvlan_port_exists(lowerdev)) { err = macvlan_port_create(lowerdev); if (err < 0) return err; } - port = lowerdev->macvlan_port; + port = macvlan_port_get(lowerdev); vlan->lowerdev = lowerdev; vlan->dev = dev; @@ -736,10 +769,11 @@ static int macvlan_device_event(struct notifier_block *unused, struct macvlan_dev *vlan, *next; struct macvlan_port *port; - port = dev->macvlan_port; - if (port == NULL) + if (!macvlan_port_exists(dev)) return NOTIFY_DONE; + port = macvlan_port_get(dev); + switch (event) { case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) @@ -773,14 +807,12 @@ static int __init macvlan_init_module(void) int err; register_netdevice_notifier(&macvlan_notifier_block); - macvlan_handle_frame_hook = macvlan_handle_frame; err = macvlan_link_register(&macvlan_link_ops); if (err < 0) goto err1; return 0; err1: - macvlan_handle_frame_hook = NULL; unregister_netdevice_notifier(&macvlan_notifier_block); return err; } @@ -788,7 +820,6 @@ static int __init macvlan_init_module(void) static void __exit macvlan_cleanup_module(void) { rtnl_link_unregister(&macvlan_link_ops); - macvlan_handle_frame_hook = NULL; unregister_netdevice_notifier(&macvlan_notifier_block); } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index ff02b836c3c4a3d23c6b78fc707508b40732d9f8..3b1c54a9c6ef12bc255f0af833ef6de2788da219 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -58,7 +58,7 @@ static struct proto macvtap_proto = { * only has one tap, the interface numbers assure that the * device nodes are unique. 
*/ -static unsigned int macvtap_major; +static dev_t macvtap_major; #define MACVTAP_NUM_DEVS 65536 static struct class *macvtap_class; static struct cdev macvtap_cdev; diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c index f599294fa8abbe0f7195fb8a7e6b539a60faf618..68aaa42d0ced7b1f93771cacce3ca5abeec786db 100644 --- a/drivers/net/mlx4/catas.c +++ b/drivers/net/mlx4/catas.c @@ -101,8 +101,8 @@ static void catas_reset(struct work_struct *work) ret = mlx4_restart_one(priv->dev.pdev); /* 'priv' now is not valid */ if (ret) - printk(KERN_ERR "mlx4 %s: Reset failed (%d)\n", - pci_name(pdev), ret); + pr_err("mlx4 %s: Reset failed (%d)\n", + pci_name(pdev), ret); else { dev = pci_get_drvdata(pdev); mlx4_dbg(dev, "Reset succeeded\n"); diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c index d5afd037cd7d1ceec5e274318f78195a2eb3a7b3..b275238fe70d347dd89fa59b3152f1fc1fdc162d 100644 --- a/drivers/net/mlx4/en_ethtool.c +++ b/drivers/net/mlx4/en_ethtool.c @@ -387,6 +387,42 @@ static void mlx4_en_get_ringparam(struct net_device *dev, param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size; } +static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int rc = 0; + int changed = 0; + + if (data & ~ETH_FLAG_LRO) + return -EOPNOTSUPP; + + if (data & ETH_FLAG_LRO) { + if (mdev->profile.num_lro == 0) + return -EOPNOTSUPP; + if (!(dev->features & NETIF_F_LRO)) + changed = 1; + } else if (dev->features & NETIF_F_LRO) { + changed = 1; + } + + if (changed) { + if (netif_running(dev)) { + mutex_lock(&mdev->state_lock); + mlx4_en_stop_port(dev); + } + dev->features ^= NETIF_F_LRO; + if (netif_running(dev)) { + rc = mlx4_en_start_port(dev); + if (rc) + en_err(priv, "Failed to restart port\n"); + mutex_unlock(&mdev->state_lock); + } + } + + return rc; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@ -415,7 +451,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_ringparam = mlx4_en_get_ringparam, .set_ringparam = mlx4_en_set_ringparam, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, + .set_flags = mlx4_ethtool_op_set_flags, }; diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index cbabf14f95d027aa57be9cf77126cd30bae4b0d2..97934f1ec53af83c0dfe40e3b9e8730c83ae1c1e 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c @@ -79,6 +79,29 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." " Per priority bit mask"); +int en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...) 
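/* The en_print() macro from mlx4_en.h becomes a real varargs function
 * built on struct va_format and the %pV printk extension; the prototype
 * added to mlx4_en.h later in this patch carries a printf format
 * attribute so the en_dbg/en_warn/en_err callers get format checking. */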
+{ + va_list args; + struct va_format vaf; + int i; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + if (priv->registered) + i = printk("%s%s: %s: %pV", + level, DRV_NAME, priv->dev->name, &vaf); + else + i = printk("%s%s: %s: Port %d: %pV", + level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), + priv->port, &vaf); + va_end(args); + + return i; +} + static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) { struct mlx4_en_profile *params = &mdev->profile; @@ -152,15 +175,11 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) static void *mlx4_en_add(struct mlx4_dev *dev) { - static int mlx4_en_version_printed; struct mlx4_en_dev *mdev; int i; int err; - if (!mlx4_en_version_printed) { - printk(KERN_INFO "%s", mlx4_en_version); - mlx4_en_version_printed++; - } + printk_once(KERN_INFO "%s", mlx4_en_version); mdev = kzalloc(sizeof *mdev, GFP_KERNEL); if (!mdev) { diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 96180c0ec206ff89b915d83119c101f751c2adef..a0d8a26f5a025be35c67ff29b4a124f34ceb1c78 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -961,6 +961,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); + dev->dev_id = port - 1; /* * Initialize driver private data diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 423053482ed51c3991495b1dab8771ef45c03231..6d7b2bf210ceb8818a811699d179819304df8510 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -110,7 +110,7 @@ struct mlx4_eqe { u32 raw[6]; struct { __be32 cqn; - } __attribute__((packed)) comp; + } __packed comp; struct { u16 reserved1; __be16 token; @@ -118,27 +118,27 @@ struct mlx4_eqe { u8 reserved3[3]; u8 status; __be64 out_param; - } __attribute__((packed)) cmd; + } __packed cmd; struct { __be32 qpn; - } __attribute__((packed)) qp; + } __packed qp; struct { __be32 srqn; - } __attribute__((packed)) srq; + } __packed srq; struct { __be32 cqn; u32 reserved1; u8 reserved2[3]; u8 syndrome; - } __attribute__((packed)) cq_err; + } __packed cq_err; struct { u32 reserved1[2]; __be32 port; - } __attribute__((packed)) port_change; + } __packed port_change; } event; u8 reserved3[3]; u8 owner; -} __attribute__((packed)); +} __packed; static void eq_set_ci(struct mlx4_eq *eq, int req_not) { @@ -475,10 +475,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev, mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { if (i % 4 == 0) - printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); + pr_cont("[%02x] ", i * 4); + pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4)); if ((i + 1) % 4 == 0) - printk("\n"); + pr_cont("\n"); } } diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index e3e0d54a7c87851ca15df3f640326de174dc9a42..5102ab1ac561dbaece380465a9806d886d29f651 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1050,8 +1050,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) int err; int port; - printk(KERN_INFO PFX "Initializing %s\n", - pci_name(pdev)); + pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); err = pci_enable_device(pdev); if (err) { @@ -1216,12 +1215,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) static int __devinit mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - static int mlx4_version_printed; - - if (!mlx4_version_printed) { - 
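/* As in mlx4_en_add() above, the open-coded "print the version once"
 * flag is replaced by printk_once(). */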
printk(KERN_INFO "%s", mlx4_version); - ++mlx4_version_printed; - } + printk_once(KERN_INFO "%s", mlx4_version); return __mlx4_init_one(pdev, id); } @@ -1301,17 +1295,17 @@ static struct pci_driver mlx4_driver = { static int __init mlx4_verify_params(void) { if ((log_num_mac < 0) || (log_num_mac > 7)) { - printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac); + pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; } if ((log_num_vlan < 0) || (log_num_vlan > 7)) { - printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan); + pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan); return -1; } if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { - printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); + pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); return -1; } diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 13343e8849997d46b0b408e81b7b629f97be9528..0da5bb7285b4c98f1646f5e0ae88cd23d698798b 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -48,7 +48,6 @@ #include #define DRV_NAME "mlx4_core" -#define PFX DRV_NAME ": " #define DRV_VERSION "0.01" #define DRV_RELDATE "May 1, 2007" @@ -88,17 +87,17 @@ extern int mlx4_debug_level; #endif /* CONFIG_MLX4_DEBUG */ #define mlx4_dbg(mdev, format, arg...) \ - do { \ - if (mlx4_debug_level) \ - dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \ - } while (0) +do { \ + if (mlx4_debug_level) \ + dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ +} while (0) #define mlx4_err(mdev, format, arg...) \ - dev_err(&mdev->pdev->dev, format, ## arg) + dev_err(&mdev->pdev->dev, format, ##arg) #define mlx4_info(mdev, format, arg...) \ - dev_info(&mdev->pdev->dev, format, ## arg) + dev_info(&mdev->pdev->dev, format, ##arg) #define mlx4_warn(mdev, format, arg...) \ - dev_warn(&mdev->pdev->dev, format, ## arg) + dev_warn(&mdev->pdev->dev, format, ##arg) struct mlx4_bitmap { u32 last; diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index b55e46c8b682cefce93fa7179b9964f10c1cf518..449210994ee9cee94323fb3646af114aa4ca3619 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h @@ -52,40 +52,8 @@ #define DRV_VERSION "1.4.1.1" #define DRV_RELDATE "June 2009" - #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) -#define en_print(level, priv, format, arg...) \ - { \ - if ((priv)->registered) \ - printk(level "%s: %s: " format, DRV_NAME, \ - (priv->dev)->name, ## arg); \ - else \ - printk(level "%s: %s: Port %d: " format, \ - DRV_NAME, dev_name(&priv->mdev->pdev->dev), \ - (priv)->port, ## arg); \ - } - -#define en_dbg(mlevel, priv, format, arg...) \ - { \ - if (NETIF_MSG_##mlevel & priv->msg_enable) \ - en_print(KERN_DEBUG, priv, format, ## arg) \ - } -#define en_warn(priv, format, arg...) \ - en_print(KERN_WARNING, priv, format, ## arg) -#define en_err(priv, format, arg...) \ - en_print(KERN_ERR, priv, format, ## arg) - -#define mlx4_err(mdev, format, arg...) \ - printk(KERN_ERR "%s %s: " format , DRV_NAME ,\ - dev_name(&mdev->pdev->dev) , ## arg) -#define mlx4_info(mdev, format, arg...) \ - printk(KERN_INFO "%s %s: " format , DRV_NAME ,\ - dev_name(&mdev->pdev->dev) , ## arg) -#define mlx4_warn(mdev, format, arg...) 
\ - printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\ - dev_name(&mdev->pdev->dev) , ## arg) - /* * Device constants */ @@ -568,4 +536,34 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); * Globals */ extern const struct ethtool_ops mlx4_en_ethtool_ops; + + + +/* + * printk / logging functions + */ + +int en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...) __attribute__ ((format (printf, 3, 4))); + +#define en_dbg(mlevel, priv, format, arg...) \ +do { \ + if (NETIF_MSG_##mlevel & priv->msg_enable) \ + en_print(KERN_DEBUG, priv, format, ##arg); \ +} while (0) +#define en_warn(priv, format, arg...) \ + en_print(KERN_WARNING, priv, format, ##arg) +#define en_err(priv, format, arg...) \ + en_print(KERN_ERR, priv, format, ##arg) + +#define mlx4_err(mdev, format, arg...) \ + pr_err("%s %s: " format, DRV_NAME, \ + dev_name(&mdev->pdev->dev), ##arg) +#define mlx4_info(mdev, format, arg...) \ + pr_info("%s %s: " format, DRV_NAME, \ + dev_name(&mdev->pdev->dev), ##arg) +#define mlx4_warn(mdev, format, arg...) \ + pr_warning("%s %s: " format, DRV_NAME, \ + dev_name(&mdev->pdev->dev), ##arg) + #endif diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 3dc69be4949f2b175418273a2905c866dad4acd0..9c188bdd7f4f2c5a6a3a124749d513c4d6b77958 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -58,7 +58,7 @@ struct mlx4_mpt_entry { __be32 mtt_sz; __be32 entity_size; __be32 first_byte_offset; -} __attribute__((packed)); +} __packed; #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) #define MLX4_MPT_FLAG_FREE (0x3UL << 28) diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 73bb8ea6f54a46a3c7c9c9246753a497b452d035..2d488abcf62d7c789a84583dd1ad7564c00ba80c 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1640,6 +1640,11 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, } } +static int mv643xx_eth_set_flags(struct net_device *dev, u32 data) +{ + return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO); +} + static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) { if (sset == ETH_SS_STATS) @@ -1665,7 +1670,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = { .get_strings = mv643xx_eth_get_strings, .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, + .set_flags = mv643xx_eth_set_flags, .get_sset_count = mv643xx_eth_get_sset_count, }; @@ -2456,7 +2461,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct mv643xx_eth_private *mp = netdev_priv(dev); if (mp->phy != NULL) - return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); + return phy_mii_ioctl(mp->phy, ifr, cmd); return -EOPNOTSUPP; } @@ -2670,7 +2675,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) * Detect hardware parameters. */ msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; - msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024; + msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 
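/* Two mv643xx_eth changes here: ethtool .set_flags now goes through a
 * wrapper that passes ETH_FLAG_LRO as the mask of supported flags to
 * ethtool_op_set_flags() (whose signature grew that argument), and the
 * tx_csum_limit default below gains the same pd != NULL guard already
 * used for t_clk. */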
+ pd->tx_csum_limit : 9 * 1024; infer_hw_params(msp); platform_set_drvdata(pdev, msp); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e0b47cc8a86e7600da43d0cb4915d7e58feb06b4..d771d1650d600c49b308b0106abbc5e6b45eab69 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -1730,8 +1730,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) if (csum_enabled) mgp->csum_flag = MXGEFW_FLAGS_CKSUM; else { - u32 flags = ethtool_op_get_flags(netdev); - err = ethtool_op_set_flags(netdev, (flags & ~ETH_FLAG_LRO)); + netdev->features &= ~NETIF_F_LRO; mgp->csum_flag = 0; } @@ -1900,6 +1899,11 @@ static u32 myri10ge_get_msglevel(struct net_device *netdev) return mgp->msg_enable; } +static int myri10ge_set_flags(struct net_device *netdev, u32 value) +{ + return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO); +} + static const struct ethtool_ops myri10ge_ethtool_ops = { .get_settings = myri10ge_get_settings, .get_drvinfo = myri10ge_get_drvinfo, @@ -1920,7 +1924,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .set_msglevel = myri10ge_set_msglevel, .get_msglevel = myri10ge_get_msglevel, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags + .set_flags = myri10ge_set_flags }; static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 2a17b503feaa9c72975454a983a2e28596667d7d..a6033d48b5ccae7a1efa2377baac061d15075fb6 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -548,7 +548,6 @@ struct netdev_private { dma_addr_t tx_dma[TX_RING_SIZE]; struct net_device *dev; struct napi_struct napi; - struct net_device_stats stats; /* Media monitoring timer */ struct timer_list timer; /* Frequently used values: keep some adjacent for cache effect */ @@ -1906,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev) enable_irq(dev->irq); dev->trans_start = jiffies; /* prevent tx timeout */ - np->stats.tx_errors++; + dev->stats.tx_errors++; netif_wake_queue(dev); } @@ -2009,7 +2008,7 @@ static void drain_tx(struct net_device *dev) np->tx_dma[i], np->tx_skbuff[i]->len, PCI_DMA_TODEVICE); dev_kfree_skb(np->tx_skbuff[i]); - np->stats.tx_dropped++; + dev->stats.tx_dropped++; } np->tx_skbuff[i] = NULL; } @@ -2115,7 +2114,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) writel(TxOn, ioaddr + ChipCmd); } else { dev_kfree_skb_irq(skb); - np->stats.tx_dropped++; + dev->stats.tx_dropped++; } spin_unlock_irqrestore(&np->lock, flags); @@ -2140,20 +2139,20 @@ static void netdev_tx_done(struct net_device *dev) dev->name, np->dirty_tx, le32_to_cpu(np->tx_ring[entry].cmd_status)); if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { - np->stats.tx_packets++; - np->stats.tx_bytes += np->tx_skbuff[entry]->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->tx_skbuff[entry]->len; } else { /* Various Tx errors */ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status); if (tx_status & (DescTxAbort|DescTxExcColl)) - np->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (tx_status & DescTxFIFO) - np->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (tx_status & DescTxCarrier) - np->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & DescTxOOWCol) - np->stats.tx_window_errors++; - np->stats.tx_errors++; + dev->stats.tx_window_errors++; + dev->stats.tx_errors++; } pci_unmap_single(np->pci_dev,np->tx_dma[entry], 
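/* natsemi likewise drops its private struct netdev_private.stats and
 * counts everything in the generic dev->stats, both on the TX
 * completion/error paths here and in the RX and error handlers below. */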
np->tx_skbuff[entry]->len, @@ -2301,7 +2300,7 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) "buffers, entry %#08x " "status %#08x.\n", dev->name, np->cur_rx, desc_status); - np->stats.rx_length_errors++; + dev->stats.rx_length_errors++; /* The RX state machine has probably * locked up beneath us. Follow the @@ -2321,15 +2320,15 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) } else { /* There was an error. */ - np->stats.rx_errors++; + dev->stats.rx_errors++; if (desc_status & (DescRxAbort|DescRxOver)) - np->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (desc_status & (DescRxLong|DescRxRunt)) - np->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (desc_status & (DescRxInvalid|DescRxAlign)) - np->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (desc_status & DescRxCRC) - np->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } } else if (pkt_len > np->rx_buf_sz) { /* if this is the tail of a double buffer @@ -2364,8 +2363,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) } skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); - np->stats.rx_packets++; - np->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } entry = (++np->cur_rx) % RX_RING_SIZE; np->rx_head_desc = &np->rx_ring[entry]; @@ -2428,17 +2427,17 @@ static void netdev_error(struct net_device *dev, int intr_status) printk(KERN_NOTICE "%s: Rx status FIFO overrun\n", dev->name); } - np->stats.rx_fifo_errors++; - np->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + dev->stats.rx_errors++; } /* Hmmmmm, it's not clear how to recover from PCI faults. */ if (intr_status & IntrPCIErr) { printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, intr_status & IntrPCIErr); - np->stats.tx_fifo_errors++; - np->stats.tx_errors++; - np->stats.rx_fifo_errors++; - np->stats.rx_errors++; + dev->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + dev->stats.rx_fifo_errors++; + dev->stats.rx_errors++; } spin_unlock(&np->lock); } @@ -2446,11 +2445,10 @@ static void netdev_error(struct net_device *dev, int intr_status) static void __get_stats(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); - struct netdev_private *np = netdev_priv(dev); /* The chip only need report frame silently dropped. 
*/ - np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); - np->stats.rx_missed_errors += readl(ioaddr + RxMissed); + dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); + dev->stats.rx_missed_errors += readl(ioaddr + RxMissed); } static struct net_device_stats *get_stats(struct net_device *dev) @@ -2463,7 +2461,7 @@ static struct net_device_stats *get_stats(struct net_device *dev) __get_stats(dev); spin_unlock_irq(&np->lock); - return &np->stats; + return &dev->stats; } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 3a41b6a84a68636a61fd85fd233a17a4c0ce3cd6..12612127a08718e71b0c6ca07881fedd9eb4e31d 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -254,6 +254,19 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) return err; } +static void +nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter) +{ + + netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION, + adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0, + NX_CDRP_CMD_DESTROY_RX_CTX); + + netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION, + adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0, + NX_CDRP_CMD_DESTROY_TX_CTX); +} + static void nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) { @@ -685,7 +698,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter) if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) goto done; - + if (reset_devices) + nx_fw_cmd_reset_ctx(adapter); err = nx_fw_cmd_create_rx_ctx(adapter); if (err) goto err_out_free; diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 20f7c58bd0920fefe38d5bc9fb4cc58892d9a526..b30de24f4a520683d4502a83ea11939d775ecefb 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -887,12 +887,19 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) struct netxen_adapter *adapter = netdev_priv(netdev); int hw_lro; - if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) + if (data & ~ETH_FLAG_LRO) return -EINVAL; - ethtool_op_set_flags(netdev, data); + if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) + return -EINVAL; - hw_lro = (data & ETH_FLAG_LRO) ? 
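/* netxen_nic_set_flags() now rejects any flag other than ETH_FLAG_LRO
 * up front and toggles NETIF_F_LRO in netdev->features itself instead
 * of relying on ethtool_op_set_flags(), before programming the
 * hardware LRO state. */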
NETXEN_NIC_LRO_ENABLED : 0; + if (data & ETH_FLAG_LRO) { + hw_lro = NETXEN_NIC_LRO_ENABLED; + netdev->features |= NETIF_F_LRO; + } else { + hw_lro = 0; + netdev->features &= ~NETIF_F_LRO; + } if (netxen_config_hw_lro(adapter, hw_lro)) return -EIO; diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 9bddb5fa7a9697d6d4a85ab9ea73960742ed7658..33618edc61f9a3bc85d37bdc202e473db43a3853 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -185,7 +185,6 @@ static void ni52_xmt_int(struct net_device *dev); static void ni52_rnr_int(struct net_device *dev); struct priv { - struct net_device_stats stats; char __iomem *base; char __iomem *mapped; char __iomem *memtop; @@ -972,10 +971,10 @@ static void ni52_rcv_int(struct net_device *dev) memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - p->stats.rx_packets++; - p->stats.rx_bytes += totlen; + dev->stats.rx_packets++; + dev->stats.rx_bytes += totlen; } else - p->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { int rstat; /* free all RBD's until RBD_LAST is set */ @@ -993,12 +992,12 @@ static void ni52_rcv_int(struct net_device *dev) writew(0, &rbd->status); printk(KERN_ERR "%s: received oversized frame! length: %d\n", dev->name, totlen); - p->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else {/* frame !(ok), only with 'save-bad-frames' */ printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n", dev->name, status); - p->stats.rx_errors++; + dev->stats.rx_errors++; } writeb(0, &p->rfd_top->stat_high); writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */ @@ -1043,7 +1042,7 @@ static void ni52_rnr_int(struct net_device *dev) { struct priv *p = netdev_priv(dev); - p->stats.rx_errors++; + dev->stats.rx_errors++; wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */ @@ -1076,29 +1075,29 @@ static void ni52_xmt_int(struct net_device *dev) printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); if (status & STAT_OK) { - p->stats.tx_packets++; - p->stats.collisions += (status & TCMD_MAXCOLLMASK); + dev->stats.tx_packets++; + dev->stats.collisions += (status & TCMD_MAXCOLLMASK); } else { - p->stats.tx_errors++; + dev->stats.tx_errors++; if (status & TCMD_LATECOLL) { printk(KERN_ERR "%s: late collision detected.\n", dev->name); - p->stats.collisions++; + dev->stats.collisions++; } else if (status & TCMD_NOCARRIER) { - p->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; printk(KERN_ERR "%s: no carrier detected.\n", dev->name); } else if (status & TCMD_LOSTCTS) printk(KERN_ERR "%s: loss of CTS detected.\n", dev->name); else if (status & TCMD_UNDERRUN) { - p->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: DMA underrun detected.\n", dev->name); } else if (status & TCMD_MAXCOLL) { printk(KERN_ERR "%s: Max. 
collisions exceeded.\n", dev->name); - p->stats.collisions += 16; + dev->stats.collisions += 16; } } #if (NUM_XMIT_BUFFS > 1) @@ -1286,12 +1285,12 @@ static struct net_device_stats *ni52_get_stats(struct net_device *dev) ovrn = readw(&p->scb->ovrn_errs); writew(0, &p->scb->ovrn_errs); - p->stats.rx_crc_errors += crc; - p->stats.rx_fifo_errors += ovrn; - p->stats.rx_frame_errors += aln; - p->stats.rx_dropped += rsc; + dev->stats.rx_crc_errors += crc; + dev->stats.rx_fifo_errors += ovrn; + dev->stats.rx_frame_errors += aln; + dev->stats.rx_dropped += rsc; - return &p->stats; + return &dev->stats; } /******************************************************** diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 63e8e3893bd6cb9d05ad674a036c1b3f49e82aad..b9b950845b0e7d480a020f997f6707b89258f13e 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -3330,10 +3330,12 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { if (p->index == addr) { *link = pp; - break; + goto found; } } + BUG(); +found: return p; } @@ -7920,14 +7922,7 @@ static int niu_phys_id(struct net_device *dev, u32 data) static int niu_set_flags(struct net_device *dev, u32 data) { - if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE)) - return -EOPNOTSUPP; - - if (data & ETH_FLAG_RXHASH) - dev->features |= NETIF_F_RXHASH; - else - dev->features &= ~NETIF_F_RXHASH; - return 0; + return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH); } static const struct ethtool_ops niu_ethtool_ops = { diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index e88e97cd1b10805803fb876c73af9c06848cb47f..5a3488f76b388f49939a373abb1e03079ca07554 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -424,7 +424,6 @@ struct rx_info { struct ns83820 { - struct net_device_stats stats; u8 __iomem *base; struct pci_dev *pci_dev; @@ -918,9 +917,9 @@ static void rx_irq(struct net_device *ndev) if (unlikely(!skb)) goto netdev_mangle_me_harder_failed; if (cmdsts & CMDSTS_DEST_MULTI) - dev->stats.multicast ++; - dev->stats.rx_packets ++; - dev->stats.rx_bytes += len; + ndev->stats.multicast++; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else { @@ -940,7 +939,7 @@ static void rx_irq(struct net_device *ndev) #endif if (NET_RX_DROP == rx_rc) { netdev_mangle_me_harder_failed: - dev->stats.rx_dropped ++; + ndev->stats.rx_dropped++; } } else { kfree_skb(skb); @@ -1008,11 +1007,11 @@ static void do_tx_done(struct net_device *ndev) dma_addr_t addr; if (cmdsts & CMDSTS_ERR) - dev->stats.tx_errors ++; + ndev->stats.tx_errors++; if (cmdsts & CMDSTS_OK) - dev->stats.tx_packets ++; + ndev->stats.tx_packets++; if (cmdsts & CMDSTS_OK) - dev->stats.tx_bytes += cmdsts & 0xffff; + ndev->stats.tx_bytes += cmdsts & 0xffff; dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", tx_done_idx, dev->tx_free_idx, cmdsts); @@ -1212,20 +1211,21 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb, static void ns83820_update_stats(struct ns83820 *dev) { + struct net_device *ndev = dev->ndev; u8 __iomem *base = dev->base; /* the DP83820 will freeze counters, so we need to read all of them */ - dev->stats.rx_errors += readl(base + 0x60) & 0xffff; - dev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff; - dev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff; - dev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff; - /*dev->stats.rx_symbol_errors +=*/ readl(base + 
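/* ns83820 statistics move from the driver-private struct ns83820 to
 * ndev->stats. The commented-out readl()s are kept because, per the
 * comment above, the DP83820 freezes its counters until the whole
 * block has been read. */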
0x70); - dev->stats.rx_length_errors += readl(base + 0x74) & 0xffff; - dev->stats.rx_length_errors += readl(base + 0x78) & 0xffff; - /*dev->stats.rx_badopcode_errors += */ readl(base + 0x7c); - /*dev->stats.rx_pause_count += */ readl(base + 0x80); - /*dev->stats.tx_pause_count += */ readl(base + 0x84); - dev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff; + ndev->stats.rx_errors += readl(base + 0x60) & 0xffff; + ndev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff; + ndev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff; + ndev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff; + /*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70); + ndev->stats.rx_length_errors += readl(base + 0x74) & 0xffff; + ndev->stats.rx_length_errors += readl(base + 0x78) & 0xffff; + /*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c); + /*ndev->stats.rx_pause_count += */ readl(base + 0x80); + /*ndev->stats.tx_pause_count += */ readl(base + 0x84); + ndev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff; } static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) @@ -1237,7 +1237,7 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) ns83820_update_stats(dev); spin_unlock_irq(&dev->misc_lock); - return &dev->stats; + return &ndev->stats; } /* Let ethtool retrieve info */ @@ -1464,12 +1464,12 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr) if (unlikely(ISR_RXSOVR & isr)) { //printk("overrun: rxsovr\n"); - dev->stats.rx_fifo_errors ++; + ndev->stats.rx_fifo_errors++; } if (unlikely(ISR_RXORN & isr)) { //printk("overrun: rxorn\n"); - dev->stats.rx_fifo_errors ++; + ndev->stats.rx_fifo_errors++; } if ((ISR_RXRCMP & isr) && dev->rx_info.up) diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c index 000e792d57c01c42d6129ccca4102f15b2000d05..b264f0f45605409b6cd378bb5b498b23bd73f994 100644 --- a/drivers/net/octeon/octeon_mgmt.c +++ b/drivers/net/octeon/octeon_mgmt.c @@ -620,7 +620,7 @@ static int octeon_mgmt_ioctl(struct net_device *netdev, if (!p->phydev) return -EINVAL; - return phy_mii_ioctl(p->phydev, if_mii(rq), cmd); + return phy_mii_ioctl(p->phydev, rq, cmd); } static void octeon_mgmt_adjust_link(struct net_device *netdev) @@ -1067,7 +1067,7 @@ static const struct net_device_ops octeon_mgmt_ops = { #endif }; -static int __init octeon_mgmt_probe(struct platform_device *pdev) +static int __devinit octeon_mgmt_probe(struct platform_device *pdev) { struct resource *res_irq; struct net_device *netdev; @@ -1124,7 +1124,7 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev) return -ENOENT; } -static int __exit octeon_mgmt_remove(struct platform_device *pdev) +static int __devexit octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = dev_get_drvdata(&pdev->dev); @@ -1139,7 +1139,7 @@ static struct platform_driver octeon_mgmt_driver = { .owner = THIS_MODULE, }, .probe = octeon_mgmt_probe, - .remove = __exit_p(octeon_mgmt_remove), + .remove = __devexit_p(octeon_mgmt_remove), }; extern void octeon_mdiobus_force_mod_depencency(void); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index cecdbbd549ecd24a5d1be8bf7da8dc19c8768129..4accd83d3dfe8152067408e4fc5dc7b454c4d95e 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -685,7 +685,7 @@ static int brcm_fet_config_intr(struct phy_device *phydev) } static struct phy_driver bcm5411_driver = { - .phy_id = 0x00206070, + .phy_id = PHY_ID_BCM5411, .phy_id_mask = 0xfffffff0, .name = 
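/* The Broadcom driver replaces raw PHY OUI/model numbers with named
 * PHY_ID_BCM54xx constants and registers a new bcm5241_driver wired
 * into the existing brcm_fet_* handlers. Note that the mdio_device_id
 * table at the end of the file appears to list PHY_ID_BCM5482 twice;
 * PHY_ID_BCM5481 looks like the intended second entry. */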
"Broadcom BCM5411", .features = PHY_GBIT_FEATURES | @@ -700,7 +700,7 @@ static struct phy_driver bcm5411_driver = { }; static struct phy_driver bcm5421_driver = { - .phy_id = 0x002060e0, + .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", .features = PHY_GBIT_FEATURES | @@ -715,7 +715,7 @@ static struct phy_driver bcm5421_driver = { }; static struct phy_driver bcm5461_driver = { - .phy_id = 0x002060c0, + .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", .features = PHY_GBIT_FEATURES | @@ -730,7 +730,7 @@ static struct phy_driver bcm5461_driver = { }; static struct phy_driver bcm5464_driver = { - .phy_id = 0x002060b0, + .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5464", .features = PHY_GBIT_FEATURES | @@ -745,7 +745,7 @@ static struct phy_driver bcm5464_driver = { }; static struct phy_driver bcm5481_driver = { - .phy_id = 0x0143bca0, + .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5481", .features = PHY_GBIT_FEATURES | @@ -760,7 +760,7 @@ static struct phy_driver bcm5481_driver = { }; static struct phy_driver bcm5482_driver = { - .phy_id = 0x0143bcb0, + .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5482", .features = PHY_GBIT_FEATURES | @@ -834,6 +834,21 @@ static struct phy_driver bcmac131_driver = { .driver = { .owner = THIS_MODULE }, }; +static struct phy_driver bcm5241_driver = { + .phy_id = PHY_ID_BCM5241, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM5241", + .features = PHY_BASIC_FEATURES | + SUPPORTED_Pause | SUPPORTED_Asym_Pause, + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = brcm_fet_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = brcm_fet_ack_interrupt, + .config_intr = brcm_fet_config_intr, + .driver = { .owner = THIS_MODULE }, +}; + static int __init broadcom_init(void) { int ret; @@ -868,8 +883,13 @@ static int __init broadcom_init(void) ret = phy_driver_register(&bcmac131_driver); if (ret) goto out_ac131; + ret = phy_driver_register(&bcm5241_driver); + if (ret) + goto out_5241; return ret; +out_5241: + phy_driver_unregister(&bcmac131_driver); out_ac131: phy_driver_unregister(&bcm57780_driver); out_57780: @@ -894,6 +914,7 @@ static int __init broadcom_init(void) static void __exit broadcom_exit(void) { + phy_driver_unregister(&bcm5241_driver); phy_driver_unregister(&bcmac131_driver); phy_driver_unregister(&bcm57780_driver); phy_driver_unregister(&bcm50610m_driver); @@ -910,16 +931,17 @@ module_init(broadcom_init); module_exit(broadcom_exit); static struct mdio_device_id broadcom_tbl[] = { - { 0x00206070, 0xfffffff0 }, - { 0x002060e0, 0xfffffff0 }, - { 0x002060c0, 0xfffffff0 }, - { 0x002060b0, 0xfffffff0 }, - { 0x0143bca0, 0xfffffff0 }, - { 0x0143bcb0, 0xfffffff0 }, + { PHY_ID_BCM5411, 0xfffffff0 }, + { PHY_ID_BCM5421, 0xfffffff0 }, + { PHY_ID_BCM5461, 0xfffffff0 }, + { PHY_ID_BCM5464, 0xfffffff0 }, + { PHY_ID_BCM5482, 0xfffffff0 }, + { PHY_ID_BCM5482, 0xfffffff0 }, { PHY_ID_BCM50610, 0xfffffff0 }, { PHY_ID_BCM50610M, 0xfffffff0 }, { PHY_ID_BCM57780, 0xfffffff0 }, { PHY_ID_BCMAC131, 0xfffffff0 }, + { PHY_ID_BCM5241, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 439adafeacb1bfed9974e13bc62ff8bb4651d3a7..3f2583f18a39c3756c6296055a811cb1f1122f4e 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -116,6 +116,8 @@ static struct phy_driver ip175c_driver = { .config_init = 
&ip175c_config_init, .config_aneg = &ip175c_config_aneg, .read_status = &ip175c_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5a1bd5db2a93b3bfc3540e5cd81b76d05b553e0e..0101f2bdf400e7b28eca39d0c9b7a413e2f0d326 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -68,6 +68,15 @@ #define MII_M1111_COPPER 0 #define MII_M1111_FIBER 1 +#define MII_88E1121_PHY_MSCR_PAGE 2 +#define MII_88E1121_PHY_MSCR_REG 21 +#define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) +#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) + +#define MII_88EC048_PHY_MSCR1_REG 16 +#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6) + #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_PAGE 3 #define MII_88E1121_PHY_LED_DEF 0x0030 @@ -179,7 +188,30 @@ static int marvell_config_aneg(struct phy_device *phydev) static int m88e1121_config_aneg(struct phy_device *phydev) { - int err, temp; + int err, oldpage, mscr; + + oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + + err = phy_write(phydev, MII_88E1121_PHY_PAGE, + MII_88E1121_PHY_MSCR_PAGE); + if (err < 0) + return err; + mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) & + MII_88E1121_PHY_MSCR_DELAY_MASK; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | + MII_88E1121_PHY_MSCR_TX_DELAY); + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; + + err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); + if (err < 0) + return err; + + phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err < 0) @@ -190,17 +222,42 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); - phy_write(phydev, MII_88E1121_PHY_PAGE, temp); + phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); err = genphy_config_aneg(phydev); return err; } +static int m88ec048_config_aneg(struct phy_device *phydev) +{ + int err, oldpage, mscr; + + oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + + err = phy_write(phydev, MII_88E1121_PHY_PAGE, + MII_88E1121_PHY_MSCR_PAGE); + if (err < 0) + return err; + + mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG); + mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD; + + err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); + if (err < 0) + return err; + + err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + if (err < 0) + return err; + + return m88e1121_config_aneg(phydev); +} + static int m88e1111_config_init(struct phy_device *phydev) { int err; @@ -594,6 +651,19 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .driver = { .owner = THIS_MODULE }, }, + { + .phy_id = MARVELL_PHY_ID_88EC048, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88EC048", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &m88ec048_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + 
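/* This new 88EC048 entry reuses the 88E1121 read_status/interrupt
 * callbacks; m88ec048_config_aneg() sets the MSCR1 pad-odd bit and then
 * falls through to m88e1121_config_aneg(), which now programs the RGMII
 * RX/TX delay bits according to phydev->interface before restarting
 * autonegotiation. */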
.driver = { .owner = THIS_MODULE }, + }, { .phy_id = MARVELL_PHY_ID_88E1145, .phy_id_mask = MARVELL_PHY_ID_MASK, @@ -659,6 +729,7 @@ static struct mdio_device_id marvell_tbl[] = { { 0x01410cb0, 0xfffffff0 }, { 0x01410cd0, 0xfffffff0 }, { 0x01410e30, 0xfffffff0 }, + { 0x01410e90, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index f443d43edd80e242fba17e9ea3cf9de7c7b403fb..bd12ba941be51b9bb9dc4ee20e3dfb48e8bfae78 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -85,7 +85,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, return 0; } -static int __init octeon_mdiobus_probe(struct platform_device *pdev) +static int __devinit octeon_mdiobus_probe(struct platform_device *pdev) { struct octeon_mdiobus *bus; union cvmx_smix_en smi_en; @@ -143,7 +143,7 @@ static int __init octeon_mdiobus_probe(struct platform_device *pdev) return err; } -static int __exit octeon_mdiobus_remove(struct platform_device *pdev) +static int __devexit octeon_mdiobus_remove(struct platform_device *pdev) { struct octeon_mdiobus *bus; union cvmx_smix_en smi_en; @@ -163,7 +163,7 @@ static struct platform_driver octeon_mdiobus_driver = { .owner = THIS_MODULE, }, .probe = octeon_mdiobus_probe, - .remove = __exit_p(octeon_mdiobus_remove), + .remove = __devexit_p(octeon_mdiobus_remove), }; void octeon_mdiobus_force_mod_depencency(void) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 0692f750c404d6e78e101cad5f445f6f19f8d0fe..8bb7db676a5cc13b0651a8970fd25a27d7a79d48 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -12,7 +12,8 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * - * Support : ksz9021 , vsc8201, ks8001 + * Support : ksz9021 1000/100/10 phy from Micrel + * ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy */ #include @@ -20,37 +21,146 @@ #include #define PHY_ID_KSZ9021 0x00221611 -#define PHY_ID_VSC8201 0x000FC413 +#define PHY_ID_KS8737 0x00221720 +#define PHY_ID_KS8041 0x00221510 +#define PHY_ID_KS8051 0x00221550 +/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */ #define PHY_ID_KS8001 0x0022161A +/* general Interrupt control/status reg in vendor specific block. */ +#define MII_KSZPHY_INTCS 0x1B +#define KSZPHY_INTCS_JABBER (1 << 15) +#define KSZPHY_INTCS_RECEIVE_ERR (1 << 14) +#define KSZPHY_INTCS_PAGE_RECEIVE (1 << 13) +#define KSZPHY_INTCS_PARELLEL (1 << 12) +#define KSZPHY_INTCS_LINK_PARTNER_ACK (1 << 11) +#define KSZPHY_INTCS_LINK_DOWN (1 << 10) +#define KSZPHY_INTCS_REMOTE_FAULT (1 << 9) +#define KSZPHY_INTCS_LINK_UP (1 << 8) +#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\ + KSZPHY_INTCS_LINK_DOWN) + +/* general PHY control reg in vendor specific block. */ +#define MII_KSZPHY_CTRL 0x1F +/* bitmap of PHY register to set interrupt mode */ +#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9) +#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14) +#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) + +static int kszphy_ack_interrupt(struct phy_device *phydev) +{ + /* bit[7..0] int status, which is a read and clear register. */ + int rc; + + rc = phy_read(phydev, MII_KSZPHY_INTCS); + + return (rc < 0) ? rc : 0; +} + +static int kszphy_set_interrupt(struct phy_device *phydev) +{ + int temp; + temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ? 
+ KSZPHY_INTCS_ALL : 0; + return phy_write(phydev, MII_KSZPHY_INTCS, temp); +} + +static int kszphy_config_intr(struct phy_device *phydev) +{ + int temp, rc; + + /* set the interrupt pin active low */ + temp = phy_read(phydev, MII_KSZPHY_CTRL); + temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH; + phy_write(phydev, MII_KSZPHY_CTRL, temp); + rc = kszphy_set_interrupt(phydev); + return rc < 0 ? rc : 0; +} + +static int ksz9021_config_intr(struct phy_device *phydev) +{ + int temp, rc; + + /* set the interrupt pin active low */ + temp = phy_read(phydev, MII_KSZPHY_CTRL); + temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH; + phy_write(phydev, MII_KSZPHY_CTRL, temp); + rc = kszphy_set_interrupt(phydev); + return rc < 0 ? rc : 0; +} + +static int ks8737_config_intr(struct phy_device *phydev) +{ + int temp, rc; + + /* set the interrupt pin active low */ + temp = phy_read(phydev, MII_KSZPHY_CTRL); + temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH; + phy_write(phydev, MII_KSZPHY_CTRL, temp); + rc = kszphy_set_interrupt(phydev); + return rc < 0 ? rc : 0; +} static int kszphy_config_init(struct phy_device *phydev) { return 0; } +static struct phy_driver ks8737_driver = { + .phy_id = PHY_ID_KS8737, + .phy_id_mask = 0x00fffff0, + .name = "Micrel KS8737", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = ks8737_config_intr, + .driver = { .owner = THIS_MODULE,}, +}; + +static struct phy_driver ks8041_driver = { + .phy_id = PHY_ID_KS8041, + .phy_id_mask = 0x00fffff0, + .name = "Micrel KS8041", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .driver = { .owner = THIS_MODULE,}, +}; -static struct phy_driver ks8001_driver = { - .phy_id = PHY_ID_KS8001, - .name = "Micrel KS8001", +static struct phy_driver ks8051_driver = { + .phy_id = PHY_ID_KS8051, .phy_id_mask = 0x00fffff0, - .features = PHY_BASIC_FEATURES, - .flags = PHY_POLL, + .name = "Micrel KS8051", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, .driver = { .owner = THIS_MODULE,}, }; -static struct phy_driver vsc8201_driver = { - .phy_id = PHY_ID_VSC8201, - .name = "Micrel VSC8201", +static struct phy_driver ks8001_driver = { + .phy_id = PHY_ID_KS8001, + .name = "Micrel KS8001 or KS8721", .phy_id_mask = 0x00fffff0, - .features = PHY_BASIC_FEATURES, - .flags = PHY_POLL, + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, .driver = { .owner = THIS_MODULE,}, }; @@ -58,11 +168,14 @@ static struct phy_driver ksz9021_driver = { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000fff10, .name = "Micrel KSZ9021 Gigabit PHY", - .features = PHY_GBIT_FEATURES | SUPPORTED_Pause, - .flags = PHY_POLL, + .features = 
(PHY_GBIT_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = ksz9021_config_intr, .driver = { .owner = THIS_MODULE, }, }; @@ -73,17 +186,29 @@ static int __init ksphy_init(void) ret = phy_driver_register(&ks8001_driver); if (ret) goto err1; - ret = phy_driver_register(&vsc8201_driver); + + ret = phy_driver_register(&ksz9021_driver); if (ret) goto err2; - ret = phy_driver_register(&ksz9021_driver); + ret = phy_driver_register(&ks8737_driver); if (ret) goto err3; + ret = phy_driver_register(&ks8041_driver); + if (ret) + goto err4; + ret = phy_driver_register(&ks8051_driver); + if (ret) + goto err5; + return 0; +err5: + phy_driver_unregister(&ks8041_driver); +err4: + phy_driver_unregister(&ks8737_driver); err3: - phy_driver_unregister(&vsc8201_driver); + phy_driver_unregister(&ksz9021_driver); err2: phy_driver_unregister(&ks8001_driver); err1: @@ -93,8 +218,10 @@ static int __init ksphy_init(void) static void __exit ksphy_exit(void) { phy_driver_unregister(&ks8001_driver); - phy_driver_unregister(&vsc8201_driver); + phy_driver_unregister(&ks8737_driver); phy_driver_unregister(&ksz9021_driver); + phy_driver_unregister(&ks8041_driver); + phy_driver_unregister(&ks8051_driver); } module_init(ksphy_init); @@ -106,8 +233,10 @@ MODULE_LICENSE("GPL"); static struct mdio_device_id micrel_tbl[] = { { PHY_ID_KSZ9021, 0x000fff10 }, - { PHY_ID_VSC8201, 0x00fffff0 }, { PHY_ID_KS8001, 0x00fffff0 }, + { PHY_ID_KS8737, 0x00fffff0 }, + { PHY_ID_KS8041, 0x00fffff0 }, + { PHY_ID_KS8051, 0x00fffff0 }, { } }; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 64be4664ccab0c906f34ce05f49c66d7cbf6def3..5130db8f5c4ec4b2ced76a400071814c3e638c60 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -309,8 +309,9 @@ EXPORT_SYMBOL(phy_ethtool_gset); * current state. Use at own risk. */ int phy_mii_ioctl(struct phy_device *phydev, - struct mii_ioctl_data *mii_data, int cmd) + struct ifreq *ifr, int cmd) { + struct mii_ioctl_data *mii_data = if_mii(ifr); u16 val = mii_data->val_in; switch (cmd) { @@ -360,6 +361,11 @@ int phy_mii_ioctl(struct phy_device *phydev, } break; + case SIOCSHWTSTAMP: + if (phydev->drv->hwtstamp) + return phydev->drv->hwtstamp(phydev, ifr); + /* fall through */ + default: return -EOPNOTSUPP; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1a99bb2441064c5dae8014628bd718d033e752e6..c0761197c07e6a94e67e3d0d4f417d888e2ec68f 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -460,6 +460,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, } phydev->attached_dev = dev; + dev->phydev = phydev; phydev->dev_flags = flags; @@ -513,6 +514,7 @@ EXPORT_SYMBOL(phy_attach); */ void phy_detach(struct phy_device *phydev) { + phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; /* If the device had no specific driver before (i.e. 
- it diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 1b2c29150202080e75c2e9732b0150c204ed9161..6695a51e09e9b86340aa0cee342f2fb8d99e1159 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include @@ -69,7 +68,6 @@ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ -#define MIN_FRAG_SIZE 64 /* * An instance of /dev/ppp can be associated with either a ppp @@ -181,6 +179,7 @@ struct channel { * channel.downl. */ +static DEFINE_MUTEX(ppp_mutex); static atomic_t ppp_unit_count = ATOMIC_INIT(0); static atomic_t channel_count = ATOMIC_INIT(0); @@ -363,7 +362,6 @@ static const int npindex_to_ethertype[NUM_NP] = { */ static int ppp_open(struct inode *inode, struct file *file) { - cycle_kernel_lock(); /* * This could (should?) be enforced by the permissions on /dev/ppp. */ @@ -539,14 +537,9 @@ static int get_filter(void __user *arg, struct sock_filter **p) } len = uprog.len * sizeof(struct sock_filter); - code = kmalloc(len, GFP_KERNEL); - if (code == NULL) - return -ENOMEM; - - if (copy_from_user(code, uprog.filter, len)) { - kfree(code); - return -EFAULT; - } + code = memdup_user(uprog.filter, len); + if (IS_ERR(code)) + return PTR_ERR(code); err = sk_chk_filter(code, uprog.len); if (err) { @@ -588,7 +581,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * this fd and reopening /dev/ppp. */ err = -EINVAL; - lock_kernel(); + mutex_lock(&ppp_mutex); if (pf->kind == INTERFACE) { ppp = PF_TO_PPP(pf); if (file == ppp->owner) @@ -600,7 +593,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } else printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", atomic_long_read(&file->f_count)); - unlock_kernel(); + mutex_unlock(&ppp_mutex); return err; } @@ -608,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct channel *pch; struct ppp_channel *chan; - lock_kernel(); + mutex_lock(&ppp_mutex); pch = PF_TO_CHANNEL(pf); switch (cmd) { @@ -630,7 +623,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = chan->ops->ioctl(chan, cmd, arg); up_read(&pch->chan_sem); } - unlock_kernel(); + mutex_unlock(&ppp_mutex); return err; } @@ -640,7 +633,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; } - lock_kernel(); + mutex_lock(&ppp_mutex); ppp = PF_TO_PPP(pf); switch (cmd) { case PPPIOCSMRU: @@ -788,7 +781,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) default: err = -ENOTTY; } - unlock_kernel(); + mutex_unlock(&ppp_mutex); return err; } @@ -801,7 +794,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct ppp_net *pn; int __user *p = (int __user *)arg; - lock_kernel(); + mutex_lock(&ppp_mutex); switch (cmd) { case PPPIOCNEWUNIT: /* Create a new ppp unit */ @@ -852,7 +845,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, default: err = -ENOTTY; } - unlock_kernel(); + mutex_unlock(&ppp_mutex); return err; } @@ -1933,9 +1926,9 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) /* If the queue is getting long, don't wait any longer for packets before the start of the queue. 
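	   (Editor's note, not part of the original patch: seq_before() is
	   assumed here to be the usual wraparound-safe comparison of the
	   32-bit multilink sequence numbers, conceptually
	       seq_before(a, b) == ((s32)((a) - (b)) < 0)
	   so that once the reassembly queue grows past PPP_MP_MAX_QLEN,
	   minseq is simply pulled forward to the sequence number of the
	   fragment at the head of the queue.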
*/ if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { - struct sk_buff *skb = skb_peek(&ppp->mrq); - if (seq_before(ppp->minseq, skb->sequence)) - ppp->minseq = skb->sequence; + struct sk_buff *mskb = skb_peek(&ppp->mrq); + if (seq_before(ppp->minseq, mskb->sequence)) + ppp->minseq = mskb->sequence; } /* Pull completed packets off the queue and receive them. */ diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 805b64d1e893564bfc7aa1b3957c1df606dcf46a..344ef330e12320b41e486aae99e08a0d8990d9db 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -89,7 +89,6 @@ #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) #define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) -static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; @@ -949,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) abort: kfree_skb(skb); - return 1; + return 0; } /************************************************************************ diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h index 0a88b535197aa830971252d4695c24e72f21dad1..f7e51b7d704960a2eb45ae63d4ed19f134d6d105 100644 --- a/drivers/net/ps3_gelic_wireless.h +++ b/drivers/net/ps3_gelic_wireless.h @@ -74,7 +74,7 @@ struct gelic_eurus_common_cfg { u16 bss_type; /* infra or adhoc */ u16 auth_method; /* shared key or open */ u16 op_mode; /* B/G */ -} __attribute__((packed)); +} __packed; /* for GELIC_EURUS_CMD_WEP_CFG */ @@ -88,7 +88,7 @@ struct gelic_eurus_wep_cfg { /* all fields are big endian */ u16 security; u8 key[4][16]; -} __attribute__((packed)); +} __packed; /* for GELIC_EURUS_CMD_WPA_CFG */ enum gelic_eurus_wpa_security { @@ -120,7 +120,7 @@ struct gelic_eurus_wpa_cfg { u16 security; u16 psk_type; /* psk key encoding type */ u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ -} __attribute__((packed)); +} __packed; /* for GELIC_EURUS_CMD_{START,GET}_SCAN */ enum gelic_eurus_scan_capability { @@ -171,7 +171,7 @@ struct gelic_eurus_scan_info { __be32 reserved3; __be32 reserved4; u8 elements[0]; /* ie */ -} __attribute__ ((packed)); +} __packed; /* the hypervisor returns bbs up to 16 */ #define GELIC_EURUS_MAX_SCAN (16) @@ -193,7 +193,7 @@ struct gelic_wl_scan_info { struct gelic_eurus_rssi_info { /* big endian */ __be16 rssi; -} __attribute__ ((packed)); +} __packed; /* for 'stat' member of gelic_wl_info */ diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 54ebb65ada1863ecdc7ff97b15587d6516b1e3c7..6168a130f33fc1a18548427a73918d572db19107 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -5,6 +5,8 @@ * See LICENSE.qla3xxx for copyright and licensing details. 
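 *
 * (Editor's note, not part of the original patch: the pr_fmt() define
 * added just below must be defined before the printk headers are pulled
 * in, hence its position above the #includes; with it, a call such as
 *
 *     pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
 *
 * from later in this file comes out prefixed with the module name, e.g.
 * "qla3xxx: ql_get_nvram_params: Failed ql_sem_spinlock()", since
 * KBUILD_MODNAME expands to the name of the object the file is built
 * into.)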
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -36,14 +38,16 @@ #include "qla3xxx.h" -#define DRV_NAME "qla3xxx" -#define DRV_STRING "QLogic ISP3XXX Network Driver" +#define DRV_NAME "qla3xxx" +#define DRV_STRING "QLogic ISP3XXX Network Driver" #define DRV_VERSION "v2.03.00-k5" -#define PFX DRV_NAME " " static const char ql3xxx_driver_name[] = DRV_NAME; static const char ql3xxx_driver_version[] = DRV_VERSION; +#define TIMED_OUT_MSG \ +"Timed out waiting for management port to get free before issuing command\n" + MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); MODULE_LICENSE("GPL"); @@ -73,24 +77,24 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); /* * These are the known PHY's which are used */ -typedef enum { +enum PHY_DEVICE_TYPE { PHY_TYPE_UNKNOWN = 0, PHY_VITESSE_VSC8211, PHY_AGERE_ET1011C, MAX_PHY_DEV_TYPES -} PHY_DEVICE_et; - -typedef struct { - PHY_DEVICE_et phyDevice; - u32 phyIdOUI; - u16 phyIdModel; - char *name; -} PHY_DEVICE_INFO_t; - -static const PHY_DEVICE_INFO_t PHY_DEVICES[] = - {{PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, - {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, - {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, +}; + +struct PHY_DEVICE_INFO { + const enum PHY_DEVICE_TYPE phyDevice; + const u32 phyIdOUI; + const u16 phyIdModel; + const char *name; +}; + +static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { + {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, + {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, + {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, }; @@ -100,7 +104,8 @@ static const PHY_DEVICE_INFO_t PHY_DEVICES[] = static int ql_sem_spinlock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; u32 value; unsigned int seconds = 3; @@ -111,20 +116,22 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev, if ((value & (sem_mask >> 16)) == sem_bits) return 0; ssleep(1); - } while(--seconds); + } while (--seconds); return -1; } static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); readl(&port_regs->CommonRegs.semaphoreReg); } static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; u32 value; writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); @@ -139,32 +146,28 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { int i = 0; - while (1) { - if (!ql_sem_lock(qdev, - QL_DRVR_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 1)) { - if (i < 10) { - ssleep(1); - i++; - } else { - printk(KERN_ERR PFX "%s: Timed out waiting for " - "driver lock...\n", - qdev->ndev->name); - return 0; - } - } else { - printk(KERN_DEBUG PFX - "%s: driver lock acquired.\n", - qdev->ndev->name); + while (i < 10) { + if (i) + ssleep(1); + + if (ql_sem_lock(qdev, + QL_DRVR_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 1)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "driver lock acquired\n"); return 1; } } + + 
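	/*
	 * (Editor's illustration, not part of the original patch: the
	 * printk(KERN_... PFX "%s: ...", qdev->ndev->name, ...) calls in
	 * this driver are being converted to the generic netdev message
	 * helpers from <linux/netdevice.h>, e.g.
	 *
	 *     netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	 *     netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
	 *
	 * netdev_err() prefixes the message with the driver and device name
	 * itself, and netif_warn() additionally tests the corresponding
	 * netif_msg_link() bit in qdev->msg_enable before printing, which is
	 * why the explicit "if (netif_msg_link(qdev))" checks disappear in
	 * the hunks below.)
	 */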
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); + return 0; } static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; writel(((ISP_CONTROL_NP_MASK << 16) | page), &port_regs->CommonRegs.ispControlStatus); @@ -172,8 +175,7 @@ static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) qdev->current_page = page; } -static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, - u32 __iomem * reg) +static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { u32 value; unsigned long hw_flags; @@ -185,8 +187,7 @@ static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, return value; } -static u32 ql_read_common_reg(struct ql3_adapter *qdev, - u32 __iomem * reg) +static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { return readl(reg); } @@ -199,7 +200,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (qdev->current_page != 0) - ql_set_register_page(qdev,0); + ql_set_register_page(qdev, 0); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); @@ -209,7 +210,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { if (qdev->current_page != 0) - ql_set_register_page(qdev,0); + ql_set_register_page(qdev, 0); return readl(reg); } @@ -243,7 +244,7 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 0) - ql_set_register_page(qdev,0); + ql_set_register_page(qdev, 0); writel(value, reg); readl(reg); } @@ -255,7 +256,7 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 1) - ql_set_register_page(qdev,1); + ql_set_register_page(qdev, 1); writel(value, reg); readl(reg); } @@ -267,14 +268,15 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 2) - ql_set_register_page(qdev,2); + ql_set_register_page(qdev, 2); writel(value, reg); readl(reg); } static void ql_disable_interrupts(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, (ISP_IMR_ENABLE_INT << 16)); @@ -283,7 +285,8 @@ static void ql_disable_interrupts(struct ql3_adapter *qdev) static void ql_enable_interrupts(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, ((0xff << 16) | ISP_IMR_ENABLE_INT)); @@ -308,8 +311,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { - printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); qdev->lrg_buf_skb_check++; } else { /* @@ -323,9 +325,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, map); - if(err) 
{ - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", - qdev->ndev->name, err); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; @@ -350,10 +353,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter *qdev) { - struct ql_rcv_buf_cb *lrg_buf_cb; + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) { - if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL) + if (lrg_buf_cb != NULL) { + qdev->lrg_buf_free_head = lrg_buf_cb->next; + if (qdev->lrg_buf_free_head == NULL) qdev->lrg_buf_free_tail = NULL; qdev->lrg_buf_free_count--; } @@ -374,13 +378,13 @@ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, static void fm93c56a_select(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; + u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; - ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data); - ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, spir, + ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); } /* @@ -393,51 +397,40 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) u32 dataBit; u32 previousBit; struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; + u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; /* Clock in a zero, then do the start bit */ - ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1); - ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | AUBURN_EEPROM_DO_1 | - AUBURN_EEPROM_CLK_RISE); - ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | AUBURN_EEPROM_DO_1 | - AUBURN_EEPROM_CLK_FALL); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); mask = 1 << (FM93C56A_CMD_BITS - 1); /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < FM93C56A_CMD_BITS; i++) { - dataBit = - (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; + dataBit = (cmd & mask) + ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { - /* - * If the bit changed, then change the DO state to - * match - */ - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit); + /* If the bit changed, change the DO state to match */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); previousBit = dataBit; } - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. 
- serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_RISE); - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_FALL); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); cmd = cmd << 1; } @@ -445,33 +438,24 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < addrBits; i++) { - dataBit = - (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : - AUBURN_EEPROM_DO_0; + dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match */ - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); previousBit = dataBit; } - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_RISE); - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev-> - eeprom_cmd_data | dataBit | - AUBURN_EEPROM_CLK_FALL); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); eepromAddr = eepromAddr << 1; } } @@ -482,10 +466,11 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) static void fm93c56a_deselect(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; + u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; - ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); } /* @@ -497,29 +482,23 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) u32 data = 0; u32 dataBit; struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; + u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; /* Read the data bits */ /* The first bit is a dummy. Clock right over it. */ for (i = 0; i < dataBits; i++) { - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_RISE); - ql_write_nvram_reg(qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_FALL); - dataBit = - (ql_read_common_reg - (qdev, - &port_regs->CommonRegs. - serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0; + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL); + dataBit = (ql_read_common_reg(qdev, spir) & + AUBURN_EEPROM_DI_1) ? 
1 : 0; data = (data << 1) | dataBit; } - *value = (u16) data; + *value = (u16)data; } /* @@ -551,13 +530,12 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev) spin_lock_irqsave(&qdev->hw_lock, hw_flags); - pEEPROMData = (u16 *) & qdev->nvram_data; + pEEPROMData = (u16 *)&qdev->nvram_data; qdev->eeprom_cmd_data = 0; - if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 10)) { - printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n", - __func__); + pr_err("%s: Failed ql_sem_spinlock()\n", __func__); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } @@ -570,8 +548,8 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev) ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); if (checksum != 0) { - printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n", - qdev->ndev->name, checksum); + netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", + checksum); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } @@ -587,7 +565,7 @@ static const u32 PHYAddr[2] = { static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 temp; int count = 1000; @@ -604,7 +582,7 @@ static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 scanControl; if (qdev->numPorts > 1) { @@ -632,7 +610,7 @@ static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) { u8 ret; struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; /* See if scan mode is enabled before we turn it off */ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & @@ -662,17 +640,13 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u8 scanWasEnabled; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -683,11 +657,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, /* Wait for write to complete 9/10/04 SJP */ if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -698,21 +668,17 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, } static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, - u16 * value, u32 phyAddr) + u16 *value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u8 scanWasEnabled; u32 temp; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -727,11 +693,7 @@ static int ql_mii_read_reg_ex(struct 
ql3_adapter *qdev, u16 regAddr, /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free after issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -747,16 +709,12 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -767,11 +725,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) /* Wait for write to complete. */ if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -784,16 +738,12 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) { u32 temp; struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -808,11 +758,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Timed out waiting for management port to " - "get free before issuing command.\n", - qdev->ndev->name); + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } @@ -898,7 +844,7 @@ static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) { - printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name); + netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); /* power down device bit 11 = 1 */ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); /* enable diagnostic mode bit 2 = 1 */ @@ -918,7 +864,8 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) /* point to hidden reg 0x2806 */ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); /* Write new PHYAD w/bit 5 set */ - ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); + ql_mii_write_reg_ex(qdev, 0x11, + 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); /* * Disable diagnostic mode bit 2 = 0 * Power up device bit 11 = 0 @@ -929,21 +876,19 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) ql_mii_write_reg(qdev, 0x1c, 0xfaf0); } -static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, - u16 phyIdReg0, u16 phyIdReg1) +static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, + u16 phyIdReg0, u16 phyIdReg1) { - PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; + enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; u32 oui; u16 model; int i; - if (phyIdReg0 == 
0xffff) { + if (phyIdReg0 == 0xffff) return result; - } - if (phyIdReg1 == 0xffff) { + if (phyIdReg1 == 0xffff) return result; - } /* oui is split between two registers */ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); @@ -951,15 +896,13 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; /* Scan table for this PHY */ - for(i = 0; i < MAX_PHY_DEV_TYPES; i++) { - if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) - { + for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { + if ((oui == PHY_DEVICES[i].phyIdOUI) && + (model == PHY_DEVICES[i].phyIdModel)) { + netdev_info(qdev->ndev, "Phy: %s\n", + PHY_DEVICES[i].name); result = PHY_DEVICES[i].phyDevice; - - printk(KERN_INFO "%s: Phy: %s\n", - qdev->ndev->name, PHY_DEVICES[i].name); - - break; + break; } } @@ -970,9 +913,8 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev) { u16 reg; - switch(qdev->phyType) { - case PHY_AGERE_ET1011C: - { + switch (qdev->phyType) { + case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) return 0; @@ -980,20 +922,20 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev) break; } default: - if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) - return 0; + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) + return 0; - reg = (((reg & 0x18) >> 3) & 3); + reg = (((reg & 0x18) >> 3) & 3); } - switch(reg) { - case 2: + switch (reg) { + case 2: return SPEED_1000; - case 1: + case 1: return SPEED_100; - case 0: + case 0: return SPEED_10; - default: + default: return -1; } } @@ -1002,17 +944,15 @@ static int ql_is_full_dup(struct ql3_adapter *qdev) { u16 reg; - switch(qdev->phyType) { - case PHY_AGERE_ET1011C: - { + switch (qdev->phyType) { + case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, ®)) return 0; return ((reg & 0x0080) && (reg & 0x1000)) != 0; } case PHY_VITESSE_VSC8211: - default: - { + default: { if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) return 0; return (reg & PHY_AUX_DUPLEX_STAT) != 0; @@ -1040,17 +980,15 @@ static int PHY_Setup(struct ql3_adapter *qdev) /* Determine the PHY we are using by reading the ID's */ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); - if(err != 0) { - printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", - qdev->ndev->name); - return err; + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); + return err; } err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); - if(err != 0) { - printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n", - qdev->ndev->name); - return err; + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); + return err; } /* Check if we have a Agere PHY */ @@ -1058,24 +996,22 @@ static int PHY_Setup(struct ql3_adapter *qdev) /* Determine which MII address we should be using determined by the index of the card */ - if (qdev->mac_index == 0) { + if (qdev->mac_index == 0) miiAddr = MII_AGERE_ADDR_1; - } else { + else miiAddr = MII_AGERE_ADDR_2; - } - err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); - if(err != 0) { - printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", - qdev->ndev->name); + err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); + if (err != 0) { + netdev_err(qdev->ndev, + "Could not read from reg PHY_ID_0_REG after Agere detected\n"); return err; } err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); - if(err != 0) { - printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", - 
qdev->ndev->name); - return err; + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); + return err; } /* We need to remember to initialize the Agere PHY */ @@ -1090,7 +1026,7 @@ static int PHY_Setup(struct ql3_adapter *qdev) /* need this here so address gets changed */ phyAgereSpecificInit(qdev, miiAddr); } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { - printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); + netdev_err(qdev->ndev, "PHY is unknown\n"); return -EIO; } @@ -1103,7 +1039,7 @@ static int PHY_Setup(struct ql3_adapter *qdev) static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 value; if (enable) @@ -1123,7 +1059,7 @@ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 value; if (enable) @@ -1143,7 +1079,7 @@ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 value; if (enable) @@ -1163,7 +1099,7 @@ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 value; if (enable) @@ -1183,7 +1119,7 @@ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 value; if (enable) @@ -1205,7 +1141,7 @@ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) static int ql_is_fiber(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; @@ -1235,7 +1171,7 @@ static int ql_is_auto_cfg(struct ql3_adapter *qdev) static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; @@ -1250,18 +1186,11 @@ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) temp = ql_read_page0_reg(qdev, &port_regs->portStatus); if (temp & bitToCheck) { - if (netif_msg_link(qdev)) - printk(KERN_INFO PFX - "%s: Auto-Negotiate complete.\n", - qdev->ndev->name); + netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); return 1; - } else { - if (netif_msg_link(qdev)) - printk(KERN_WARNING PFX - "%s: Auto-Negotiate incomplete.\n", - qdev->ndev->name); - return 0; } + netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); + return 0; } /* @@ -1278,7 +1207,7 @@ static int ql_is_neg_pause(struct ql3_adapter *qdev) static int ql_auto_neg_error(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; @@ -1316,7 +1245,7 @@ static int ql_is_link_full_dup(struct ql3_adapter *qdev) static int ql_link_down_detect(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + 
qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; @@ -1340,7 +1269,7 @@ static int ql_link_down_detect(struct ql3_adapter *qdev) static int ql_link_down_detect_clear(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; switch (qdev->mac_index) { case 0: @@ -1370,7 +1299,7 @@ static int ql_link_down_detect_clear(struct ql3_adapter *qdev) static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; @@ -1387,16 +1316,13 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) temp = ql_read_page0_reg(qdev, &port_regs->portStatus); if (temp & bitToCheck) { - if (netif_msg_link(qdev)) - printk(KERN_DEBUG PFX - "%s: is not link master.\n", qdev->ndev->name); + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "not link master\n"); return 0; - } else { - if (netif_msg_link(qdev)) - printk(KERN_DEBUG PFX - "%s: is link master.\n", qdev->ndev->name); - return 1; } + + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); + return 1; } static void ql_phy_reset_ex(struct ql3_adapter *qdev) @@ -1410,19 +1336,20 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) u16 reg; u16 portConfiguration; - if(qdev->phyType == PHY_AGERE_ET1011C) { - /* turn off external loopback */ + if (qdev->phyType == PHY_AGERE_ET1011C) ql_mii_write_reg(qdev, 0x13, 0x0000); - } + /* turn off external loopback */ - if(qdev->mac_index == 0) - portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration; + if (qdev->mac_index == 0) + portConfiguration = + qdev->nvram_data.macCfg_port0.portConfiguration; else - portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration; + portConfiguration = + qdev->nvram_data.macCfg_port1.portConfiguration; /* Some HBA's in the field are set to 0 and they need to be reinterpreted with a default value */ - if(portConfiguration == 0) + if (portConfiguration == 0) portConfiguration = PORT_CONFIG_DEFAULT; /* Set the 1000 advertisements */ @@ -1430,8 +1357,8 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) PHYAddr[qdev->mac_index]); reg &= ~PHY_GIG_ALL_PARAMS; - if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { - if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) + if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { + if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) reg |= PHY_GIG_ADV_1000F; else reg |= PHY_GIG_ADV_1000H; @@ -1445,29 +1372,27 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) PHYAddr[qdev->mac_index]); reg &= ~PHY_NEG_ALL_PARAMS; - if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) + if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; - if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { - if(portConfiguration & PORT_CONFIG_100MB_SPEED) + if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { + if (portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100F; - if(portConfiguration & PORT_CONFIG_10MB_SPEED) + if (portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10F; } - if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { - if(portConfiguration & PORT_CONFIG_100MB_SPEED) + if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { + if (portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100H; - if(portConfiguration & PORT_CONFIG_10MB_SPEED) + if (portConfiguration & 
PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10H; } - if(portConfiguration & - PORT_CONFIG_1000MB_SPEED) { + if (portConfiguration & PORT_CONFIG_1000MB_SPEED) reg |= 1; - } ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, PHYAddr[qdev->mac_index]); @@ -1492,7 +1417,7 @@ static void ql_phy_init_ex(struct ql3_adapter *qdev) static u32 ql_get_link_state(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp, linkState; @@ -1504,22 +1429,22 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev) bitToCheck = PORT_STATUS_UP1; break; } + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (temp & bitToCheck) { + if (temp & bitToCheck) linkState = LS_UP; - } else { + else linkState = LS_DOWN; - } + return linkState; } static int ql_port_start(struct ql3_adapter *qdev) { - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { - printk(KERN_ERR "%s: Could not get hw lock for GIO\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); return -1; } @@ -1537,19 +1462,16 @@ static int ql_port_start(struct ql3_adapter *qdev) static int ql_finish_auto_neg(struct ql3_adapter *qdev) { - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; if (!ql_auto_neg_error(qdev)) { - if (test_bit(QL_LINK_MASTER,&qdev->flags)) { + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { /* configure the MAC */ - if (netif_msg_link(qdev)) - printk(KERN_DEBUG PFX - "%s: Configuring link.\n", - qdev->ndev-> - name); + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Configuring link\n"); ql_mac_cfg_soft_reset(qdev, 1); ql_mac_cfg_gig(qdev, (ql_get_link_speed @@ -1564,43 +1486,32 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev) ql_mac_cfg_soft_reset(qdev, 0); /* enable the MAC */ - if (netif_msg_link(qdev)) - printk(KERN_DEBUG PFX - "%s: Enabling mac.\n", - qdev->ndev-> - name); + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Enabling mac\n"); ql_mac_enable(qdev, 1); } qdev->port_link_state = LS_UP; netif_start_queue(qdev->ndev); netif_carrier_on(qdev->ndev); - if (netif_msg_link(qdev)) - printk(KERN_INFO PFX - "%s: Link is up at %d Mbps, %s duplex.\n", - qdev->ndev->name, - ql_get_link_speed(qdev), - ql_is_link_full_dup(qdev) - ? "full" : "half"); + netif_info(qdev, link, qdev->ndev, + "Link is up at %d Mbps, %s duplex\n", + ql_get_link_speed(qdev), + ql_is_link_full_dup(qdev) ? "full" : "half"); } else { /* Remote error detected */ - if (test_bit(QL_LINK_MASTER,&qdev->flags)) { - if (netif_msg_link(qdev)) - printk(KERN_DEBUG PFX - "%s: Remote error detected. " - "Calling ql_port_start().\n", - qdev->ndev-> - name); + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Remote error detected. Calling ql_port_start()\n"); /* * ql_port_start() is shared code and needs * to lock the PHY on it's own. 
*/ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - if(ql_port_start(qdev)) {/* Restart port */ + if (ql_port_start(qdev)) /* Restart port */ return -1; - } else - return 0; + return 0; } } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); @@ -1619,33 +1530,28 @@ static void ql_link_state_machine_work(struct work_struct *work) curr_link_state = ql_get_link_state(qdev); - if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) { - if (netif_msg_link(qdev)) - printk(KERN_INFO PFX - "%s: Reset in progress, skip processing link " - "state.\n", qdev->ndev->name); + if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { + netif_info(qdev, link, qdev->ndev, + "Reset in progress, skip processing link state\n"); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); /* Restart timer on 2 second interval. */ - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); return; } switch (qdev->port_link_state) { default: - if (test_bit(QL_LINK_MASTER,&qdev->flags)) { + if (test_bit(QL_LINK_MASTER, &qdev->flags)) ql_port_start(qdev); - } qdev->port_link_state = LS_DOWN; /* Fall Through */ case LS_DOWN: if (curr_link_state == LS_UP) { - if (netif_msg_link(qdev)) - printk(KERN_INFO PFX "%s: Link is up.\n", - qdev->ndev->name); + netif_info(qdev, link, qdev->ndev, "Link is up\n"); if (ql_is_auto_neg_complete(qdev)) ql_finish_auto_neg(qdev); @@ -1662,9 +1568,7 @@ static void ql_link_state_machine_work(struct work_struct *work) * back up */ if (curr_link_state == LS_DOWN) { - if (netif_msg_link(qdev)) - printk(KERN_INFO PFX "%s: Link is down.\n", - qdev->ndev->name); + netif_info(qdev, link, qdev->ndev, "Link is down\n"); qdev->port_link_state = LS_DOWN; } if (ql_link_down_detect(qdev)) @@ -1683,9 +1587,9 @@ static void ql_link_state_machine_work(struct work_struct *work) static void ql_get_phy_owner(struct ql3_adapter *qdev) { if (ql_this_adapter_controls_port(qdev)) - set_bit(QL_LINK_MASTER,&qdev->flags); + set_bit(QL_LINK_MASTER, &qdev->flags); else - clear_bit(QL_LINK_MASTER,&qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); } /* @@ -1695,7 +1599,7 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev) { ql_mii_enable_scan_mode(qdev); - if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { if (ql_this_adapter_controls_port(qdev)) ql_petbi_init_ex(qdev); } else { @@ -1705,18 +1609,18 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev) } /* - * MII_Setup needs to be called before taking the PHY out of reset so that the - * management interface clock speed can be set properly. It would be better if - * we had a way to disable MDC until after the PHY is out of reset, but we - * don't have that capability. + * MII_Setup needs to be called before taking the PHY out of reset + * so that the management interface clock speed can be set properly. + * It would be better if we had a way to disable MDC until after the + * PHY is out of reset, but we don't have that capability. 
*/ static int ql_mii_setup(struct ql3_adapter *qdev) { u32 reg; struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; + qdev->mem_map_registers; - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; @@ -1735,24 +1639,24 @@ static int ql_mii_setup(struct ql3_adapter *qdev) return 0; } +#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ + SUPPORTED_FIBRE | \ + SUPPORTED_Autoneg) +#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP); \ + static u32 ql_supported_modes(struct ql3_adapter *qdev) { - u32 supported; + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) + return SUPPORTED_OPTICAL_MODES; - if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { - supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE - | SUPPORTED_Autoneg; - } else { - supported = SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full - | SUPPORTED_Autoneg | SUPPORTED_TP; - } - - return supported; + return SUPPORTED_TP_MODES; } static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) @@ -1760,9 +1664,9 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } @@ -1777,9 +1681,9 @@ static u32 ql_get_speed(struct ql3_adapter *qdev) u32 status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } @@ -1794,9 +1698,9 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } @@ -1806,7 +1710,6 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) return status; } - static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct ql3_adapter *qdev = netdev_priv(ndev); @@ -1814,7 +1717,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = ql_supported_modes(qdev); - if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) { + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { ecmd->port = PORT_FIBRE; } else { ecmd->port = PORT_TP; @@ -1855,10 +1758,11 @@ static void ql_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct 
ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; u32 reg; - if(qdev->mac_index == 0) + if (qdev->mac_index == 0) reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); else reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); @@ -1885,12 +1789,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) while (lrg_buf_cb) { if (!lrg_buf_cb->skb) { - lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); + lrg_buf_cb->skb = + netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { - printk(KERN_DEBUG PFX - "%s: Failed netdev_alloc_skb().\n", - qdev->ndev->name); + netdev_printk(KERN_DEBUG, qdev->ndev, + "Failed netdev_alloc_skb()\n"); break; } else { /* @@ -1905,9 +1809,10 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, map); - if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", - qdev->ndev->name, err); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; break; @@ -1915,9 +1820,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); + cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); + cpu_to_le32(MS_64BITS(map)); dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - @@ -1937,7 +1842,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) */ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + if (qdev->small_buf_release_cnt >= 16) { while (qdev->small_buf_release_cnt >= 16) { qdev->small_buf_q_producer_index++; @@ -1961,7 +1868,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) struct bufq_addr_element *lrg_buf_q_ele; int i; struct ql_rcv_buf_cb *lrg_buf_cb; - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; if ((qdev->lrg_buf_free_count >= 8) && (qdev->lrg_buf_release_cnt >= 16)) { @@ -1989,7 +1897,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) qdev->lrg_buf_q_producer_index++; - if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) + if (qdev->lrg_buf_q_producer_index == + qdev->num_lbufq_entries) qdev->lrg_buf_q_producer_index = 0; if (qdev->lrg_buf_q_producer_index == @@ -2011,23 +1920,26 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, int i; int retval = 0; - if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { - printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); + if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { + netdev_warn(qdev->ndev, + "Frame too short but it was padded and sent\n"); } tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; /* Check the transmit response flags for any errors */ - if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { - printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); + if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { + netdev_err(qdev->ndev, + "Frame too short to be legal, frame not sent\n"); qdev->ndev->stats.tx_errors++; retval = -EIO; goto frame_not_sent; } - if(tx_cb->seg_count == 0) { - printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); + if 
(tx_cb->seg_count == 0) { + netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", + mac_rsp->transaction_id); qdev->ndev->stats.tx_errors++; retval = -EIO; @@ -2073,7 +1985,7 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) qdev->lrg_buf_release_cnt++; if (++qdev->lrg_buf_index == qdev->num_large_buffers) qdev->lrg_buf_index = 0; - return(lrg_buf_cb); + return lrg_buf_cb; } /* @@ -2177,12 +2089,11 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, if (checksum & (IB_IP_IOCB_RSP_3032_ICE | IB_IP_IOCB_RSP_3032_CE)) { - printk(KERN_ERR - "%s: Bad checksum for this %s packet, checksum = %x.\n", - __func__, - ((checksum & - IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : - "UDP"),checksum); + netdev_err(ndev, + "%s: Bad checksum for this %s packet, checksum = %x\n", + __func__, + ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? + "TCP" : "UDP"), checksum); } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || (checksum & IB_IP_IOCB_RSP_3032_UDP && !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { @@ -2215,8 +2126,8 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, net_rsp = qdev->rsp_current; rmb(); /* - * Fix 4032 chipe undocumented "feature" where bit-8 is set if the - * inbound completion is for a VLAN. + * Fix 4032 chip's undocumented "feature" where bit-8 is set + * if the inbound completion is for a VLAN. */ if (qdev->device_id == QL3032_DEVICE_ID) net_rsp->opcode &= 0x7f; @@ -2242,22 +2153,18 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, net_rsp); (*rx_cleaned)++; break; - default: - { - u32 *tmp = (u32 *) net_rsp; - printk(KERN_ERR PFX - "%s: Hit default case, not " - "handled!\n" - " dropping the packet, opcode = " - "%x.\n", - ndev->name, net_rsp->opcode); - printk(KERN_ERR PFX - "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", - (unsigned long int)tmp[0], - (unsigned long int)tmp[1], - (unsigned long int)tmp[2], - (unsigned long int)tmp[3]); - } + default: { + u32 *tmp = (u32 *)net_rsp; + netdev_err(ndev, + "Hit default case, not handled!\n" + " dropping the packet, opcode = %x\n" + "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", + net_rsp->opcode, + (unsigned long int)tmp[0], + (unsigned long int)tmp[1], + (unsigned long int)tmp[2], + (unsigned long int)tmp[3]); + } } qdev->rsp_consumer_index++; @@ -2280,7 +2187,8 @@ static int ql_poll(struct napi_struct *napi, int budget) struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); int rx_cleaned = 0, tx_cleaned = 0; unsigned long hw_flags; - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); @@ -2303,15 +2211,14 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) struct net_device *ndev = dev_id; struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; u32 value; int handled = 1; u32 var; - port_regs = qdev->mem_map_registers; - - value = - ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); + value = ql_read_common_reg_l(qdev, + &port_regs->CommonRegs.ispControlStatus); if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { spin_lock(&qdev->adapter_lock); @@ -2319,7 +2226,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) netif_carrier_off(qdev->ndev); ql_disable_interrupts(qdev); qdev->port_link_state = LS_DOWN; - set_bit(QL_RESET_ACTIVE,&qdev->flags) ; + set_bit(QL_RESET_ACTIVE, &qdev->flags) ; if 
(value & ISP_CONTROL_FE) { /* @@ -2328,69 +2235,53 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) var = ql_read_page0_reg_l(qdev, &port_regs->PortFatalErrStatus); - printk(KERN_WARNING PFX - "%s: Resetting chip. PortFatalErrStatus " - "register = 0x%x\n", ndev->name, var); - set_bit(QL_RESET_START,&qdev->flags) ; + netdev_warn(ndev, + "Resetting chip. PortFatalErrStatus register = 0x%x\n", + var); + set_bit(QL_RESET_START, &qdev->flags) ; } else { /* * Soft Reset Requested. */ - set_bit(QL_RESET_PER_SCSI,&qdev->flags) ; - printk(KERN_ERR PFX - "%s: Another function issued a reset to the " - "chip. ISR value = %x.\n", ndev->name, value); + set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; + netdev_err(ndev, + "Another function issued a reset to the chip. ISR value = %x\n", + value); } queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); - if (likely(napi_schedule_prep(&qdev->napi))) { + if (likely(napi_schedule_prep(&qdev->napi))) __napi_schedule(&qdev->napi); - } - } else { + } else return IRQ_NONE; - } return IRQ_RETVAL(handled); } /* - * Get the total number of segments needed for the - * given number of fragments. This is necessary because - * outbound address lists (OAL) will be used when more than - * two frags are given. Each address list has 5 addr/len - * pairs. The 5th pair in each AOL is used to point to - * the next AOL if more frags are coming. - * That is why the frags:segment count ratio is not linear. + * Get the total number of segments needed for the given number of fragments. + * This is necessary because outbound address lists (OAL) will be used when + * more than two frags are given. Each address list has 5 addr/len pairs. + * The 5th pair in each OAL is used to point to the next OAL if more frags + * are coming. That is why the frags:segment count ratio is not linear. */ -static int ql_get_seg_count(struct ql3_adapter *qdev, - unsigned short frags) +static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) { if (qdev->device_id == QL3022_DEVICE_ID) return 1; - switch(frags) { - case 0: return 1; /* just the skb->data seg */ - case 1: return 2; /* skb->data + 1 frag */ - case 2: return 3; /* skb->data + 2 frags */ - case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */ - case 4: return 6; - case 5: return 7; - case 6: return 8; - case 7: return 10; - case 8: return 11; - case 9: return 12; - case 10: return 13; - case 11: return 15; - case 12: return 16; - case 13: return 17; - case 14: return 18; - case 15: return 20; - case 16: return 21; - case 17: return 22; - case 18: return 23; - } + if (frags <= 2) + return frags + 1; + else if (frags <= 6) + return frags + 2; + else if (frags <= 10) + return frags + 3; + else if (frags <= 14) + return frags + 4; + else if (frags <= 18) + return frags + 5; return -1; } @@ -2413,8 +2304,8 @@ static void ql_hw_csum_setup(const struct sk_buff *skb, } /* - * Map the buffers for this transmit. This will return - * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. + * Map the buffers for this transmit. + * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 
 */
 static int ql_send_map(struct ql3_adapter *qdev,
                        struct ob_mac_iocb_req *mac_iocb_ptr,
@@ -2437,9 +2328,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
         err = pci_dma_mapping_error(qdev->pdev, map);
-        if(err) {
-                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-                       qdev->ndev->name, err);
+        if (err) {
+                netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+                           err);
                 return NETDEV_TX_BUSY;
         }
 
@@ -2455,65 +2346,67 @@ static int ql_send_map(struct ql3_adapter *qdev,
         if (seg_cnt == 1) {
                 /* Terminate the last segment. */
                 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
-        } else {
-                oal = tx_cb->oal;
-                for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
-                        skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
-                        oal_entry++;
-                        if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
-                            (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
-                            (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
-                            (seg == 17 && seg_cnt > 18)) {
-                                /* Continuation entry points to outbound address list. */
-                                map = pci_map_single(qdev->pdev, oal,
-                                                     sizeof(struct oal),
-                                                     PCI_DMA_TODEVICE);
-
-                                err = pci_dma_mapping_error(qdev->pdev, map);
-                                if(err) {
-
-                                        printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
-                                               qdev->ndev->name, err);
-                                        goto map_error;
-                                }
-
-                                oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
-                                oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
-                                oal_entry->len =
-                                    cpu_to_le32(sizeof(struct oal) |
-                                                OAL_CONT_ENTRY);
-                                dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
-                                                   map);
-                                dma_unmap_len_set(&tx_cb->map[seg], maplen,
-                                                  sizeof(struct oal));
-                                oal_entry = (struct oal_entry *)oal;
-                                oal++;
-                                seg++;
-                        }
-
-                        map =
-                            pci_map_page(qdev->pdev, frag->page,
-                                         frag->page_offset, frag->size,
-                                         PCI_DMA_TODEVICE);
+                return NETDEV_TX_OK;
+        }
+        oal = tx_cb->oal;
+        for (completed_segs = 0;
+             completed_segs < frag_cnt;
+             completed_segs++, seg++) {
+                skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+                oal_entry++;
+                /*
+                 * Check for continuation requirements.
+                 * It's strange but necessary.
+                 * Continuation entry points to outbound address list.
+                 */
+                if ((seg == 2 && seg_cnt > 3) ||
+                    (seg == 7 && seg_cnt > 8) ||
+                    (seg == 12 && seg_cnt > 13) ||
+                    (seg == 17 && seg_cnt > 18)) {
+                        map = pci_map_single(qdev->pdev, oal,
+                                             sizeof(struct oal),
+                                             PCI_DMA_TODEVICE);
 
                         err = pci_dma_mapping_error(qdev->pdev, map);
-                        if(err) {
-                                printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
-                                       qdev->ndev->name, err);
+                        if (err) {
+                                netdev_err(qdev->ndev,
+                                           "PCI mapping outbound address list with error: %d\n",
+                                           err);
                                 goto map_error;
                         }
 
                         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
                         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
-                        oal_entry->len = cpu_to_le32(frag->size);
+                        oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+                                                     OAL_CONT_ENTRY);
                         dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
                         dma_unmap_len_set(&tx_cb->map[seg], maplen,
-                                          frag->size);
+                                          sizeof(struct oal));
+                        oal_entry = (struct oal_entry *)oal;
+                        oal++;
+                        seg++;
+                }
+
+                map = pci_map_page(qdev->pdev, frag->page,
+                                   frag->page_offset, frag->size,
+                                   PCI_DMA_TODEVICE);
+
+                err = pci_dma_mapping_error(qdev->pdev, map);
+                if (err) {
+                        netdev_err(qdev->ndev,
+                                   "PCI mapping frags failed with error: %d\n",
+                                   err);
+                        goto map_error;
                 }
-                /* Terminate the last segment. */
-                oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
-        }
+                oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+                oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+                oal_entry->len = cpu_to_le32(frag->size);
+                dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+                dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+        }
+        /* Terminate the last segment. */
+        oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
         return NETDEV_TX_OK;
 
 map_error:
@@ -2525,13 +2418,18 @@ static int ql_send_map(struct ql3_adapter *qdev,
         seg = 1;
         oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
         oal = tx_cb->oal;
-        for (i=0; i<completed_segs; i++,seg++) {
+        for (i = 0; i < completed_segs; i++, seg++) {
                 oal_entry++;
 
-                if((seg == 2 && seg_cnt > 3) ||         /* Check for continuation */
-                   (seg == 7 && seg_cnt > 8) ||         /* requirements. It's strange */
-                   (seg == 12 && seg_cnt > 13) ||       /* but necessary. */
-                   (seg == 17 && seg_cnt > 18)) {
+                /*
+                 * Check for continuation requirements.
+                 * It's strange but necessary.
+                 */
+
+                if ((seg == 2 && seg_cnt > 3) ||
+                    (seg == 7 && seg_cnt > 8) ||
+                    (seg == 12 && seg_cnt > 13) ||
+                    (seg == 17 && seg_cnt > 18)) {
                         pci_unmap_single(qdev->pdev,
                                 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
                                 dma_unmap_len(&tx_cb->map[seg], maplen),
@@ -2570,19 +2468,20 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
                            struct net_device *ndev)
 {
         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
-        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+        struct ql3xxx_port_registers __iomem *port_regs =
+                        qdev->mem_map_registers;
         struct ql_tx_buf_cb *tx_cb;
         u32 tot_len = skb->len;
         struct ob_mac_iocb_req *mac_iocb_ptr;
 
-        if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+        if (unlikely(atomic_read(&qdev->tx_count) < 2))
                 return NETDEV_TX_BUSY;
-        }
 
-        tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
-        if((tx_cb->seg_count = ql_get_seg_count(qdev,
-                                                (skb_shinfo(skb)->nr_frags))) == -1) {
-                printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+        tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+        tx_cb->seg_count = ql_get_seg_count(qdev,
+                                             skb_shinfo(skb)->nr_frags);
+        if (tx_cb->seg_count == -1) {
+                netdev_err(ndev, "%s: invalid segment count!\n", __func__);
                 return NETDEV_TX_OK;
         }
 
@@ -2598,8 +2497,8 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
             skb->ip_summed == CHECKSUM_PARTIAL)
                 ql_hw_csum_setup(skb, mac_iocb_ptr);
 
-        if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
-                printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
+        if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+                netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
                 return NETDEV_TX_BUSY;
         }
 
@@ -2612,9 +2511,9 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
                             &port_regs->CommonRegs.reqQProducerIndex,
                             qdev->req_producer_index);
 
-        if (netif_msg_tx_queued(qdev))
-                printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
-                       ndev->name, qdev->req_producer_index, skb->len);
+        netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+                     "tx queued, slot %d, len %d\n",
+                     qdev->req_producer_index, skb->len);
 
         atomic_dec(&qdev->tx_count);
         return NETDEV_TX_OK;
@@ -2632,8 +2531,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 
         if ((qdev->req_q_virt_addr == NULL) ||
             LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
-                printk(KERN_ERR PFX "%s: reqQ failed.\n",
-                       qdev->ndev->name);
+                netdev_err(qdev->ndev, "reqQ failed\n");
                 return -ENOMEM;
         }
 
@@ -2646,25 +2544,22 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 
         if ((qdev->rsp_q_virt_addr == NULL) ||
             LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
-                printk(KERN_ERR PFX
-                       "%s: rspQ
allocation failed\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "rspQ allocation failed\n"); pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); return -ENOMEM; } - set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); + set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); return 0; } static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) { - if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) { - printk(KERN_INFO PFX - "%s: Already done.\n", qdev->ndev->name); + if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); return; } @@ -2680,34 +2575,34 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) qdev->rsp_q_virt_addr = NULL; - clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags); + clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); } static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) { /* Create Large Buffer Queue */ qdev->lrg_buf_q_size = - qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); + qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); if (qdev->lrg_buf_q_size < PAGE_SIZE) qdev->lrg_buf_q_alloc_size = PAGE_SIZE; else qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; - qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL); + qdev->lrg_buf = + kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), + GFP_KERNEL); if (qdev->lrg_buf == NULL) { - printk(KERN_ERR PFX - "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); + netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); return -ENOMEM; } qdev->lrg_buf_q_alloc_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->lrg_buf_q_alloc_size, - &qdev->lrg_buf_q_alloc_phy_addr); + pci_alloc_consistent(qdev->pdev, + qdev->lrg_buf_q_alloc_size, + &qdev->lrg_buf_q_alloc_phy_addr); if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { - printk(KERN_ERR PFX - "%s: lBufQ failed\n", qdev->ndev->name); + netdev_err(qdev->ndev, "lBufQ failed\n"); return -ENOMEM; } qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; @@ -2715,21 +2610,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) /* Create Small Buffer Queue */ qdev->small_buf_q_size = - NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); + NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); if (qdev->small_buf_q_size < PAGE_SIZE) qdev->small_buf_q_alloc_size = PAGE_SIZE; else qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; qdev->small_buf_q_alloc_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->small_buf_q_alloc_size, - &qdev->small_buf_q_alloc_phy_addr); + pci_alloc_consistent(qdev->pdev, + qdev->small_buf_q_alloc_size, + &qdev->small_buf_q_alloc_phy_addr); if (qdev->small_buf_q_alloc_virt_addr == NULL) { - printk(KERN_ERR PFX - "%s: Small Buffer Queue allocation failed.\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); @@ -2738,18 +2631,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; - set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); + set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); return 0; } static void ql_free_buffer_queues(struct ql3_adapter *qdev) { - if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) { - printk(KERN_INFO PFX - "%s: Already done.\n", 
qdev->ndev->name); + if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); return; } - if(qdev->lrg_buf) kfree(qdev->lrg_buf); + kfree(qdev->lrg_buf); pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, @@ -2764,7 +2656,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev) qdev->small_buf_q_virt_addr = NULL; - clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags); + clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); } static int ql_alloc_small_buffers(struct ql3_adapter *qdev) @@ -2774,18 +2666,16 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev) /* Currently we allocate on one of memory and use it for smallbuffers */ qdev->small_buf_total_size = - (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * - QL_SMALL_BUFFER_SIZE); + (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * + QL_SMALL_BUFFER_SIZE); qdev->small_buf_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->small_buf_total_size, - &qdev->small_buf_phy_addr); + pci_alloc_consistent(qdev->pdev, + qdev->small_buf_total_size, + &qdev->small_buf_phy_addr); if (qdev->small_buf_virt_addr == NULL) { - printk(KERN_ERR PFX - "%s: Failed to get small buffer memory.\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); return -ENOMEM; } @@ -2804,15 +2694,14 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev) small_buf_q_entry++; } qdev->small_buf_index = 0; - set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags); + set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); return 0; } static void ql_free_small_buffers(struct ql3_adapter *qdev) { - if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) { - printk(KERN_INFO PFX - "%s: Already done.\n", qdev->ndev->name); + if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); return; } if (qdev->small_buf_virt_addr != NULL) { @@ -2874,11 +2763,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) qdev->lrg_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ - printk(KERN_ERR PFX - "%s: large buff alloc failed, " - "for %d bytes at index %d.\n", - qdev->ndev->name, - qdev->lrg_buffer_len * 2, i); + netdev_err(qdev->ndev, + "large buff alloc failed for %d bytes at index %d\n", + qdev->lrg_buffer_len * 2, i); ql_free_large_buffers(qdev); return -ENOMEM; } else { @@ -2899,9 +2786,10 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(qdev->pdev, map); - if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", - qdev->ndev->name, err); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); ql_free_large_buffers(qdev); return -ENOMEM; } @@ -2926,10 +2814,8 @@ static void ql_free_send_free_list(struct ql3_adapter *qdev) tx_cb = &qdev->tx_buf[0]; for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { - if (tx_cb->oal) { - kfree(tx_cb->oal); - tx_cb->oal = NULL; - } + kfree(tx_cb->oal); + tx_cb->oal = NULL; tx_cb++; } } @@ -2938,8 +2824,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev) { struct ql_tx_buf_cb *tx_cb; int i; - struct ob_mac_iocb_req *req_q_curr = - qdev->req_q_virt_addr; + struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; /* Create free list of transmit buffers */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { @@ -2960,23 +2845,22 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 
qdev->lrg_buffer_len = NORMAL_MTU_SIZE; - } - else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { + } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { /* * Bigger buffers, so less of them. */ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; qdev->lrg_buffer_len = JUMBO_MTU_SIZE; } else { - printk(KERN_ERR PFX - "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", + qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); return -ENOMEM; } - qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; + qdev->num_large_buffers = + qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; qdev->max_frame_size = - (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; + (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; /* * First allocate a page of shared memory and use it for shadow @@ -2984,51 +2868,44 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev) * Network Completion Queue Producer Index Register */ qdev->shadow_reg_virt_addr = - pci_alloc_consistent(qdev->pdev, - PAGE_SIZE, &qdev->shadow_reg_phy_addr); + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->shadow_reg_phy_addr); if (qdev->shadow_reg_virt_addr != NULL) { qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr; qdev->req_consumer_index_phy_addr_high = - MS_64BITS(qdev->shadow_reg_phy_addr); + MS_64BITS(qdev->shadow_reg_phy_addr); qdev->req_consumer_index_phy_addr_low = - LS_64BITS(qdev->shadow_reg_phy_addr); + LS_64BITS(qdev->shadow_reg_phy_addr); qdev->prsp_producer_index = - (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); + (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); qdev->rsp_producer_index_phy_addr_high = - qdev->req_consumer_index_phy_addr_high; + qdev->req_consumer_index_phy_addr_high; qdev->rsp_producer_index_phy_addr_low = - qdev->req_consumer_index_phy_addr_low + 8; + qdev->req_consumer_index_phy_addr_low + 8; } else { - printk(KERN_ERR PFX - "%s: shadowReg Alloc failed.\n", qdev->ndev->name); + netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); return -ENOMEM; } if (ql_alloc_net_req_rsp_queues(qdev) != 0) { - printk(KERN_ERR PFX - "%s: ql_alloc_net_req_rsp_queues failed.\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); goto err_req_rsp; } if (ql_alloc_buffer_queues(qdev) != 0) { - printk(KERN_ERR PFX - "%s: ql_alloc_buffer_queues failed.\n", - qdev->ndev->name); + netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); goto err_buffer_queues; } if (ql_alloc_small_buffers(qdev) != 0) { - printk(KERN_ERR PFX - "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name); + netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); goto err_small_buffers; } if (ql_alloc_large_buffers(qdev) != 0) { - printk(KERN_ERR PFX - "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name); + netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); goto err_small_buffers; } @@ -3076,7 +2953,7 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev) struct ql3xxx_local_ram_registers __iomem *local_ram = (void __iomem *)qdev->mem_map_registers; - if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 4)) return -1; @@ -3132,18 +3009,20 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev) static int ql_adapter_initialize(struct ql3_adapter *qdev) 
{ u32 value; - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; struct ql3xxx_host_memory_registers __iomem *hmem_regs = - (void __iomem *)port_regs; + (void __iomem *)port_regs; u32 delay = 10; int status = 0; unsigned long hw_flags = 0; - if(ql_mii_setup(qdev)) + if (ql_mii_setup(qdev)) return -1; /* Bring out PHY out of reset */ - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ql_write_common_reg(qdev, spir, (ISP_SERIAL_PORT_IF_WE | (ISP_SERIAL_PORT_IF_WE << 16))); /* Give the PHY time to come out of reset. */ @@ -3152,13 +3031,13 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) netif_carrier_off(qdev->ndev); /* V2 chip fix for ARS-39168. */ - ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, + ql_write_common_reg(qdev, spir, (ISP_SERIAL_PORT_IF_SDE | (ISP_SERIAL_PORT_IF_SDE << 16))); /* Request Queue Registers */ - *((u32 *) (qdev->preq_consumer_index)) = 0; - atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES); + *((u32 *)(qdev->preq_consumer_index)) = 0; + atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); qdev->req_producer_index = 0; ql_write_page1_reg(qdev, @@ -3208,7 +3087,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) &hmem_regs->rxLargeQBaseAddrLow, LS_64BITS(qdev->lrg_buf_q_phy_addr)); - ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQLength, + qdev->num_lbufq_entries); ql_write_page1_reg(qdev, &hmem_regs->rxLargeBufferLength, @@ -3258,7 +3139,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) if ((value & PORT_STATUS_IC) == 0) { /* Chip has not been configured yet, so let it rip. */ - if(ql_init_misc_registers(qdev)) { + if (ql_init_misc_registers(qdev)) { status = -1; goto out; } @@ -3268,7 +3149,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; - if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 13)) { status = -1; @@ -3291,7 +3172,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) &port_regs->mac0MaxFrameLengthReg, qdev->max_frame_size); - if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { status = -1; @@ -3353,8 +3234,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) } while (--delay); if (delay == 0) { - printk(KERN_ERR PFX - "%s: Hw Initialization timeout.\n", qdev->ndev->name); + netdev_err(qdev->ndev, "Hw Initialization timeout\n"); status = -1; goto out; } @@ -3385,7 +3265,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) */ static int ql_adapter_reset(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; int status = 0; u16 value; int max_wait_time; @@ -3396,17 +3277,14 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) /* * Issue soft reset to chip. 
*/ - printk(KERN_DEBUG PFX - "%s: Issue soft reset to chip.\n", - qdev->ndev->name); + netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); ql_write_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus, ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); /* Wait 3 seconds for reset to complete. */ - printk(KERN_DEBUG PFX - "%s: Wait 10 milliseconds for reset to complete.\n", - qdev->ndev->name); + netdev_printk(KERN_DEBUG, qdev->ndev, + "Wait 10 milliseconds for reset to complete\n"); /* Wait until the firmware tells us the Soft Reset is done */ max_wait_time = 5; @@ -3427,8 +3305,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) value = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); if (value & ISP_CONTROL_RI) { - printk(KERN_DEBUG PFX - "ql_adapter_reset: clearing RI after reset.\n"); + netdev_printk(KERN_DEBUG, qdev->ndev, + "clearing RI after reset\n"); ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, @@ -3448,13 +3326,11 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) */ max_wait_time = 5; do { - value = - ql_read_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus); - if ((value & ISP_CONTROL_FSR) == 0) { + value = ql_read_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus); + if ((value & ISP_CONTROL_FSR) == 0) break; - } ssleep(1); } while ((--max_wait_time)); } @@ -3468,7 +3344,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) static void ql_set_mac_info(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; u32 value, port_status; u8 func_number; @@ -3484,9 +3361,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) qdev->mb_bit_mask = FN0_MA_BITS_MASK; qdev->PHYAddr = PORT0_PHY_ADDRESS; if (port_status & PORT_STATUS_SM0) - set_bit(QL_LINK_OPTICAL,&qdev->flags); + set_bit(QL_LINK_OPTICAL, &qdev->flags); else - clear_bit(QL_LINK_OPTICAL,&qdev->flags); + clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN1_NET: @@ -3495,17 +3372,17 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) qdev->mb_bit_mask = FN1_MA_BITS_MASK; qdev->PHYAddr = PORT1_PHY_ADDRESS; if (port_status & PORT_STATUS_SM1) - set_bit(QL_LINK_OPTICAL,&qdev->flags); + set_bit(QL_LINK_OPTICAL, &qdev->flags); else - clear_bit(QL_LINK_OPTICAL,&qdev->flags); + clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN0_SCSI: case ISP_CONTROL_FN1_SCSI: default: - printk(KERN_DEBUG PFX - "%s: Invalid function number, ispControlStatus = 0x%x\n", - qdev->ndev->name,value); + netdev_printk(KERN_DEBUG, qdev->ndev, + "Invalid function number, ispControlStatus = 0x%x\n", + value); break; } qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; @@ -3516,32 +3393,26 @@ static void ql_display_dev_info(struct net_device *ndev) struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); struct pci_dev *pdev = qdev->pdev; - printk(KERN_INFO PFX - "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", - DRV_NAME, qdev->index, qdev->chip_rev_id, - (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022", - qdev->pci_slot); - printk(KERN_INFO PFX - "%s Interface.\n", - test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); + netdev_info(ndev, + "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", + DRV_NAME, qdev->index, qdev->chip_rev_id, + qdev->device_id == QL3032_DEVICE_ID ? 
"QLA3032" : "QLA3022", + qdev->pci_slot); + netdev_info(ndev, "%s Interface\n", + test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); /* * Print PCI bus width/type. */ - printk(KERN_INFO PFX - "Bus interface is %s %s.\n", - ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), - ((qdev->pci_x) ? "PCI-X" : "PCI")); + netdev_info(ndev, "Bus interface is %s %s\n", + ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), + ((qdev->pci_x) ? "PCI-X" : "PCI")); - printk(KERN_INFO PFX - "mem IO base address adjusted = 0x%p\n", - qdev->mem_map_registers); - printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq); + netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", + qdev->mem_map_registers); + netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); - if (netif_msg_probe(qdev)) - printk(KERN_INFO PFX - "%s: MAC address %pM\n", - ndev->name, ndev->dev_addr); + netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); } static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) @@ -3552,17 +3423,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) netif_stop_queue(ndev); netif_carrier_off(ndev); - clear_bit(QL_ADAPTER_UP,&qdev->flags); - clear_bit(QL_LINK_MASTER,&qdev->flags); + clear_bit(QL_ADAPTER_UP, &qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); ql_disable_interrupts(qdev); free_irq(qdev->pdev->irq, ndev); - if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { - printk(KERN_INFO PFX - "%s: calling pci_disable_msi().\n", qdev->ndev->name); - clear_bit(QL_MSI_ENABLED,&qdev->flags); + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } @@ -3576,17 +3446,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_wait_for_drvr_lock(qdev)) { - if ((soft_reset = ql_adapter_reset(qdev))) { - printk(KERN_ERR PFX - "%s: ql_adapter_reset(%d) FAILED!\n", - ndev->name, qdev->index); + soft_reset = ql_adapter_reset(qdev); + if (soft_reset) { + netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", + qdev->index); } - printk(KERN_ERR PFX - "%s: Releaseing driver lock via chip reset.\n",ndev->name); + netdev_err(ndev, + "Releasing driver lock via chip reset\n"); } else { - printk(KERN_ERR PFX - "%s: Could not acquire driver lock to do " - "reset!\n", ndev->name); + netdev_err(ndev, + "Could not acquire driver lock to do reset!\n"); retval = -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); @@ -3603,56 +3472,50 @@ static int ql_adapter_up(struct ql3_adapter *qdev) unsigned long hw_flags; if (ql_alloc_mem_resources(qdev)) { - printk(KERN_ERR PFX - "%s Unable to allocate buffers.\n", ndev->name); + netdev_err(ndev, "Unable to allocate buffers\n"); return -ENOMEM; } if (qdev->msi) { if (pci_enable_msi(qdev->pdev)) { - printk(KERN_ERR PFX - "%s: User requested MSI, but MSI failed to " - "initialize. Continuing without MSI.\n", - qdev->ndev->name); + netdev_err(ndev, + "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); qdev->msi = 0; } else { - printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name); - set_bit(QL_MSI_ENABLED,&qdev->flags); + netdev_info(ndev, "MSI Enabled...\n"); + set_bit(QL_MSI_ENABLED, &qdev->flags); irq_flags &= ~IRQF_SHARED; } } - if ((err = request_irq(qdev->pdev->irq, - ql3xxx_isr, - irq_flags, ndev->name, ndev))) { - printk(KERN_ERR PFX - "%s: Failed to reserve interrupt %d already in use.\n", - ndev->name, qdev->pdev->irq); + err = request_irq(qdev->pdev->irq, ql3xxx_isr, + irq_flags, ndev->name, ndev); + if (err) { + netdev_err(ndev, + "Failed to reserve interrupt %d - already in use\n", + qdev->pdev->irq); goto err_irq; } spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if ((err = ql_wait_for_drvr_lock(qdev))) { - if ((err = ql_adapter_initialize(qdev))) { - printk(KERN_ERR PFX - "%s: Unable to initialize adapter.\n", - ndev->name); + err = ql_wait_for_drvr_lock(qdev); + if (err) { + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); goto err_init; } - printk(KERN_ERR PFX - "%s: Releaseing driver lock.\n",ndev->name); + netdev_err(ndev, "Releasing driver lock\n"); ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); } else { - printk(KERN_ERR PFX - "%s: Could not acquire driver lock.\n", - ndev->name); + netdev_err(ndev, "Could not acquire driver lock\n"); goto err_lock; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - set_bit(QL_ADAPTER_UP,&qdev->flags); + set_bit(QL_ADAPTER_UP, &qdev->flags); mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); @@ -3666,11 +3529,9 @@ static int ql_adapter_up(struct ql3_adapter *qdev) spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); free_irq(qdev->pdev->irq, ndev); err_irq: - if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { - printk(KERN_INFO PFX - "%s: calling pci_disable_msi().\n", - qdev->ndev->name); - clear_bit(QL_MSI_ENABLED,&qdev->flags); + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } return err; @@ -3678,10 +3539,9 @@ static int ql_adapter_up(struct ql3_adapter *qdev) static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) { - if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) { - printk(KERN_ERR PFX - "%s: Driver up/down cycle failed, " - "closing device\n",qdev->ndev->name); + if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { + netdev_err(qdev->ndev, + "Driver up/down cycle failed, closing device\n"); rtnl_lock(); dev_close(qdev->ndev); rtnl_unlock(); @@ -3698,24 +3558,24 @@ static int ql3xxx_close(struct net_device *ndev) * Wait for device to recover from a reset. * (Rarely happens, but possible.) 
 */
-        while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
+        while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
                 msleep(50);
 
-        ql_adapter_down(qdev,QL_DO_RESET);
+        ql_adapter_down(qdev, QL_DO_RESET);
         return 0;
 }
 
 static int ql3xxx_open(struct net_device *ndev)
 {
         struct ql3_adapter *qdev = netdev_priv(ndev);
-        return (ql_adapter_up(qdev));
+        return ql_adapter_up(qdev);
 }
 
 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
 {
         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
         struct ql3xxx_port_registers __iomem *port_regs =
-                        qdev->mem_map_registers;
+                qdev->mem_map_registers;
         struct sockaddr *addr = p;
         unsigned long hw_flags;
 
@@ -3750,7 +3610,7 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
 {
         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 
-        printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
+        netdev_err(ndev, "Resetting...\n");
         /*
          * Stop the queues, we've got a problem.
          */
@@ -3770,11 +3630,12 @@ static void ql_reset_work(struct work_struct *work)
         u32 value;
         struct ql_tx_buf_cb *tx_cb;
         int max_wait_time, i;
-        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+        struct ql3xxx_port_registers __iomem *port_regs =
+                qdev->mem_map_registers;
         unsigned long hw_flags;
 
-        if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) {
-                clear_bit(QL_LINK_MASTER,&qdev->flags);
+        if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+                clear_bit(QL_LINK_MASTER, &qdev->flags);
 
                 /*
                  * Loop through the active list and return the skb.
@@ -3783,17 +3644,19 @@ static void ql_reset_work(struct work_struct *work)
                         int j;
                         tx_cb = &qdev->tx_buf[i];
                         if (tx_cb->skb) {
-                                printk(KERN_DEBUG PFX
-                                       "%s: Freeing lost SKB.\n",
-                                       qdev->ndev->name);
+                                netdev_printk(KERN_DEBUG, ndev,
+                                              "Freeing lost SKB\n");
                                 pci_unmap_single(qdev->pdev,
-                                         dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                                         dma_unmap_addr(&tx_cb->map[0],
+                                                        mapaddr),
                                          dma_unmap_len(&tx_cb->map[0], maplen),
                                          PCI_DMA_TODEVICE);
-                                for(j=1;j<tx_cb->seg_count;j++) {
+                                for (j = 1; j < tx_cb->seg_count; j++) {
                                         pci_unmap_page(qdev->pdev,
-                                               dma_unmap_addr(&tx_cb->map[j],mapaddr),
-                                               dma_unmap_len(&tx_cb->map[j],maplen),
+                                               dma_unmap_addr(&tx_cb->map[j],
+                                                              mapaddr),
+                                               dma_unmap_len(&tx_cb->map[j],
+                                                             maplen),
                                                PCI_DMA_TODEVICE);
                                 }
                                 dev_kfree_skb(tx_cb->skb);
@@ -3801,8 +3664,7 @@ static void ql_reset_work(struct work_struct *work)
                         }
                 }
-                printk(KERN_ERR PFX
-                       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+                netdev_err(ndev, "Clearing NRI after reset\n");
                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
                 ql_write_common_reg(qdev,
                                     &port_regs->
                                     CommonRegs.
@@ -3818,16 +3680,14 @@ static void ql_reset_work(struct work_struct *work)
                                                    ispControlStatus);
                         if ((value & ISP_CONTROL_SR) == 0) {
-                                printk(KERN_DEBUG PFX
-                                       "%s: reset completed.\n",
-                                       qdev->ndev->name);
+                                netdev_printk(KERN_DEBUG, ndev,
+                                              "reset completed\n");
                                 break;
                         }
 
                         if (value & ISP_CONTROL_RI) {
-                                printk(KERN_DEBUG PFX
-                                       "%s: clearing NRI after reset.\n",
-                                       qdev->ndev->name);
+                                netdev_printk(KERN_DEBUG, ndev,
+                                              "clearing NRI after reset\n");
                                 ql_write_common_reg(qdev,
                                                     &port_regs->
                                                     CommonRegs.
@@ -3848,21 +3708,19 @@ static void ql_reset_work(struct work_struct *work)
                          * Set the reset flags and clear the board again.
                          * Nothing else to do...
*/ - printk(KERN_ERR PFX - "%s: Timed out waiting for reset to " - "complete.\n", ndev->name); - printk(KERN_ERR PFX - "%s: Do a reset.\n", ndev->name); - clear_bit(QL_RESET_PER_SCSI,&qdev->flags); - clear_bit(QL_RESET_START,&qdev->flags); - ql_cycle_adapter(qdev,QL_DO_RESET); + netdev_err(ndev, + "Timed out waiting for reset to complete\n"); + netdev_err(ndev, "Do a reset\n"); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_DO_RESET); return; } - clear_bit(QL_RESET_ACTIVE,&qdev->flags); - clear_bit(QL_RESET_PER_SCSI,&qdev->flags); - clear_bit(QL_RESET_START,&qdev->flags); - ql_cycle_adapter(qdev,QL_NO_RESET); + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_NO_RESET); } } @@ -3876,7 +3734,8 @@ static void ql_tx_timeout_work(struct work_struct *work) static void ql_get_board_info(struct ql3_adapter *qdev) { - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; u32 value; value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); @@ -3915,20 +3774,18 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, { struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; - static int cards_found = 0; + static int cards_found; int uninitialized_var(pci_using_dac), err; err = pci_enable_device(pdev); if (err) { - printk(KERN_ERR PFX "%s cannot enable PCI device\n", - pci_name(pdev)); + pr_err("%s cannot enable PCI device\n", pci_name(pdev)); goto err_out; } err = pci_request_regions(pdev, DRV_NAME); if (err) { - printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", - pci_name(pdev)); + pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); goto err_out_disable_pdev; } @@ -3943,15 +3800,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, } if (err) { - printk(KERN_ERR PFX "%s no usable DMA configuration\n", - pci_name(pdev)); + pr_err("%s no usable DMA configuration\n", pci_name(pdev)); goto err_out_free_regions; } ndev = alloc_etherdev(sizeof(struct ql3_adapter)); if (!ndev) { - printk(KERN_ERR PFX "%s could not alloc etherdev\n", - pci_name(pdev)); + pr_err("%s could not alloc etherdev\n", pci_name(pdev)); err = -ENOMEM; goto err_out_free_regions; } @@ -3978,8 +3833,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); if (!qdev->mem_map_registers) { - printk(KERN_ERR PFX "%s: cannot map device registers\n", - pci_name(pdev)); + pr_err("%s: cannot map device registers\n", pci_name(pdev)); err = -EIO; goto err_out_free_ndev; } @@ -3998,9 +3852,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, /* make sure the EEPROM is good */ if (ql_get_nvram_params(qdev)) { - printk(KERN_ALERT PFX - "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", - qdev->index); + pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", + __func__, qdev->index); err = -EIO; goto err_out_iounmap; } @@ -4026,14 +3879,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, * Set the Maximum Memory Read Byte Count value. We do this to handle * jumbo frames. 
*/ - if (qdev->pci_x) { + if (qdev->pci_x) pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); - } err = register_netdev(ndev); if (err) { - printk(KERN_ERR PFX "%s: cannot register net device\n", - pci_name(pdev)); + pr_err("%s: cannot register net device\n", pci_name(pdev)); goto err_out_iounmap; } @@ -4052,10 +3903,10 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ qdev->adapter_timer.data = (unsigned long)qdev; - if(!cards_found) { - printk(KERN_ALERT PFX "%s\n", DRV_STRING); - printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n", - DRV_NAME, DRV_VERSION); + if (!cards_found) { + pr_alert("%s\n", DRV_STRING); + pr_alert("Driver name: %s, Version: %s\n", + DRV_NAME, DRV_VERSION); } ql_display_dev_info(ndev); diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 896d40df9a134dddb3c470373c3c4f01a44e7976..970389331bbc4cb5c91da180fd04d282db872ac5 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h @@ -51,8 +51,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 2 -#define QLCNIC_LINUX_VERSIONID "5.0.2" +#define _QLCNIC_LINUX_SUBVERSION 7 +#define QLCNIC_LINUX_VERSIONID "5.0.7" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) @@ -68,6 +68,7 @@ #define QLCNIC_DECODE_VERSION(v) \ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) +#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2) #define QLCNIC_NUM_FLASH_SECTORS (64) #define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) #define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ @@ -112,8 +113,10 @@ #define TX_UDPV6_PKT 0x0c /* Tx defines */ -#define MAX_BUFFERS_PER_CMD 32 -#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) #define QLCNIC_MAX_TX_TIMEOUTS 2 /* @@ -197,8 +200,7 @@ struct cmd_desc_type0 { __le64 addr_buffer4; - __le32 reserved2; - __le16 reserved; + u8 eth_addr[ETH_ALEN]; __le16 vlan_TCI; } __attribute__ ((aligned(64))); @@ -315,6 +317,8 @@ struct uni_data_desc{ #define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032 #define QLCNIC_BRDTYPE_P3_10G_TP 0x0080 +#define QLCNIC_MSIX_TABLE_OFFSET 0x44 + /* Flash memory map */ #define QLCNIC_BRDCFG_START 0x4000 /* board config */ #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ @@ -367,7 +371,7 @@ struct qlcnic_recv_crb { */ struct qlcnic_cmd_buffer { struct sk_buff *skb; - struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; + struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1]; u32 frag_count; }; @@ -377,7 +381,6 @@ struct qlcnic_rx_buffer { struct sk_buff *skb; u64 dma; u16 ref_handle; - u16 state; }; /* Board types */ @@ -419,7 +422,6 @@ struct qlcnic_adapter_stats { u64 xmit_on; u64 xmit_off; u64 skb_alloc_failure; - u64 null_skb; u64 null_rxbuf; u64 rx_dma_map_error; u64 tx_dma_map_error; @@ -542,7 +544,17 @@ struct qlcnic_recv_context { #define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c #define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d #define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e -#define QLCNIC_CDRP_CMD_MAX 0x0000001f +#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f + +#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 +#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 +#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 +#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023 +#define 
QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 +#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 +#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 +#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 #define QLCNIC_RCODE_SUCCESS 0 #define QLCNIC_RCODE_TIMEOUT 17 @@ -556,12 +568,12 @@ struct qlcnic_recv_context { #define QLCNIC_CAP0_LSO (1 << 6) #define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) #define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) +#define QLCNIC_CAP0_VALIDOFF (1 << 11) /* * Context state */ -#define QLCHAL_VERSION 1 - +#define QLCNIC_HOST_CTX_STATE_FREED 0 #define QLCNIC_HOST_CTX_STATE_ACTIVE 2 /* @@ -592,9 +604,10 @@ struct qlcnic_hostrq_rx_ctx { __le32 sds_ring_offset; /* Offset to SDS config */ __le16 num_rds_rings; /* Count of RDS rings */ __le16 num_sds_rings; /* Count of SDS rings */ - __le16 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - u8 reserved[128]; /* reserve space for future expansion*/ + __le16 valid_field_offset; + u8 txrx_sds_binding; + u8 msix_handler; + u8 reserved[128]; /* reserve space for future expansion*/ /* MUST BE 64-bit aligned. The following is packed: - N hostrq_rds_rings @@ -808,9 +821,10 @@ struct qlcnic_nic_intr_coalesce { #define QLCNIC_LRO_REQUEST_CLEANUP 4 /* Capabilites received */ -#define QLCNIC_FW_CAPABILITY_BDG (1 << 8) -#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9) -#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10) +#define QLCNIC_FW_CAPABILITY_TSO BIT_1 +#define QLCNIC_FW_CAPABILITY_BDG BIT_8 +#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 +#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -881,12 +895,14 @@ struct qlcnic_mac_req { #define QLCNIC_LRO_ENABLED 0x08 #define QLCNIC_BRIDGE_ENABLED 0X10 #define QLCNIC_DIAG_ENABLED 0x20 +#define QLCNIC_ESWITCH_ENABLED 0x40 #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) #define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS #define QLCNIC_MSIX_TBL_SPACE 8192 #define QLCNIC_PCI_REG_MSIX_TBL 0x44 +#define QLCNIC_MSIX_TBL_PGSIZE 4096 #define QLCNIC_NETDEV_WEIGHT 128 #define QLCNIC_ADAPTER_UP_MAGIC 777 @@ -895,6 +911,7 @@ struct qlcnic_mac_req { #define __QLCNIC_DEV_UP 1 #define __QLCNIC_RESETTING 2 #define __QLCNIC_START_FW 4 +#define __QLCNIC_AER 5 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -919,11 +936,11 @@ struct qlcnic_adapter { u8 rx_csum; u8 portnum; u8 physical_port; + u8 reset_context; u8 mc_enabled; u8 max_mc_count; u8 rss_supported; - u8 rsrvd1; u8 fw_wait_cnt; u8 fw_fail_cnt; u8 tx_timeo_cnt; @@ -932,7 +949,6 @@ struct qlcnic_adapter { u8 has_link_events; u8 fw_type; u16 tx_context_id; - u16 mtu; u16 is_up; u16 link_speed; @@ -940,6 +956,13 @@ struct qlcnic_adapter { u16 link_autoneg; u16 module_type; + u16 op_mode; + u16 switch_mode; + u16 max_tx_ques; + u16 max_rx_ques; + u16 max_mtu; + + u32 fw_hal_version; u32 capabilities; u32 flags; u32 irq; @@ -948,18 +971,22 @@ struct qlcnic_adapter { u32 int_vec_bit; u32 heartbit; + u8 max_mac_filters; u8 dev_state; u8 diag_test; u8 diag_cnt; u8 reset_ack_timeo; u8 dev_init_timeo; - u8 rsrd1; u16 msg_enable; u8 mac_addr[ETH_ALEN]; u64 dev_rst_time; + struct qlcnic_npar_info *npars; + struct qlcnic_eswitch *eswitch; + struct qlcnic_nic_template *nic_ops; + struct qlcnic_adapter_stats stats; struct qlcnic_recv_context recv_ctx; @@ -974,8 +1001,6 @@ struct qlcnic_adapter { struct delayed_work fw_work; - struct work_struct tx_timeout_task; - struct 
qlcnic_nic_intr_coalesce coal; unsigned long state; @@ -984,6 +1009,123 @@ struct qlcnic_adapter { const struct firmware *fw; }; +struct qlcnic_info { + __le16 pci_func; + __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ + __le16 phys_port; + __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ + + __le32 capabilities; + u8 max_mac_filters; + u8 reserved1; + __le16 max_mtu; + + __le16 max_tx_ques; + __le16 max_rx_ques; + __le16 min_tx_bw; + __le16 max_tx_bw; + u8 reserved2[104]; +}; + +struct qlcnic_pci_info { + __le16 id; /* pci function id */ + __le16 active; /* 1 = Enabled */ + __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ + __le16 default_port; /* default port number */ + + __le16 tx_min_bw; /* Multiple of 100mbpc */ + __le16 tx_max_bw; + __le16 reserved1[2]; + + u8 mac[ETH_ALEN]; + u8 reserved2[106]; +}; + +struct qlcnic_npar_info { + u16 vlan_id; + u16 min_bw; + u16 max_bw; + u8 phy_port; + u8 type; + u8 active; + u8 enable_pm; + u8 dest_npar; + u8 host_vlan_tag; + u8 promisc_mode; + u8 discard_tagged; + u8 mac_learning; +}; +struct qlcnic_eswitch { + u8 port; + u8 active_vports; + u8 active_vlans; + u8 active_ucast_filters; + u8 max_ucast_filters; + u8 max_active_vlans; + + u32 flags; +#define QLCNIC_SWITCH_ENABLE BIT_1 +#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 +#define QLCNIC_SWITCH_PROMISC_MODE BIT_3 +#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 +}; + + +/* Return codes for Error handling */ +#define QL_STATUS_INVALID_PARAM -1 + +#define MAX_BW 100 +#define MIN_BW 1 +#define MAX_VLAN_ID 4095 +#define MIN_VLAN_ID 2 +#define MAX_TX_QUEUES 1 +#define MAX_RX_QUEUES 4 +#define DEFAULT_MAC_LEARN 1 + +#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID) +#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) +#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) +#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) +#define IS_VALID_MODE(mode) (mode == 0 || mode == 1) + +struct qlcnic_pci_func_cfg { + u16 func_type; + u16 min_bw; + u16 max_bw; + u16 port_num; + u8 pci_func; + u8 func_state; + u8 def_mac_addr[6]; +}; + +struct qlcnic_npar_func_cfg { + u32 fw_capab; + u16 port_num; + u16 min_bw; + u16 max_bw; + u16 max_tx_queues; + u16 max_rx_queues; + u8 pci_func; + u8 op_mode; +}; + +struct qlcnic_pm_func_cfg { + u8 pci_func; + u8 action; + u8 dest_npar; + u8 reserved[5]; +}; + +struct qlcnic_esw_func_cfg { + u16 vlan_id; + u8 pci_func; + u8 host_vlan_tag; + u8 promisc_mode; + u8 discard_tagged; + u8 mac_learning; + u8 reserved; +}; + int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val); @@ -1031,13 +1173,13 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter); int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); /* Functions from qlcnic_init.c */ -int qlcnic_phantom_init(struct qlcnic_adapter *adapter); int qlcnic_load_firmware(struct qlcnic_adapter *adapter); int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); void qlcnic_request_firmware(struct qlcnic_adapter *adapter); void qlcnic_release_firmware(struct qlcnic_adapter *adapter); int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); +int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, @@ -1050,6 +1192,10 @@ void 
__iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32); int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter); +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); + +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); @@ -1070,13 +1216,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); -int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable); +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring); -int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac); +int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac); void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); +void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); /* Functions from qlcnic_main.c */ int qlcnic_reset_context(struct qlcnic_adapter *); @@ -1088,6 +1235,25 @@ int qlcnic_check_loopback_buff(unsigned char *data); netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); +/* Management functions */ +int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*); +int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); +int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); +int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); +int qlcnic_reset_partition(struct qlcnic_adapter *, u8); + +/* eSwitch management functions */ +int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8, + struct qlcnic_eswitch *); +int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8, + struct qlcnic_eswitch *); +int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8); +int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8, + u8, u8, u16); +int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); +extern int qlcnic_config_tso; + /* * QLOGIC Board information */ @@ -1131,6 +1297,13 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) extern const struct ethtool_ops qlcnic_ethtool_ops; +struct qlcnic_nic_template { + int (*get_mac_addr) (struct qlcnic_adapter *, u8*); + int (*config_bridged_mode) (struct qlcnic_adapter *, u32); + int (*config_led) (struct qlcnic_adapter *, u32, u32); + int (*start_firmware) (struct qlcnic_adapter *); +}; + #define QLCDB(adapter, lvl, _fmt, _args...) 
do { \ if (NETIF_MSG_##lvl & adapter->msg_enable) \ printk(KERN_INFO "%s: %s: " _fmt, \ diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c index c2c1f5cc16c6d992d9d00b3a2ab6b84569335874..cc5d861d9a128bfc932da00a6bb0917a7ac26931 100644 --- a/drivers/net/qlcnic/qlcnic_ctx.c +++ b/drivers/net/qlcnic/qlcnic_ctx.c @@ -88,12 +88,12 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { if (qlcnic_issue_cmd(adapter, - adapter->ahw.pci_func, - QLCHAL_VERSION, - recv_ctx->context_id, - mtu, - 0, - QLCNIC_CDRP_CMD_SET_MTU)) { + adapter->ahw.pci_func, + adapter->fw_hal_version, + recv_ctx->context_id, + mtu, + 0, + QLCNIC_CDRP_CMD_SET_MTU)) { dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); return -EIO; @@ -121,7 +121,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) int i, nrds_rings, nsds_rings; size_t rq_size, rsp_size; - u32 cap, reg, val; + u32 cap, reg, val, reg2; int err; struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; @@ -152,9 +152,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); - cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN); + cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN + | QLCNIC_CAP0_VALIDOFF); cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); + prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, + msix_handler); + prq->txrx_sds_binding = nsds_rings - 1; + prq->capabilities[0] = cpu_to_le32(cap); prq->host_int_crb_mode = cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); @@ -175,6 +180,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) for (i = 0; i < nrds_rings; i++) { rds_ring = &recv_ctx->rds_rings[i]; + rds_ring->producer = 0; prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); @@ -188,6 +194,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) for (i = 0; i < nsds_rings; i++) { sds_ring = &recv_ctx->sds_rings[i]; + sds_ring->consumer = 0; + memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); @@ -197,7 +205,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) phys_addr = hostrq_phys_addr; err = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, - QLCHAL_VERSION, + adapter->fw_hal_version, (u32)(phys_addr >> 32), (u32)(phys_addr & 0xffffffff), rq_size, @@ -216,8 +224,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) rds_ring = &recv_ctx->rds_rings[i]; reg = le32_to_cpu(prsp_rds[i].host_producer_crb); - rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter, - QLCNIC_REG(reg - 0x200)); + rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg; } prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) @@ -227,12 +234,10 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) sds_ring = &recv_ctx->sds_rings[i]; reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter, - QLCNIC_REG(reg - 0x200)); + reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); - reg = le32_to_cpu(prsp_sds[i].interrupt_crb); - sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter, - QLCNIC_REG(reg - 0x200)); + sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg; + sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2; } recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); @@ 
-253,7 +258,7 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) if (qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, - QLCHAL_VERSION, + adapter->fw_hal_version, recv_ctx->context_id, QLCNIC_DESTROY_CTX_RESET, 0, @@ -262,6 +267,8 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) dev_err(&adapter->pdev->dev, "Failed to destroy rx ctx in firmware\n"); } + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; } static int @@ -278,6 +285,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) dma_addr_t rq_phys_addr, rsp_phys_addr; struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + /* reset host resources */ + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + *(tx_ring->hw_consumer) = 0; + rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); rq_addr = pci_alloc_consistent(adapter->pdev, rq_size, &rq_phys_addr); @@ -319,7 +331,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) phys_addr = rq_phys_addr; err = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, - QLCHAL_VERSION, + adapter->fw_hal_version, (u32)(phys_addr >> 32), ((u32)phys_addr & 0xffffffff), rq_size, @@ -327,8 +339,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) if (err == QLCNIC_RCODE_SUCCESS) { temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); - tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter, - QLCNIC_REG(temp - 0x200)); + tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp; adapter->tx_context_id = le16_to_cpu(prsp->context_id); @@ -351,7 +362,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) { if (qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, - QLCHAL_VERSION, + adapter->fw_hal_version, adapter->tx_context_id, QLCNIC_DESTROY_CTX_RESET, 0, @@ -368,7 +379,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val) if (qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, - QLCHAL_VERSION, + adapter->fw_hal_version, reg, 0, 0, @@ -385,7 +396,7 @@ qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val) { return qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, - QLCHAL_VERSION, + adapter->fw_hal_version, reg, val, 0, @@ -457,15 +468,6 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) sds_ring->desc_head = (struct status_desc *)addr; } - - err = qlcnic_fw_cmd_create_rx_ctx(adapter); - if (err) - goto err_out_free; - err = qlcnic_fw_cmd_create_tx_ctx(adapter); - if (err) - goto err_out_free; - - set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); return 0; err_out_free: @@ -473,15 +475,27 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) return err; } -void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter) { - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - int ring; + int err; + err = qlcnic_fw_cmd_create_rx_ctx(adapter); + if (err) + return err; + err = qlcnic_fw_cmd_create_tx_ctx(adapter); + if (err) { + qlcnic_fw_cmd_destroy_rx_ctx(adapter); + return err; + } + + set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + return 0; +} + +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) +{ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { qlcnic_fw_cmd_destroy_rx_ctx(adapter); qlcnic_fw_cmd_destroy_tx_ctx(adapter); @@ -489,6 +503,15 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) /* Allow dma queues to drain after context reset */ msleep(20); } +} + +void 
qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_rds_ring *rds_ring;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
 
 	recv_ctx = &adapter->recv_ctx;
@@ -533,3 +556,430 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
 	}
 }
 
+/* Set MAC address of a NIC partition */
+int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
+{
+	int err = 0;
+	u32 arg1, arg2, arg3;
+
+	arg1 = adapter->ahw.pci_func | BIT_9;
+	arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	arg3 = mac[4] | (mac[5] << 16);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			arg2,
+			arg3,
+			QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to set mac address %d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get MAC address of a NIC partition */
+int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+	int err;
+	u32 arg1;
+
+	arg1 = adapter->ahw.pci_func | BIT_8;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+	if (err == QLCNIC_RCODE_SUCCESS)
+		qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
+				QLCNIC_ARG2_CRB_OFFSET, 0, mac);
+	else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get mac address %d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+				struct qlcnic_info *npar_info, u8 func_id)
+{
+	int err;
+	dma_addr_t nic_dma_t;
+	struct qlcnic_info *nic_info;
+	void *nic_info_addr;
+	size_t nic_size = sizeof(struct qlcnic_info);
+
+	nic_info_addr = pci_alloc_consistent(adapter->pdev,
+				nic_size, &nic_dma_t);
+	if (!nic_info_addr)
+		return -ENOMEM;
+	memset(nic_info_addr, 0, nic_size);
+
+	nic_info = (struct qlcnic_info *) nic_info_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(nic_dma_t),
+			LSD(nic_dma_t),
+			(func_id << 16 | nic_size),
+			QLCNIC_CDRP_CMD_GET_NIC_INFO);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
+		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+
+		dev_info(&adapter->pdev->dev,
+			"phy port: %d switch_mode: %d,\n"
+			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
+			"\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
+			npar_info->phys_port, npar_info->switch_mode,
+			npar_info->max_tx_ques, npar_info->max_rx_ques,
+			npar_info->min_tx_bw, npar_info->max_tx_bw,
+			npar_info->max_mtu, npar_info->capabilities);
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get nic info %d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+	return err;
+}
+
+/* Configure a NIC partition */
+int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+{
+	int err = -EIO;
+	dma_addr_t nic_dma_t;
+	void *nic_info_addr;
+	struct qlcnic_info *nic_info;
+	size_t nic_size = sizeof(struct qlcnic_info);
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
+			&nic_dma_t);
+	if (!nic_info_addr)
+		return -ENOMEM;
+
+	memset(nic_info_addr, 0, nic_size);
+	nic_info = (struct qlcnic_info *)nic_info_addr;
+
+	nic_info->pci_func = cpu_to_le16(nic->pci_func);
+	nic_info->op_mode = cpu_to_le16(nic->op_mode);
+	nic_info->phys_port = cpu_to_le16(nic->phys_port);
+	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+	nic_info->capabilities = cpu_to_le32(nic->capabilities);
+	nic_info->max_mac_filters = nic->max_mac_filters;
+	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(nic_dma_t),
+			LSD(nic_dma_t),
+			((nic->pci_func << 16) | nic_size),
+			QLCNIC_CDRP_CMD_SET_NIC_INFO);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to set nic info %d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+	return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+				struct qlcnic_pci_info *pci_info)
+{
+	int err = 0, i;
+	dma_addr_t pci_info_dma_t;
+	struct qlcnic_pci_info *npar;
+	void *pci_info_addr;
+	size_t npar_size = sizeof(struct qlcnic_pci_info);
+	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+
+	pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
+			&pci_info_dma_t);
+	if (!pci_info_addr)
+		return -ENOMEM;
+	memset(pci_info_addr, 0, pci_size);
+
+	npar = (struct qlcnic_pci_info *) pci_info_addr;
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			MSD(pci_info_dma_t),
+			LSD(pci_info_dma_t),
+			pci_size,
+			QLCNIC_CDRP_CMD_GET_PCI_INFO);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+			pci_info->id = le32_to_cpu(npar->id);
+			pci_info->active = le32_to_cpu(npar->active);
+			pci_info->type = le32_to_cpu(npar->type);
+			pci_info->default_port =
+				le32_to_cpu(npar->default_port);
+			pci_info->tx_min_bw =
+				le32_to_cpu(npar->tx_min_bw);
+			pci_info->tx_max_bw =
+				le32_to_cpu(npar->tx_max_bw);
+			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+		}
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get PCI Info %d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
+		pci_info_dma_t);
+	return err;
+}
+
+/* Reset a NIC partition */
+
+int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
+{
+	int err = -EIO;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			func_no,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_RESET_NPAR);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to issue reset partition %d\n", err);
+		err = -EIO;
+	}
+
+	return err;
+}
+
+/* Get eSwitch Capabilities */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
+					struct qlcnic_eswitch *eswitch)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			port,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+		eswitch->port = arg1 & 0xf;
+		eswitch->active_vports = LSB(arg2);
+		eswitch->max_ucast_filters = MSB(arg2);
+		eswitch->max_active_vlans = LSB(MSW(arg2));
+		if (arg1 & BIT_6)
+			eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+		if (arg1 & BIT_7)
+			eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
+		if (arg1 & BIT_8)
+			eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch capabilities %d\n", err);
+	}
+
+	return err;
+}
+
+/* Get current status of eswitch */
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
+				struct qlcnic_eswitch *eswitch)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			port,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+		eswitch->port = arg1 & 0xf;
+		eswitch->active_vports = LSB(arg2);
+		eswitch->active_ucast_filters = MSB(arg2);
+		eswitch->active_vlans = LSB(MSW(arg2));
+		if (arg1 & BIT_6)
+			eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+		if (arg1 & BIT_8)
+			eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch status %d\n", err);
+	}
+
+	return err;
+}
+
+/* Enable/Disable eSwitch */
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
+{
+	int err = -EIO;
+	u32 arg1, arg2;
+	struct qlcnic_eswitch *eswitch;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	eswitch = &adapter->eswitch[id];
+	if (!eswitch)
+		return err;
+
+	arg1 = eswitch->port | (enable ? BIT_4 : 0);
+	arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
+		(eswitch->max_active_vlans << 16);
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			arg2,
+			0,
+			QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to enable eswitch %d\n", eswitch->port);
+		eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
+		err = -EIO;
+	} else {
+		eswitch->flags |= QLCNIC_SWITCH_ENABLE;
+		dev_info(&adapter->pdev->dev,
+			"Enabled eSwitch for port %d\n", eswitch->port);
+	}
+
+	return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+				u8 enable_mirroring, u8 pci_func)
+{
+	int err = -EIO;
+	u32 arg1;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = id | (enable_mirroring ? BIT_4 : 0);
+	arg1 |= pci_func << 8;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure port mirroring %d on eswitch %d\n",
+			pci_func, id);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch %d for port mirroring: %d\n",
+			id, pci_func);
+	}
+
+	return err;
+}
+
+/* Configure eSwitch port */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
+		int vlan_tagging, u8 discard_tagged, u8 promsc_mode,
+		u8 mac_learn, u8 pci_func, u16 vlan_id)
+{
+	int err = -EIO;
+	u32 arg1;
+	struct qlcnic_eswitch *eswitch;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+
+	eswitch = &adapter->eswitch[id];
+	if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
+	arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
+	arg1 |= pci_func << 8;
+	if (vlan_tagging)
+		arg1 |= BIT_5 | (vlan_id << 16);
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure eswitch port %d\n", eswitch->port);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch for port %d\n", eswitch->port);
+	}
+
+	return err;
+}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 3bd514ec7e8fe07badf0cde0304a77a50edcc0ee..9328d59e21e0c6c2ff8011b19eb30f5f041fed5d 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,8 +69,6 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
 		QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
 	{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
 		QLC_OFF(stats.skb_alloc_failure)},
-	{"null skb",
-		QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
 	{"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
 	{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
@@ -350,7 +348,7 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
 	for (i = 0; diag_registers[i] != -1; i++)
 		regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return;
 
 	regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
@@ -580,8 +578,12 @@ qlcnic_set_pauseparam(struct net_device *netdev,
 		}
 		QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
 	} else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		if (!pause->rx_pause || pause->autoneg)
+			return -EOPNOTSUPP;
+
 		if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
 			return -EIO;
+
 		val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
 		if (port == 0) {
 			if (pause->tx_pause)
@@ -676,6 +678,12 @@ static int qlcnic_loopback_test(struct net_device *netdev)
 	int max_sds_rings = adapter->max_sds_rings;
 	int ret;
 
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		dev_warn(&adapter->pdev->dev, "Loopback test not supported "
+				"for non-privileged function\n");
+		return 0;
+	}
+
 	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EIO;
@@ -715,7 +723,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
 	adapter->diag_cnt = 0;
 	ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
-			QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+			adapter->fw_hal_version, adapter->portnum,
+			0,
0, 0x00000011); if (ret) goto done; @@ -821,6 +830,9 @@ static u32 qlcnic_get_tso(struct net_device *dev) static int qlcnic_set_tso(struct net_device *dev, u32 data) { + struct qlcnic_adapter *adapter = netdev_priv(dev); + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)) + return -EOPNOTSUPP; if (data) dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); else @@ -834,7 +846,10 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val) struct qlcnic_adapter *adapter = netdev_priv(dev); int ret; - ret = qlcnic_config_led(adapter, 1, 0xf); + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return -EIO; + + ret = adapter->nic_ops->config_led(adapter, 1, 0xf); if (ret) { dev_err(&adapter->pdev->dev, "Failed to set LED blink state.\n"); @@ -843,7 +858,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val) msleep_interruptible(val * 1000); - ret = qlcnic_config_led(adapter, 0, 0xf); + ret = adapter->nic_ops->config_led(adapter, 0, 0xf); if (ret) { dev_err(&adapter->pdev->dev, "Failed to reset LED blink state.\n"); @@ -905,7 +920,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev, { struct qlcnic_adapter *adapter = netdev_priv(netdev); - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) return -EINVAL; /* @@ -981,12 +996,19 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) struct qlcnic_adapter *adapter = netdev_priv(netdev); int hw_lro; - if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) + if (data & ~ETH_FLAG_LRO) return -EINVAL; - ethtool_op_set_flags(netdev, data); + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) + return -EINVAL; - hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0; + if (data & ETH_FLAG_LRO) { + hw_lro = QLCNIC_LRO_ENABLED; + netdev->features |= NETIF_F_LRO; + } else { + hw_lro = 0; + netdev->features &= ~NETIF_F_LRO; + } if (qlcnic_config_hw_lro(adapter, hw_lro)) return -EIO; diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h index ad9d167723c4480793a98153c0c3e0319c809ccc..15fc32070be3dfd689c7acece6658e866b22f59b 100644 --- a/drivers/net/qlcnic/qlcnic_hdr.h +++ b/drivers/net/qlcnic/qlcnic_hdr.h @@ -208,6 +208,39 @@ enum { QLCNIC_HW_PX_MAP_CRB_PGR0 }; +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + /* This field defines CRB adr [31:20] of the agents */ #define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ @@ -668,10 +701,11 @@ enum { #define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) #define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) -#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) -#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) -#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) +#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) +#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) +#define 
QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) #define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) +#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) #define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) #define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) @@ -684,15 +718,26 @@ enum { #define QLCNIC_DEV_FAILED 0x6 #define QLCNIC_DEV_QUISCENT 0x7 +#define QLCNIC_DEV_NPAR_NOT_RDY 0 +#define QLCNIC_DEV_NPAR_RDY 1 + +#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) #define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) #define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) #define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) #define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) #define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) +#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) +#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) + +#define QLCNIC_TYPE_NIC 1 +#define QLCNIC_TYPE_FCOE 2 +#define QLCNIC_TYPE_ISCSI 3 + #define QLCNIC_RCODE_DRIVER_INFO 0x20000000 -#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000 -#define QLCNIC_RCODE_FATAL_ERROR 0x80000000 +#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 +#define QLCNIC_RCODE_FATAL_ERROR BIT_31 #define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) #define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) @@ -721,6 +766,29 @@ struct qlcnic_legacy_intr_set { u32 pci_int_reg; }; +#define QLCNIC_FW_API 0x1b216c +#define QLCNIC_DRV_OP_MODE 0x1b2170 +#define QLCNIC_MSIX_BASE 0x132110 +#define QLCNIC_MAX_PCI_FUNC 8 + +/* PCI function operational mode */ +enum { + QLCNIC_MGMT_FUNC = 0, + QLCNIC_PRIV_FUNC = 1, + QLCNIC_NON_PRIV_FUNC = 2 +}; + +#define QLC_DEV_DRV_DEFAULT 0x11111111 + +#define LSB(x) ((uint8_t)(x)) +#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) + +#define LSW(x) ((uint16_t)((uint32_t)(x))) +#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) + +#define LSD(x) ((uint32_t)((uint64_t)(x))) +#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) + #define QLCNIC_LEGACY_INTR_CONFIG \ { \ { \ diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index 0c2e1f08f4593af61350bf4bcd4d45452a27f4f4..e08c8b0556a47bf2692d272d7d6a1cd029b69608 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c @@ -327,7 +327,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, i = 0; - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return -EIO; tx_ring = adapter->tx_ring; @@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, if (nr_desc >= qlcnic_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); - __netif_tx_unlock_bh(tx_ring->txq); - adapter->stats.xmit_off++; - return -EBUSY; + smp_mb(); + if (qlcnic_tx_avail(tx_ring) > nr_desc) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + adapter->stats.xmit_off++; + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } } do { @@ -407,10 +413,15 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr) return -ENOMEM; } memcpy(cur->mac_addr, addr, ETH_ALEN); - list_add_tail(&cur->list, &adapter->mac_list); - return qlcnic_sre_macaddr_change(adapter, - cur->mac_addr, QLCNIC_MAC_ADD); + if (qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, QLCNIC_MAC_ADD)) { + kfree(cur); + return -EIO; + } + + list_add_tail(&cur->list, &adapter->mac_list); + return 0; } void qlcnic_set_multi(struct net_device *netdev) @@ -420,7 +431,7 @@ void 
qlcnic_set_multi(struct net_device *netdev) u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; qlcnic_nic_add_mac(adapter, adapter->mac_addr); @@ -538,7 +549,7 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) return rv; } -int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable) +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { struct qlcnic_nic_req req; u64 word; @@ -704,21 +715,15 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) return rc; } -int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac) +int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac) { - u32 crbaddr, mac_hi, mac_lo; + u32 crbaddr; int pci_func = adapter->ahw.pci_func; crbaddr = CRB_MAC_BLOCK_START + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); - mac_lo = QLCRD32(adapter, crbaddr); - mac_hi = QLCRD32(adapter, crbaddr+4); - - if (pci_func & 1) - *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); - else - *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); + qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac); return 0; } @@ -766,7 +771,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter, * Out: 'off' is 2M pci map addr * side effect: lock crb window */ -static void +static int qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) { u32 window; @@ -775,6 +780,10 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) off -= QLCNIC_PCI_CRBSPACE; window = CRB_HI(off); + if (window == 0) { + dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); + return -EIO; + } writel(window, addr); if (readl(addr) != window) { @@ -782,7 +791,9 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) dev_warn(&adapter->pdev->dev, "failed to set CRB window to %d off 0x%lx\n", window, off); + return -EIO; } + return 0; } int @@ -803,11 +814,12 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) /* indirect access */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); crb_win_lock(adapter); - qlcnic_pci_set_crbwindow_2M(adapter, off); - writel(data, addr); + rv = qlcnic_pci_set_crbwindow_2M(adapter, off); + if (!rv) + writel(data, addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - return 0; + return rv; } dev_err(&adapter->pdev->dev, @@ -821,7 +833,7 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) { unsigned long flags; int rv; - u32 data; + u32 data = -1; void __iomem *addr = NULL; rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); @@ -833,8 +845,8 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) /* indirect access */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); crb_win_lock(adapter); - qlcnic_pci_set_crbwindow_2M(adapter, off); - data = readl(addr); + if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) + data = readl(addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); return data; diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 71a4e664ad76970d920c669f3fa8366f92ca62fa..75ba744b173c89d0f598d1d61183fd5d9dca1675 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -112,18 +112,45 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) rds_ring = 
&recv_ctx->rds_rings[ring]; for (i = 0; i < rds_ring->num_desc; ++i) { rx_buf = &(rds_ring->rx_buf_arr[i]); - if (rx_buf->state == QLCNIC_BUFFER_FREE) + if (rx_buf->skb == NULL) continue; + pci_unmap_single(adapter->pdev, rx_buf->dma, rds_ring->dma_size, PCI_DMA_FROMDEVICE); - if (rx_buf->skb != NULL) - dev_kfree_skb_any(rx_buf->skb); + + dev_kfree_skb_any(rx_buf->skb); } } } +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = &adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + spin_lock(&rds_ring->lock); + + INIT_LIST_HEAD(&rds_ring->free_list); + + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf++; + } + + spin_unlock(&rds_ring->lock); + } +} + void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_buffer *cmd_buf; @@ -181,7 +208,9 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) tx_ring = adapter->tx_ring; vfree(tx_ring->cmd_buf_arr); + tx_ring->cmd_buf_arr = NULL; kfree(adapter->tx_ring); + adapter->tx_ring = NULL; } int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) @@ -264,7 +293,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf->ref_handle = i; - rx_buf->state = QLCNIC_BUFFER_FREE; rx_buf++; } spin_lock_init(&rds_ring->lock); @@ -413,7 +441,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) /* resetall */ qlcnic_rom_lock(adapter); - QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff); + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); qlcnic_rom_unlock(adapter); if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || @@ -521,16 +549,13 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { u32 val; val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); - val = (val >> (adapter->portnum * 4)) & 0xf; - - if ((val & 0x3) != 1) { - dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n", - val); + val = QLC_DEV_GET_DRV(val, adapter->portnum); + if ((val & 0x3) != QLCNIC_TYPE_NIC) { + dev_err(&adapter->pdev->dev, + "Not an Ethernet NIC func=%u\n", val); return -EIO; } - adapter->physical_port = (val >> 2); - if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) timeo = 30; @@ -544,16 +569,34 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { return 0; } +int +qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) +{ + u32 ver = -1, min_ver; + + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); + + ver = QLCNIC_DECODE_VERSION(ver); + min_ver = QLCNIC_MIN_FW_VERSION; + + if (ver < min_ver) { + dev_err(&adapter->pdev->dev, + "firmware version %d.%d.%d unsupported." 
+ "Min supported version %d.%d.%d\n", + _major(ver), _minor(ver), _build(ver), + _major(min_ver), _minor(min_ver), _build(min_ver)); + return -EINVAL; + } + + return 0; +} + static int qlcnic_has_mn(struct qlcnic_adapter *adapter) { - u32 capability, flashed_ver; + u32 capability; capability = 0; - qlcnic_rom_fast_read(adapter, - QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver); - flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver); - capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) return 1; @@ -1007,7 +1050,7 @@ static int qlcnic_validate_firmware(struct qlcnic_adapter *adapter) { __le32 val; - u32 ver, min_ver, bios, min_size; + u32 ver, bios, min_size; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; u8 fw_type = adapter->fw_type; @@ -1029,12 +1072,9 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter) return -EINVAL; val = qlcnic_get_fw_version(adapter); - - min_ver = QLCNIC_VERSION_CODE(4, 0, 216); - ver = QLCNIC_DECODE_VERSION(val); - if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) { + if (ver < QLCNIC_MIN_FW_VERSION) { dev_err(&pdev->dev, "%s: firmware version %d.%d.%d unsupported\n", fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); @@ -1122,7 +1162,7 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter) adapter->fw = NULL; } -int qlcnic_phantom_init(struct qlcnic_adapter *adapter) +static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) { u32 val; int retries = 60; @@ -1147,7 +1187,8 @@ int qlcnic_phantom_init(struct qlcnic_adapter *adapter) QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); out_err: - dev_err(&adapter->pdev->dev, "firmware init failed\n"); + dev_err(&adapter->pdev->dev, "Command Peg initialization not " + "complete, state: 0x%x.\n", val); return -EIO; } @@ -1180,6 +1221,10 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter) { int err; + err = qlcnic_cmd_peg_ready(adapter); + if (err) + return err; + err = qlcnic_receive_peg_ready(adapter); if (err) return err; @@ -1265,14 +1310,12 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, dma_addr_t dma; struct pci_dev *pdev = adapter->pdev; - buffer->skb = dev_alloc_skb(rds_ring->skb_size); - if (!buffer->skb) { + skb = dev_alloc_skb(rds_ring->skb_size); + if (!skb) { adapter->stats.skb_alloc_failure++; return -ENOMEM; } - skb = buffer->skb; - skb_reserve(skb, 2); dma = pci_map_single(pdev, skb->data, @@ -1281,13 +1324,11 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, if (pci_dma_mapping_error(pdev, dma)) { adapter->stats.rx_dma_map_error++; dev_kfree_skb_any(skb); - buffer->skb = NULL; return -ENOMEM; } buffer->skb = skb; buffer->dma = dma; - buffer->state = QLCNIC_BUFFER_BUSY; return 0; } @@ -1300,14 +1341,15 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, buffer = &rds_ring->rx_buf_arr[index]; + if (unlikely(buffer->skb == NULL)) { + WARN_ON(1); + return NULL; + } + pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, PCI_DMA_FROMDEVICE); skb = buffer->skb; - if (!skb) { - adapter->stats.null_skb++; - goto no_skb; - } if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { adapter->stats.csummed++; @@ -1319,8 +1361,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, skb->dev = adapter->netdev; buffer->skb = NULL; -no_skb: - buffer->state = QLCNIC_BUFFER_FREE; + return skb; } @@ -1495,7 +1536,7 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) WARN_ON(desc_cnt > 1); - if 
(rxbuf)
+	if (likely(rxbuf))
 		list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
 	else
 		adapter->stats.null_rxbuf++;
@@ -1701,3 +1742,24 @@ qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
 	sds_ring->consumer = consumer;
 	writel(consumer, sds_ring->crb_sts_consumer);
 }
+
+void
+qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
+			u8 alt_mac, u8 *mac)
+{
+	u32 mac_low, mac_high;
+	int i;
+
+	mac_low = QLCRD32(adapter, off1);
+	mac_high = QLCRD32(adapter, off2);
+
+	if (alt_mac) {
+		mac_low |= (mac_low >> 16) | (mac_high << 16);
+		mac_high >>= 16;
+	}
+
+	for (i = 0; i < 2; i++)
+		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+	for (i = 2; i < 6; i++)
+		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 23ea9caa526177ff522e41f017a75aeb8be67a26..b9615bd745ea5d9175e6d6153d5c924084d550d2 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -34,15 +34,16 @@
 #include
 #include
 #include
+#include <linux/aer.h>
 
-MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
 MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
 
 char qlcnic_driver_name[] = "qlcnic";
-static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
-	QLCNIC_LINUX_VERSIONID;
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
 
 static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
 
@@ -65,13 +66,16 @@ static int load_fw_file;
 module_param(load_fw_file, int, 0644);
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 
+static int qlcnic_config_npars;
+module_param(qlcnic_config_npars, int, 0644);
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
+
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
 		const struct pci_device_id *ent);
 static void __devexit qlcnic_remove(struct pci_dev *pdev);
 static int qlcnic_open(struct net_device *netdev);
 static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
-static void qlcnic_tx_timeout_task(struct work_struct *work);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
 static void qlcnic_fw_poll_work(struct work_struct *work);
@@ -79,6 +83,7 @@ static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 		work_func_t func, int delay);
 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
 static int qlcnic_poll(struct napi_struct *napi, int budget);
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev);
 #endif
@@ -99,7 +104,12 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data);
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
 static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 
 /* PCI Device ID Table */
 #define ENTRY(device) \
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC,
(device)), \ @@ -120,12 +130,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { writel(tx_ring->producer, tx_ring->crb_cmd_producer); - - if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) { - netif_stop_queue(adapter->netdev); - smp_mb(); - adapter->stats.xmit_off++; - } } static const u32 msi_tgt_status[8] = { @@ -184,8 +188,13 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_add(netdev, &sds_ring->napi, - qlcnic_poll, QLCNIC_NETDEV_WEIGHT); + + if (ring == adapter->max_sds_rings - 1) + netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, + QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2); } return 0; @@ -307,19 +316,14 @@ static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) static int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) { - int i; - unsigned char *p; - u64 mac_addr; + u8 mac_addr[ETH_ALEN]; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0) + if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0) return -EIO; - p = (unsigned char *)&mac_addr; - for (i = 0; i < 6; i++) - netdev->dev_addr[i] = *(p + 5 - i); - + memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); @@ -340,7 +344,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; - if (netif_running(netdev)) { + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_detach(netdev); qlcnic_napi_disable(adapter); } @@ -349,7 +353,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); qlcnic_set_multi(adapter->netdev); - if (netif_running(netdev)) { + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_attach(netdev); qlcnic_napi_enable(adapter); } @@ -371,6 +375,20 @@ static const struct net_device_ops qlcnic_netdev_ops = { #endif }; +static struct qlcnic_nic_template qlcnic_ops = { + .get_mac_addr = qlcnic_get_mac_address, + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .start_firmware = qlcnic_start_firmware +}; + +static struct qlcnic_nic_template qlcnic_vf_ops = { + .get_mac_addr = qlcnic_get_mac_address, + .config_bridged_mode = qlcnicvf_config_bridged_mode, + .config_led = qlcnicvf_config_led, + .start_firmware = qlcnicvf_start_firmware +}; + static void qlcnic_setup_intr(struct qlcnic_adapter *adapter) { @@ -452,6 +470,169 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) iounmap(adapter->ahw.pci_base0); } +static int +qlcnic_init_pci_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC]; + int i, ret = 0, err; + u8 pfn; + + if (!adapter->npars) + adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * + QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); + if (!adapter->npars) + return -ENOMEM; + + if (!adapter->eswitch) + adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * + QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); + if (!adapter->eswitch) { + err = -ENOMEM; + goto err_eswitch; + } + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (!ret) { + for (i = 0; i < 
QLCNIC_MAX_PCI_FUNC; i++) { + pfn = pci_info[i].id; + if (pfn > QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + adapter->npars[pfn].active = pci_info[i].active; + adapter->npars[pfn].type = pci_info[i].type; + adapter->npars[pfn].phy_port = pci_info[i].default_port; + adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN; + adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; + adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; + } + + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) + adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; + + return ret; + } + + kfree(adapter->eswitch); + adapter->eswitch = NULL; +err_eswitch: + kfree(adapter->npars); + + return ret; +} + +static int +qlcnic_set_function_modes(struct qlcnic_adapter *adapter) +{ + u8 id; + u32 ref_count; + int i, ret = 1; + u32 data = QLCNIC_MGMT_FUNC; + void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; + + /* If other drivers are not in use set their privilege level */ + ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); + ret = qlcnic_api_lock(adapter); + if (ret) + goto err_lock; + if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func)) + goto err_npar; + + if (qlcnic_config_npars) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + id = i; + if (adapter->npars[i].type != QLCNIC_TYPE_NIC || + id == adapter->ahw.pci_func) + continue; + data |= (qlcnic_config_npars & + QLC_DEV_SET_DRV(0xf, id)); + } + } else { + data = readl(priv_op); + data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) | + (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, + adapter->ahw.pci_func)); + } + writel(data, priv_op); +err_npar: + qlcnic_api_unlock(adapter); +err_lock: + return ret; +} + +static u32 +qlcnic_get_driver_mode(struct qlcnic_adapter *adapter) +{ + void __iomem *msix_base_addr; + void __iomem *priv_op; + struct qlcnic_info nic_info; + u32 func; + u32 msix_base; + u32 op_mode, priv_level; + + /* Determine FW API version */ + adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API); + + /* Find PCI function number */ + pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); + msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE; + msix_base = readl(msix_base_addr); + func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; + adapter->ahw.pci_func = func; + + if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) { + adapter->capabilities = nic_info.capabilities; + + if (adapter->capabilities & BIT_6) + adapter->flags |= QLCNIC_ESWITCH_ENABLED; + else + adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + } + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + adapter->nic_ops = &qlcnic_ops; + return adapter->fw_hal_version; + } + + /* Determine function privilege level */ + priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; + op_mode = readl(priv_op); + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); + + switch (priv_level) { + case QLCNIC_MGMT_FUNC: + adapter->op_mode = QLCNIC_MGMT_FUNC; + adapter->nic_ops = &qlcnic_ops; + qlcnic_init_pci_info(adapter); + /* Set privilege level for other functions */ + qlcnic_set_function_modes(adapter); + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Management function\n", + adapter->fw_hal_version); + break; + case QLCNIC_PRIV_FUNC: + adapter->op_mode = QLCNIC_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Privileged function\n", + adapter->fw_hal_version); + adapter->nic_ops = &qlcnic_ops; + break; + case QLCNIC_NON_PRIV_FUNC: 
+ adapter->op_mode = QLCNIC_NON_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged function\n", + adapter->fw_hal_version); + adapter->nic_ops = &qlcnic_vf_ops; + break; + default: + dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n", + priv_level); + return 0; + } + return adapter->fw_hal_version; +} + static int qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) { @@ -460,7 +641,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) unsigned long mem_len, pci_len0 = 0; struct pci_dev *pdev = adapter->pdev; - int pci_func = adapter->ahw.pci_func; /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ @@ -483,8 +663,13 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) adapter->ahw.pci_base0 = mem_ptr0; adapter->ahw.pci_len0 = pci_len0; + if (!qlcnic_get_driver_mode(adapter)) { + iounmap(adapter->ahw.pci_base0); + return -EIO; + } + adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); + QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func))); return 0; } @@ -509,7 +694,7 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name) } if (!found) - name = "Unknown"; + sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); } static void @@ -521,7 +706,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) int i, offset, val; int *ptr32; struct pci_dev *pdev = adapter->pdev; - + struct qlcnic_info nic_info; adapter->driver_mismatch = 0; ptr32 = (int *)&serial_num; @@ -553,8 +738,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) dev_info(&pdev->dev, "firmware v%d.%d.%d\n", fw_major, fw_minor, fw_build); - adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1); - adapter->flags &= ~QLCNIC_LRO_ENABLED; if (adapter->ahw.port_type == QLCNIC_XGBE) { @@ -565,6 +748,16 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; } + if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) { + adapter->physical_port = nic_info.phys_port; + adapter->switch_mode = nic_info.switch_mode; + adapter->max_tx_ques = nic_info.max_tx_ques; + adapter->max_rx_ques = nic_info.max_rx_ques; + adapter->capabilities = nic_info.capabilities; + adapter->max_mac_filters = nic_info.max_mac_filters; + adapter->max_mtu = nic_info.max_mtu; + } + adapter->msix_supported = !!use_msi_x; adapter->rss_supported = !!use_msi_x; @@ -573,6 +766,50 @@ qlcnic_check_options(struct qlcnic_adapter *adapter) adapter->max_rds_rings = 2; } +static int +qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) +{ + int i, err = 0; + struct qlcnic_npar_info *npar; + struct qlcnic_info nic_info; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + !adapter->need_fw_reset) + return 0; + + if (adapter->op_mode == QLCNIC_MGMT_FUNC) { + /* Set the NPAR config data after FW reset */ + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + npar = &adapter->npars[i]; + if (npar->type != QLCNIC_TYPE_NIC) + continue; + err = qlcnic_get_nic_info(adapter, &nic_info, i); + if (err) + goto err_out; + nic_info.min_tx_bw = npar->min_bw; + nic_info.max_tx_bw = npar->max_bw; + err = qlcnic_set_nic_info(adapter, &nic_info); + if (err) + goto err_out; + + if (npar->enable_pm) { + err = qlcnic_config_port_mirroring(adapter, + npar->dest_npar, 1, i); + if (err) + goto err_out; + + } + npar->mac_learning = DEFAULT_MAC_LEARN; + npar->host_vlan_tag = 0; + npar->promisc_mode = 0; + npar->discard_tagged = 0; + npar->vlan_id = 0; + } + } +err_out: 
+ return err; +} + static int qlcnic_start_firmware(struct qlcnic_adapter *adapter) { @@ -591,8 +828,12 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter) if (load_fw_file) qlcnic_request_firmware(adapter); - else + else { + if (qlcnic_check_flash_fw_ver(adapter)) + goto err_out; + adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; + } err = qlcnic_need_fw_reset(adapter); if (err < 0) @@ -602,6 +843,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter) if (first_boot != 0x55555555) { QLCWR32(adapter, CRB_CMDPEG_STATE, 0); + QLCWR32(adapter, CRB_RCVPEG_STATE, 0); qlcnic_pinit_from_rom(adapter); msleep(1); } @@ -624,7 +866,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter) wait_init: /* Handshake with the card before we register the devices. */ - err = qlcnic_phantom_init(adapter); + err = qlcnic_init_firmware(adapter); if (err) goto err_out; @@ -632,6 +874,9 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter) qlcnic_idc_debug_info(adapter, 1); qlcnic_check_options(adapter); + if (qlcnic_reset_npar_config(adapter)) + goto err_out; + qlcnic_dev_set_npar_ready(adapter); adapter->need_fw_reset = 0; @@ -716,9 +961,23 @@ qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter) static int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) { + int ring; + struct qlcnic_host_rds_ring *rds_ring; + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return -EIO; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return 0; + + if (qlcnic_fw_create_ctx(adapter)) + return -EIO; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx.rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, ring, rds_ring); + } + qlcnic_set_multi(netdev); qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); @@ -736,6 +995,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) qlcnic_linkevent_request(adapter, 1); + adapter->reset_context = 0; set_bit(__QLCNIC_DEV_UP, &adapter->state); return 0; } @@ -775,6 +1035,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) qlcnic_napi_disable(adapter); + qlcnic_fw_destroy_ctx(adapter); + + qlcnic_reset_rx_buffers_list(adapter); qlcnic_release_tx_buffers(adapter); spin_unlock(&adapter->tx_clean_lock); } @@ -796,16 +1059,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - int err, ring; - struct qlcnic_host_rds_ring *rds_ring; + int err; if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) return 0; - err = qlcnic_init_firmware(adapter); - if (err) - return err; - err = qlcnic_napi_add(adapter, netdev); if (err) return err; @@ -813,7 +1071,7 @@ qlcnic_attach(struct qlcnic_adapter *adapter) err = qlcnic_alloc_sw_resources(adapter); if (err) { dev_err(&pdev->dev, "Error in setting sw resources\n"); - return err; + goto err_out_napi_del; } err = qlcnic_alloc_hw_resources(adapter); @@ -822,16 +1080,10 @@ qlcnic_attach(struct qlcnic_adapter *adapter) goto err_out_free_sw; } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx.rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, ring, rds_ring); - } - err = qlcnic_request_irq(adapter); if (err) { dev_err(&pdev->dev, "failed to setup interrupt\n"); - goto err_out_free_rxbuf; + goto err_out_free_hw; } qlcnic_init_coalesce_defaults(adapter); @@ -841,11 +1093,12 @@ qlcnic_attach(struct qlcnic_adapter *adapter) adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; return 0; -err_out_free_rxbuf: - qlcnic_release_rx_buffers(adapter); 
+err_out_free_hw: qlcnic_free_hw_resources(adapter); err_out_free_sw: qlcnic_free_sw_resources(adapter); +err_out_napi_del: + qlcnic_napi_del(adapter); return err; } @@ -880,6 +1133,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) } } + qlcnic_fw_destroy_ctx(adapter); + qlcnic_detach(adapter); adapter->diag_test = 0; @@ -898,6 +1153,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_ring; int ring; int ret; @@ -917,6 +1173,18 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) return ret; } + ret = qlcnic_fw_create_ctx(adapter); + if (ret) { + qlcnic_detach(adapter); + netif_device_attach(netdev); + return ret; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx.rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, ring, rds_ring); + } + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &adapter->recv_ctx.sds_rings[ring]; @@ -928,6 +1196,27 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) return 0; } +/* Reset context in hardware only */ +static int +qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + + qlcnic_down(adapter, netdev); + + qlcnic_up(adapter, netdev); + + netif_device_attach(netdev); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return 0; +} + int qlcnic_reset_context(struct qlcnic_adapter *adapter) { @@ -971,18 +1260,21 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, adapter->max_mc_count = 38; netdev->netdev_ops = &qlcnic_netdev_ops; - netdev->watchdog_timeo = 2*HZ; + netdev->watchdog_timeo = 5*HZ; qlcnic_change_mtu(netdev, netdev->mtu); SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); - netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); - netdev->features |= (NETIF_F_GRO); - netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); + netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO); + netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); - netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); - netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { + netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); + netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); + } if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; @@ -997,8 +1289,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, netdev->irq = adapter->msix_entries[0].vector; - INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task); - if (qlcnic_read_mac_addr(adapter)) dev_warn(&pdev->dev, "failed to read mac addr\n"); @@ -1036,7 +1326,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev = NULL; struct qlcnic_adapter *adapter = NULL; int err; - int pci_func_id = PCI_FUNC(pdev->devfn); uint8_t revision_id; uint8_t pci_using_dac; @@ -1058,6 +1347,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_pdev; pci_set_master(pdev); + pci_enable_pcie_error_reporting(pdev); netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); if (!netdev) { @@ -1072,7 +1362,6 @@ qlcnic_probe(struct pci_dev 
*pdev, const struct pci_device_id *ent) adapter->netdev = netdev; adapter->pdev = pdev; adapter->dev_rst_time = jiffies; - adapter->ahw.pci_func = pci_func_id; revision_id = pdev->revision; adapter->ahw.revision_id = revision_id; @@ -1088,7 +1377,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_netdev; /* This will be reset for mezz cards */ - adapter->portnum = pci_func_id; + adapter->portnum = adapter->ahw.pci_func; err = qlcnic_get_board_info(adapter); if (err) { @@ -1102,7 +1391,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (qlcnic_setup_idc_param(adapter)) goto err_out_iounmap; - err = qlcnic_start_firmware(adapter); + err = adapter->nic_ops->start_firmware(adapter); if (err) { dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); goto err_out_decr_ref; @@ -1171,10 +1460,13 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev) unregister_netdev(netdev); - cancel_work_sync(&adapter->tx_timeout_task); - qlcnic_detach(adapter); + if (adapter->npars != NULL) + kfree(adapter->npars); + if (adapter->eswitch != NULL) + kfree(adapter->eswitch); + qlcnic_clr_all_drv_state(adapter); clear_bit(__QLCNIC_RESETTING, &adapter->state); @@ -1187,6 +1479,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev) qlcnic_release_firmware(adapter); + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); @@ -1206,10 +1499,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev) if (netif_running(netdev)) qlcnic_down(adapter, netdev); - cancel_work_sync(&adapter->tx_timeout_task); - - qlcnic_detach(adapter); - qlcnic_clr_all_drv_state(adapter); clear_bit(__QLCNIC_RESETTING, &adapter->state); @@ -1263,35 +1552,23 @@ qlcnic_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); - err = qlcnic_start_firmware(adapter); + err = adapter->nic_ops->start_firmware(adapter); if (err) { dev_err(&pdev->dev, "failed to start firmware\n"); return err; } if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (err) - goto err_out; - err = qlcnic_up(adapter, netdev); if (err) - goto err_out_detach; - + goto done; qlcnic_config_indev_addr(netdev, NETDEV_UP); } - +done: netif_device_attach(netdev); qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); return 0; - -err_out_detach: - qlcnic_detach(adapter); -err_out: - qlcnic_clr_all_drv_state(adapter); - netif_device_attach(netdev); - return err; } #endif @@ -1340,11 +1617,11 @@ qlcnic_tso_check(struct net_device *netdev, u8 opcode = TX_ETHER_PKT; __be16 protocol = skb->protocol; u16 flags = 0, vid = 0; - u32 producer; int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; struct cmd_desc_type0 *hwdesc; struct vlan_ethhdr *vh; struct qlcnic_adapter *adapter = netdev_priv(netdev); + u32 producer = tx_ring->producer; if (protocol == cpu_to_be16(ETH_P_8021Q)) { @@ -1360,6 +1637,11 @@ qlcnic_tso_check(struct net_device *netdev, vlan_oob = 1; } + if (*(skb->data) & BIT_0) { + flags |= BIT_0; + memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); + } + if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && skb_shinfo(skb)->gso_size > 0) { @@ -1409,7 +1691,6 @@ qlcnic_tso_check(struct net_device *netdev, /* For LSO, we need to copy the MAC/IP/TCP headers into * the descriptor ring */ - producer = tx_ring->producer; copied = 0; offset = 2; @@ -1537,10 +1818,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* 4 fragments per cmd des */ no_of_desc = 
(frag_count + 3) >> 2; - if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) { + if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); - adapter->stats.xmit_off++; - return NETDEV_TX_BUSY; + smp_mb(); + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_start_queue(netdev); + else { + adapter->stats.xmit_off++; + return NETDEV_TX_BUSY; + } } producer = tx_ring->producer; @@ -1675,35 +1961,11 @@ static void qlcnic_tx_timeout(struct net_device *netdev) return; dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - schedule_work(&adapter->tx_timeout_task); -} - -static void qlcnic_tx_timeout_task(struct work_struct *work) -{ - struct qlcnic_adapter *adapter = - container_of(work, struct qlcnic_adapter, tx_timeout_task); - - if (!netif_running(adapter->netdev)) - return; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return; if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) - goto request_reset; - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - if (!qlcnic_reset_context(adapter)) { - adapter->netdev->trans_start = jiffies; - return; - - /* context reset failed, fall through for fw reset */ - } - -request_reset: - adapter->need_fw_reset = 1; - clear_bit(__QLCNIC_RESETTING, &adapter->state); - QLCDB(adapter, DRV, "Resetting adapter\n"); + adapter->need_fw_reset = 1; + else + adapter->reset_context = 1; } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) @@ -1846,14 +2108,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { - __netif_tx_lock(tx_ring->txq, smp_processor_id()); if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { netif_wake_queue(netdev); - adapter->tx_timeo_cnt = 0; adapter->stats.xmit_on++; } - __netif_tx_unlock(tx_ring->txq); } + adapter->tx_timeo_cnt = 0; } /* * If everything is freed up to consumer then check if the ring is full @@ -1898,6 +2158,25 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) return work_done; } +static int qlcnic_rx_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_sds_ring *sds_ring = + container_of(napi, struct qlcnic_host_sds_ring, napi); + + struct qlcnic_adapter *adapter = sds_ring->adapter; + int work_done; + + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_int(sds_ring); + } + + return work_done; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void qlcnic_poll_controller(struct net_device *netdev) { @@ -2109,7 +2388,7 @@ qlcnic_fwinit_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); - u32 dev_state = 0xf; + u32 dev_state = 0xf, npar_state; if (qlcnic_api_lock(adapter)) goto err_ret; @@ -2122,6 +2401,19 @@ qlcnic_fwinit_work(struct work_struct *work) return; } + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + if (npar_state == QLCNIC_DEV_NPAR_RDY) { + qlcnic_api_unlock(adapter); + goto wait_npar; + } else { + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, + FW_POLL_DELAY); + qlcnic_api_unlock(adapter); + return; + } + } + if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", adapter->reset_ack_timeo); @@ -2154,7 +2446,7 @@ qlcnic_fwinit_work(struct work_struct *work) qlcnic_api_unlock(adapter); - 
if (!qlcnic_start_firmware(adapter)) { + if (!adapter->nic_ops->start_firmware(adapter)) { qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); return; } @@ -2163,6 +2455,7 @@ qlcnic_fwinit_work(struct work_struct *work) qlcnic_api_unlock(adapter); +wait_npar: dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); @@ -2177,7 +2470,7 @@ qlcnic_fwinit_work(struct work_struct *work) break; default: - if (!qlcnic_start_firmware(adapter)) { + if (!adapter->nic_ops->start_firmware(adapter)) { qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); return; } @@ -2202,10 +2495,6 @@ qlcnic_detach_work(struct work_struct *work) qlcnic_down(adapter, netdev); - rtnl_lock(); - qlcnic_detach(adapter); - rtnl_unlock(); - status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); if (status & QLCNIC_RCODE_FATAL_ERROR) @@ -2237,6 +2526,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) { u32 state; + adapter->need_fw_reset = 1; if (qlcnic_api_lock(adapter)) return; @@ -2251,10 +2541,36 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) qlcnic_api_unlock(adapter); } +/* Transit to NPAR READY state from NPAR NOT READY state */ +static void +qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) +{ + u32 state; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + if (qlcnic_api_lock(adapter)) + return; + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + + if (state != QLCNIC_DEV_NPAR_RDY) { + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_RDY); + QLCDB(adapter, DRV, "NPAR READY state set\n"); + } + + qlcnic_api_unlock(adapter); +} + static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, work_func_t func, int delay) { + if (test_bit(__QLCNIC_AER, &adapter->state)) + return; + INIT_DELAYED_WORK(&adapter->fw_work, func); schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay)); } @@ -2274,18 +2590,10 @@ qlcnic_attach_work(struct work_struct *work) struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); struct net_device *netdev = adapter->netdev; - int err; if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (err) - goto done; - - err = qlcnic_up(adapter, netdev); - if (err) { - qlcnic_detach(adapter); + if (qlcnic_up(adapter, netdev)) goto done; - } qlcnic_config_indev_addr(netdev, NETDEV_UP); } @@ -2322,6 +2630,13 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) adapter->fw_fail_cnt = 0; if (adapter->need_fw_reset) goto detach; + + if (adapter->reset_context && + auto_fw_reset == AUTO_FW_RESET_ENABLED) { + qlcnic_reset_hw_context(adapter); + adapter->netdev->trans_start = jiffies; + } + return 0; } @@ -2330,7 +2645,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) qlcnic_dev_request_reset(adapter); - clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + if ((auto_fw_reset == AUTO_FW_RESET_ENABLED)) + clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); dev_info(&netdev->dev, "firmware hang detected\n"); @@ -2365,6 +2681,161 @@ qlcnic_fw_poll_work(struct work_struct *work) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); } +static int qlcnic_is_first_func(struct pci_dev *pdev) +{ + struct pci_dev *oth_pdev; + int val = pdev->devfn; + + while (val-- > 0) { + oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr + (pdev->bus), pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); + if (!oth_pdev) + continue; + + if (oth_pdev->current_state != PCI_D3cold) { 
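/*
 * Note on the D3cold check above: qlcnic_is_first_func() walks the
 * lower-numbered PCI functions of the same slot.  A sibling that is not
 * in D3cold is already powered up and will own the firmware restart, so
 * the helper reports "not first" (returns 0).  Only when every sibling
 * is powered down does it return 1, and qlcnic_attach_func() below then
 * forces a firmware restart after the AER slot reset.
 */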
+			pci_dev_put(oth_pdev);
+			return 0;
+		}
+		pci_dev_put(oth_pdev);
+	}
+	return 1;
+}
+
+static int qlcnic_attach_func(struct pci_dev *pdev)
+{
+	int err, first_func;
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	pdev->error_state = pci_channel_io_normal;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+
+	first_func = qlcnic_is_first_func(pdev);
+
+	if (qlcnic_api_lock(adapter))
+		return -EINVAL;
+
+	if (first_func) {
+		adapter->need_fw_reset = 1;
+		set_bit(__QLCNIC_START_FW, &adapter->state);
+		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+		QLCDB(adapter, DRV, "Restarting fw\n");
+	}
+	qlcnic_api_unlock(adapter);
+
+	err = adapter->nic_ops->start_firmware(adapter);
+	if (err)
+		return err;
+
+	qlcnic_clr_drv_state(adapter);
+	qlcnic_setup_intr(adapter);
+
+	if (netif_running(netdev)) {
+		err = qlcnic_attach(adapter);
+		if (err) {
+			qlcnic_clr_all_drv_state(adapter);
+			clear_bit(__QLCNIC_AER, &adapter->state);
+			netif_device_attach(netdev);
+			return err;
+		}
+
+		err = qlcnic_up(adapter, netdev);
+		if (err)
+			goto done;
+
+		qlcnic_config_indev_addr(netdev, NETDEV_UP);
+	}
+ done:
+	netif_device_attach(netdev);
+	return err;
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (state == pci_channel_io_normal)
+		return PCI_ERS_RESULT_RECOVERED;
+
+	set_bit(__QLCNIC_AER, &adapter->state);
+	netif_device_detach(netdev);
+
+	cancel_delayed_work_sync(&adapter->fw_work);
+
+	if (netif_running(netdev))
+		qlcnic_down(adapter, netdev);
+
+	qlcnic_detach(adapter);
+	qlcnic_teardown_intr(adapter);
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+	return qlcnic_attach_func(pdev) ?
+					PCI_ERS_RESULT_DISCONNECT :
+					PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
+	    test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+						FW_POLL_DELAY);
+}
+
+
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+	int err;
+
+	err = qlcnic_can_start_firmware(adapter);
+	if (err)
+		return err;
+
+	qlcnic_check_options(adapter);
+
+	adapter->need_fw_reset = 0;
+
+	return err;
+}
+
+static int
+qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+	return -EOPNOTSUPP;
+}
+
 static ssize_t
 qlcnic_store_bridged_mode(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
@@ -2376,13 +2847,13 @@ qlcnic_store_bridged_mode(struct device *dev,
 	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
 		goto err_out;
 
-	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		goto err_out;
 
 	if (strict_strtoul(buf, 2, &new))
 		goto err_out;
 
-	if (!qlcnic_config_bridged_mode(adapter, !!new))
+	if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
 		ret = len;
 
 err_out:
@@ -2585,6 +3056,361 @@ static struct bin_attribute bin_attr_mem = {
 	.write = qlcnic_sysfs_write_mem,
 };
 
+static int
+validate_pm_config(struct qlcnic_adapter *adapter,
+			struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+
+	u8 src_pci_func, s_esw_id, d_esw_id;
+	u8 dest_pci_func;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		src_pci_func = pm_cfg[i].pci_func;
+		dest_pci_func = pm_cfg[i].dest_npar;
+		if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
+				|| dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+			return QL_STATUS_INVALID_PARAM;
+
+		if (!IS_VALID_MODE(pm_cfg[i].action))
+			return QL_STATUS_INVALID_PARAM;
+
+		s_esw_id = adapter->npars[src_pci_func].phy_port;
+		d_esw_id = adapter->npars[dest_pci_func].phy_port;
+
+		if (s_esw_id != d_esw_id)
+			return QL_STATUS_INVALID_PARAM;
+
+	}
+	return 0;
+
+}
+
+static ssize_t
+qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	struct qlcnic_pm_func_cfg *pm_cfg;
+	u32 id, action, pci_func;
+	int count, rem, i, ret;
+
+	count = size / sizeof(struct qlcnic_pm_func_cfg);
+	rem = size % sizeof(struct qlcnic_pm_func_cfg);
+	if (rem)
+		return QL_STATUS_INVALID_PARAM;
+
+	pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
+
+	ret = validate_pm_config(adapter, pm_cfg, count);
+	if (ret)
+		return ret;
+	for (i = 0; i < count; i++) {
+		pci_func = pm_cfg[i].pci_func;
+		action = pm_cfg[i].action;
+		id = adapter->npars[pci_func].phy_port;
+		ret = qlcnic_config_port_mirroring(adapter, id,
+						action, pci_func);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < count; i++) {
+		pci_func = pm_cfg[i].pci_func;
+		id = adapter->npars[pci_func].phy_port;
+		adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
+		adapter->npars[pci_func].dest_npar = id;
+	}
+	return size;
+}
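/*
 * Illustrative sketch only, not part of the patch: how a user-space tool
 * might drive the new "pm_config" binary sysfs attribute written above.
 * qlcnic_sysfs_write_pm_config() expects the buffer to be a whole number
 * of struct qlcnic_pm_func_cfg records, and the attribute is only created
 * on the management function when the embedded switch is enabled (see
 * qlcnic_create_diag_entries()).  The record layout, the action values
 * and the sysfs path below are assumptions for illustration; the
 * authoritative definitions live in qlcnic.h and must be used instead.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed mirror of the kernel's struct qlcnic_pm_func_cfg. */
struct pm_func_cfg {
	uint8_t pci_func;	/* source PCI function whose traffic is mirrored */
	uint8_t action;		/* assumed: 0 = disable, 1 = enable mirroring */
	uint8_t dest_npar;	/* destination PCI function (same eswitch as pci_func) */
	uint8_t reserved[5];	/* assumed padding to an 8-byte record */
};

int main(void)
{
	/* Hypothetical device address; substitute the real PCI BDF. */
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/pm_config";
	struct pm_func_cfg cfg;
	ssize_t n;
	int fd;

	memset(&cfg, 0, sizeof(cfg));
	cfg.pci_func = 1;	/* mirror traffic of PCI function 1 */
	cfg.action = 1;		/* enable */
	cfg.dest_npar = 0;	/* send the mirrored traffic to function 0 */

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Exactly one record; the driver rejects partial records. */
	n = write(fd, &cfg, sizeof(cfg));
	if (n != (ssize_t)sizeof(cfg))
		perror("write");

	close(fd);
	return n == (ssize_t)sizeof(cfg) ? 0 : 1;
}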
+ +static ssize_t +qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC]; + int i; + + if (size != sizeof(pm_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + pm_cfg[i].action = adapter->npars[i].enable_pm; + pm_cfg[i].dest_npar = 0; + pm_cfg[i].pci_func = i; + } + memcpy(buf, &pm_cfg, size); + + return size; +} + +static int +validate_esw_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg, int count) +{ + u8 pci_func; + int i; + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + if (pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + if (esw_cfg->host_vlan_tag == 1) + if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) + return QL_STATUS_INVALID_PARAM; + + if (!IS_VALID_MODE(esw_cfg[i].promisc_mode) + || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag) + || !IS_VALID_MODE(esw_cfg[i].mac_learning) + || !IS_VALID_MODE(esw_cfg[i].discard_tagged)) + return QL_STATUS_INVALID_PARAM; + } + + return 0; +} + +static ssize_t +qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg *esw_cfg; + int count, rem, i, ret; + u8 id, pci_func; + + count = size / sizeof(struct qlcnic_esw_func_cfg); + rem = size % sizeof(struct qlcnic_esw_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + esw_cfg = (struct qlcnic_esw_func_cfg *) buf; + ret = validate_esw_config(adapter, esw_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + id = adapter->npars[pci_func].phy_port; + ret = qlcnic_config_switch_port(adapter, id, + esw_cfg[i].host_vlan_tag, + esw_cfg[i].discard_tagged, + esw_cfg[i].promisc_mode, + esw_cfg[i].mac_learning, + esw_cfg[i].pci_func, + esw_cfg[i].vlan_id); + if (ret) + return ret; + } + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode; + adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning; + adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id; + adapter->npars[pci_func].discard_tagged = + esw_cfg[i].discard_tagged; + adapter->npars[pci_func].host_vlan_tag = + esw_cfg[i].host_vlan_tag; + } + + return size; +} + +static ssize_t +qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; + int i; + + if (size != sizeof(esw_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + + esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag; + esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode; + esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged; + 
esw_cfg[i].vlan_id = adapter->npars[i].vlan_id; + esw_cfg[i].mac_learning = adapter->npars[i].mac_learning; + } + memcpy(buf, &esw_cfg, size); + + return size; +} + +static int +validate_npar_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_func_cfg *np_cfg, int count) +{ + u8 pci_func, i; + + for (i = 0; i < count; i++) { + pci_func = np_cfg[i].pci_func; + if (pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + if (!IS_VALID_BW(np_cfg[i].min_bw) + || !IS_VALID_BW(np_cfg[i].max_bw) + || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues) + || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues)) + return QL_STATUS_INVALID_PARAM; + } + return 0; +} + +static ssize_t +qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg *np_cfg; + int i, count, rem, ret; + u8 pci_func; + + count = size / sizeof(struct qlcnic_npar_func_cfg); + rem = size % sizeof(struct qlcnic_npar_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + np_cfg = (struct qlcnic_npar_func_cfg *) buf; + ret = validate_npar_config(adapter, np_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count ; i++) { + pci_func = np_cfg[i].pci_func; + ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); + if (ret) + return ret; + nic_info.pci_func = pci_func; + nic_info.min_tx_bw = np_cfg[i].min_bw; + nic_info.max_tx_bw = np_cfg[i].max_bw; + ret = qlcnic_set_nic_info(adapter, &nic_info); + if (ret) + return ret; + adapter->npars[i].min_bw = nic_info.min_tx_bw; + adapter->npars[i].max_bw = nic_info.max_tx_bw; + } + + return size; + +} +static ssize_t +qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC]; + int i, ret; + + if (size != sizeof(np_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + ret = qlcnic_get_nic_info(adapter, &nic_info, i); + if (ret) + return ret; + + np_cfg[i].pci_func = i; + np_cfg[i].op_mode = nic_info.op_mode; + np_cfg[i].port_num = nic_info.phys_port; + np_cfg[i].fw_capab = nic_info.capabilities; + np_cfg[i].min_bw = nic_info.min_tx_bw ; + np_cfg[i].max_bw = nic_info.max_tx_bw; + np_cfg[i].max_tx_queues = nic_info.max_tx_ques; + np_cfg[i].max_rx_queues = nic_info.max_rx_ques; + } + memcpy(buf, &np_cfg, size); + return size; +} + +static ssize_t +qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC]; + struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC]; + int i, ret; + + if (size != sizeof(pci_cfg)) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) + return ret; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { + 
pci_cfg[i].pci_func = pci_info[i].id; + pci_cfg[i].func_type = pci_info[i].type; + pci_cfg[i].port_num = pci_info[i].default_port; + pci_cfg[i].min_bw = pci_info[i].tx_min_bw; + pci_cfg[i].max_bw = pci_info[i].tx_max_bw; + memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); + } + memcpy(buf, &pci_cfg, size); + return size; + +} +static struct bin_attribute bin_attr_npar_config = { + .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_npar_config, + .write = qlcnic_sysfs_write_npar_config, +}; + +static struct bin_attribute bin_attr_pci_config = { + .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pci_config, + .write = NULL, +}; + +static struct bin_attribute bin_attr_esw_config = { + .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_esw_config, + .write = qlcnic_sysfs_write_esw_config, +}; + +static struct bin_attribute bin_attr_pm_config = { + .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pm_config, + .write = qlcnic_sysfs_write_pm_config, +}; + static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) { @@ -2610,23 +3436,45 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; if (device_create_file(dev, &dev_attr_diag_mode)) dev_info(dev, "failed to create diag_mode sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_crb)) dev_info(dev, "failed to create crb sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); -} + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + adapter->op_mode != QLCNIC_MGMT_FUNC) + return; + if (device_create_bin_file(dev, &bin_attr_pci_config)) + dev_info(dev, "failed to create pci config sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_npar_config)) + dev_info(dev, "failed to create npar config sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_esw_config)) + dev_info(dev, "failed to create esw config sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_pm_config)) + dev_info(dev, "failed to create pm config sysfs entry"); +} static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; device_remove_file(dev, &dev_attr_diag_mode); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + adapter->op_mode != QLCNIC_MGMT_FUNC) + return; + device_remove_bin_file(dev, &bin_attr_pci_config); + device_remove_bin_file(dev, &bin_attr_npar_config); + device_remove_bin_file(dev, &bin_attr_esw_config); + device_remove_bin_file(dev, &bin_attr_pm_config); } #ifdef CONFIG_INET @@ -2684,7 +3532,7 @@ static int qlcnic_netdev_event(struct notifier_block *this, if (!adapter) goto done; - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto done; qlcnic_config_indev_addr(dev, event); @@ -2720,7 +3568,7 @@ qlcnic_inetaddr_event(struct notifier_block *this, if (!adapter) goto done; - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto done; switch (event) { @@ -2750,6 +3598,11 @@ static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long 
event) { } #endif +static struct pci_error_handlers qlcnic_err_handler = { + .error_detected = qlcnic_io_error_detected, + .slot_reset = qlcnic_io_slot_reset, + .resume = qlcnic_io_resume, +}; static struct pci_driver qlcnic_driver = { .name = qlcnic_driver_name, @@ -2760,11 +3613,14 @@ static struct pci_driver qlcnic_driver = { .suspend = qlcnic_suspend, .resume = qlcnic_resume, #endif - .shutdown = qlcnic_shutdown + .shutdown = qlcnic_shutdown, + .err_handler = &qlcnic_err_handler + }; static int __init qlcnic_init_module(void) { + int ret; printk(KERN_INFO "%s\n", qlcnic_driver_string); @@ -2773,8 +3629,15 @@ static int __init qlcnic_init_module(void) register_inetaddr_notifier(&qlcnic_inetaddr_cb); #endif + ret = pci_register_driver(&qlcnic_driver); + if (ret) { +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif + } - return pci_register_driver(&qlcnic_driver); + return ret; } module_init(qlcnic_init_module); diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 20624ba44a37be2bad8f37206588f29b63bd24ce..a478786840a65e8f35a5a825263926fd0f7078d2 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -16,9 +16,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00.23.00.00-01" - -#define PFX "qlge: " +#define DRV_VERSION "v1.00.00.25.00.00-01" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ @@ -1062,7 +1060,7 @@ struct tx_buf_desc { #define TX_DESC_LEN_MASK 0x000fffff #define TX_DESC_C 0x40000000 #define TX_DESC_E 0x80000000 -} __attribute((packed)); +} __packed; /* * IOCB Definitions... @@ -1095,7 +1093,7 @@ struct ob_mac_iocb_req { __le16 vlan_tci; __le16 reserved4; struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __attribute((packed)); +} __packed; struct ob_mac_iocb_rsp { u8 opcode; /* */ @@ -1112,7 +1110,7 @@ struct ob_mac_iocb_rsp { u32 tid; u32 txq_idx; __le32 reserved[13]; -} __attribute((packed)); +} __packed; struct ob_mac_tso_iocb_req { u8 opcode; @@ -1140,7 +1138,7 @@ struct ob_mac_tso_iocb_req { __le16 vlan_tci; __le16 mss; struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __attribute((packed)); +} __packed; struct ob_mac_tso_iocb_rsp { u8 opcode; @@ -1157,7 +1155,7 @@ struct ob_mac_tso_iocb_rsp { u32 tid; u32 txq_idx; __le32 reserved2[13]; -} __attribute((packed)); +} __packed; struct ib_mac_iocb_rsp { u8 opcode; /* 0x20 */ @@ -1216,7 +1214,7 @@ struct ib_mac_iocb_rsp { #define IB_MAC_IOCB_RSP_HL 0x80 __le32 hdr_len; /* */ __le64 hdr_addr; /* */ -} __attribute((packed)); +} __packed; struct ib_ae_iocb_rsp { u8 opcode; @@ -1237,7 +1235,7 @@ struct ib_ae_iocb_rsp { #define PCI_ERR_ANON_BUF_RD 0x40 u8 q_id; __le32 reserved[15]; -} __attribute((packed)); +} __packed; /* * These three structures are for generic @@ -1249,7 +1247,7 @@ struct ql_net_rsp_iocb { __le16 length; __le32 tid; __le32 reserved[14]; -} __attribute((packed)); +} __packed; struct net_req_iocb { u8 opcode; @@ -1257,7 +1255,7 @@ struct net_req_iocb { __le16 flags1; __le32 tid; __le32 reserved1[30]; -} __attribute((packed)); +} __packed; /* * tx ring initialization control block for chip. @@ -1283,7 +1281,7 @@ struct wqicb { __le16 rid; __le64 addr; __le64 cnsmr_idx_addr; -} __attribute((packed)); +} __packed; /* * rx ring initialization control block for chip. 
@@ -1317,7 +1315,7 @@ struct cqicb { __le64 sbq_addr; __le16 sbq_buf_size; __le16 sbq_len; /* entry count */ -} __attribute((packed)); +} __packed; struct ricb { u8 base_cq; @@ -1335,7 +1333,7 @@ struct ricb { u8 hash_cq_id[1024]; __le32 ipv6_hash_key[10]; __le32 ipv4_hash_key[4]; -} __attribute((packed)); +} __packed; /* SOFTWARE/DRIVER DATA STRUCTURES. */ @@ -2227,7 +2225,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr, int word_count); int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump); -int ql_mb_sys_err(struct ql_adapter *qdev); int ql_mb_about_fw(struct ql_adapter *qdev); int ql_wol(struct ql_adapter *qdev); int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); @@ -2246,6 +2243,7 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); int ql_own_firmware(struct ql_adapter *qdev); int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); +void qlge_set_multicast_list(struct net_device *ndev); #if 1 #define QL_ALL_DUMP diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c index 68a1c9b91e74d199fbb6a4cce1a604ef09eea230..4747492935ef8bf190cc575fe7146e3072417623 100644 --- a/drivers/net/qlge/qlge_dbg.c +++ b/drivers/net/qlge/qlge_dbg.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include "qlge.h" @@ -446,7 +448,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) MAC_ADDR_TYPE_CAM_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, - "Failed read of mac index register.\n"); + "Failed read of mac index register\n"); goto err; } *buf++ = value[0]; /* lower MAC address */ @@ -458,7 +460,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) MAC_ADDR_TYPE_MULTI_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, - "Failed read of mac index register.\n"); + "Failed read of mac index register\n"); goto err; } *buf++ = value[0]; /* lower Mcast address */ @@ -482,7 +484,7 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) status = ql_get_routing_reg(qdev, i, &value); if (status) { netif_err(qdev, drv, qdev->ndev, - "Failed read of routing index register.\n"); + "Failed read of routing index register\n"); goto err; } else { *buf++ = value; @@ -668,7 +670,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; break; default: - printk(KERN_ERR"Bad type!!! 0x%08x\n", type); + pr_err("Bad type!!! 
0x%08x\n", type); max_index = 0; max_offset = 0; break; @@ -738,7 +740,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) int i; if (!mpi_coredump) { - netif_err(qdev, drv, qdev->ndev, "No memory available.\n"); + netif_err(qdev, drv, qdev->ndev, "No memory available\n"); return -ENOMEM; } @@ -1234,15 +1236,10 @@ static void ql_get_core_dump(struct ql_adapter *qdev) if (!netif_running(qdev->ndev)) { netif_err(qdev, ifup, qdev->ndev, - "Force Coredump can only be done from interface that is up.\n"); - return; - } - - if (ql_mb_sys_err(qdev)) { - netif_err(qdev, ifup, qdev->ndev, - "Fail force coredump with ql_mb_sys_err().\n"); + "Force Coredump can only be done from interface that is up\n"); return; } + ql_queue_fw_error(qdev); } void ql_gen_reg_dump(struct ql_adapter *qdev, @@ -1339,7 +1336,7 @@ void ql_mpi_core_to_log(struct work_struct *work) "Core is dumping to log file!\n"); for (i = 0; i < count; i += 8) { - printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x " + pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " "%.08x %.08x %.08x\n", i, tmp[i + 0], tmp[i + 1], @@ -1361,71 +1358,43 @@ static void ql_dump_intr_states(struct ql_adapter *qdev) for (i = 0; i < qdev->intr_count; i++) { ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); value = ql_read32(qdev, INTR_EN); - printk(KERN_ERR PFX - "%s: Interrupt %d is %s.\n", + pr_err("%s: Interrupt %d is %s\n", qdev->ndev->name, i, (value & INTR_EN_EN ? "enabled" : "disabled")); } } +#define DUMP_XGMAC(qdev, reg) \ +do { \ + u32 data; \ + ql_read_xgmac_reg(qdev, reg, &data); \ + pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ +} while (0) + void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) { - u32 data; if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__); + pr_err("%s: Couldn't get xgmac sem\n", __func__); return; } - ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data); - printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data); - printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); - printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, TX_CFG, &data); - printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data); - ql_read_xgmac_reg(qdev, RX_CFG, &data); - printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data); - ql_read_xgmac_reg(qdev, FLOW_CTL, &data); - printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data); - printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data); - printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data); - printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n", - qdev->ndev->name, data); - ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data); - printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n", - qdev->ndev->name, data); - ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data); - printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data); - printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data); - printk(KERN_ERR PFX "%s: MAC_SYS_INT = 
0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data); - printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n", - qdev->ndev->name, data); - ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data); - printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name, - data); - ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data); - printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n", - qdev->ndev->name, data); - ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data); - printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name, - data); + DUMP_XGMAC(qdev, PAUSE_SRC_LO); + DUMP_XGMAC(qdev, PAUSE_SRC_HI); + DUMP_XGMAC(qdev, GLOBAL_CFG); + DUMP_XGMAC(qdev, TX_CFG); + DUMP_XGMAC(qdev, RX_CFG); + DUMP_XGMAC(qdev, FLOW_CTL); + DUMP_XGMAC(qdev, PAUSE_OPCODE); + DUMP_XGMAC(qdev, PAUSE_TIMER); + DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); + DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); + DUMP_XGMAC(qdev, MAC_TX_PARAMS); + DUMP_XGMAC(qdev, MAC_RX_PARAMS); + DUMP_XGMAC(qdev, MAC_SYS_INT); + DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); + DUMP_XGMAC(qdev, MAC_MGMT_INT); + DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); + DUMP_XGMAC(qdev, EXT_ARB_MODE); ql_sem_unlock(qdev, qdev->xg_sem_mask); - } static void ql_dump_ets_regs(struct ql_adapter *qdev) @@ -1442,14 +1411,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev) return; for (i = 0; i < 4; i++) { if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { - printk(KERN_ERR PFX - "%s: Failed read of mac index register.\n", + pr_err("%s: Failed read of mac index register\n", __func__); return; } else { if (value[0]) - printk(KERN_ERR PFX - "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n", + pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", qdev->ndev->name, i, value[1], value[0], value[2]); } @@ -1457,14 +1424,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev) for (i = 0; i < 32; i++) { if (ql_get_mac_addr_reg (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { - printk(KERN_ERR PFX - "%s: Failed read of mac index register.\n", + pr_err("%s: Failed read of mac index register\n", __func__); return; } else { if (value[0]) - printk(KERN_ERR PFX - "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n", + pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", qdev->ndev->name, i, value[1], value[0]); } } @@ -1481,129 +1446,77 @@ void ql_dump_routing_entries(struct ql_adapter *qdev) for (i = 0; i < 16; i++) { value = 0; if (ql_get_routing_reg(qdev, i, &value)) { - printk(KERN_ERR PFX - "%s: Failed read of routing index register.\n", + pr_err("%s: Failed read of routing index register\n", __func__); return; } else { if (value) - printk(KERN_ERR PFX - "%s: Routing Mask %d = 0x%.08x.\n", + pr_err("%s: Routing Mask %d = 0x%.08x\n", qdev->ndev->name, i, value); } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } +#define DUMP_REG(qdev, reg) \ + pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) + void ql_dump_regs(struct ql_adapter *qdev) { - printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func); - printk(KERN_ERR PFX "SYS = 0x%x.\n", - ql_read32(qdev, SYS)); - printk(KERN_ERR PFX "RST_FO = 0x%x.\n", - ql_read32(qdev, RST_FO)); - printk(KERN_ERR PFX "FSC = 0x%x.\n", - ql_read32(qdev, FSC)); - printk(KERN_ERR PFX "CSR = 0x%x.\n", - ql_read32(qdev, CSR)); - printk(KERN_ERR PFX "ICB_RID = 0x%x.\n", - ql_read32(qdev, ICB_RID)); - printk(KERN_ERR PFX "ICB_L = 0x%x.\n", - ql_read32(qdev, ICB_L)); - printk(KERN_ERR PFX "ICB_H = 0x%x.\n", - ql_read32(qdev, ICB_H)); - printk(KERN_ERR 
PFX "CFG = 0x%x.\n", - ql_read32(qdev, CFG)); - printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n", - ql_read32(qdev, BIOS_ADDR)); - printk(KERN_ERR PFX "STS = 0x%x.\n", - ql_read32(qdev, STS)); - printk(KERN_ERR PFX "INTR_EN = 0x%x.\n", - ql_read32(qdev, INTR_EN)); - printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n", - ql_read32(qdev, INTR_MASK)); - printk(KERN_ERR PFX "ISR1 = 0x%x.\n", - ql_read32(qdev, ISR1)); - printk(KERN_ERR PFX "ISR2 = 0x%x.\n", - ql_read32(qdev, ISR2)); - printk(KERN_ERR PFX "ISR3 = 0x%x.\n", - ql_read32(qdev, ISR3)); - printk(KERN_ERR PFX "ISR4 = 0x%x.\n", - ql_read32(qdev, ISR4)); - printk(KERN_ERR PFX "REV_ID = 0x%x.\n", - ql_read32(qdev, REV_ID)); - printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n", - ql_read32(qdev, FRC_ECC_ERR)); - printk(KERN_ERR PFX "ERR_STS = 0x%x.\n", - ql_read32(qdev, ERR_STS)); - printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n", - ql_read32(qdev, RAM_DBG_ADDR)); - printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n", - ql_read32(qdev, RAM_DBG_DATA)); - printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n", - ql_read32(qdev, ECC_ERR_CNT)); - printk(KERN_ERR PFX "SEM = 0x%x.\n", - ql_read32(qdev, SEM)); - printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n", - ql_read32(qdev, GPIO_1)); - printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n", - ql_read32(qdev, GPIO_2)); - printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n", - ql_read32(qdev, GPIO_3)); - printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n", - ql_read32(qdev, XGMAC_ADDR)); - printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n", - ql_read32(qdev, XGMAC_DATA)); - printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n", - ql_read32(qdev, NIC_ETS)); - printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n", - ql_read32(qdev, CNA_ETS)); - printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n", - ql_read32(qdev, FLASH_ADDR)); - printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n", - ql_read32(qdev, FLASH_DATA)); - printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n", - ql_read32(qdev, CQ_STOP)); - printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n", - ql_read32(qdev, PAGE_TBL_RID)); - printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n", - ql_read32(qdev, WQ_PAGE_TBL_LO)); - printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n", - ql_read32(qdev, WQ_PAGE_TBL_HI)); - printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n", - ql_read32(qdev, CQ_PAGE_TBL_LO)); - printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n", - ql_read32(qdev, CQ_PAGE_TBL_HI)); - printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n", - ql_read32(qdev, COS_DFLT_CQ1)); - printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n", - ql_read32(qdev, COS_DFLT_CQ2)); - printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n", - ql_read32(qdev, SPLT_HDR)); - printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n", - ql_read32(qdev, FC_PAUSE_THRES)); - printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n", - ql_read32(qdev, NIC_PAUSE_THRES)); - printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n", - ql_read32(qdev, FC_ETHERTYPE)); - printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n", - ql_read32(qdev, FC_RCV_CFG)); - printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n", - ql_read32(qdev, NIC_RCV_CFG)); - printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n", - ql_read32(qdev, FC_COS_TAGS)); - printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n", - ql_read32(qdev, NIC_COS_TAGS)); - printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n", - ql_read32(qdev, MGMT_RCV_CFG)); - printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n", - ql_read32(qdev, XG_SERDES_ADDR)); - printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n", - ql_read32(qdev, XG_SERDES_DATA)); - printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n", - ql_read32(qdev, PRB_MX_ADDR)); - printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n", - ql_read32(qdev, PRB_MX_DATA)); + pr_err("reg dump for function 
#%d\n", qdev->func); + DUMP_REG(qdev, SYS); + DUMP_REG(qdev, RST_FO); + DUMP_REG(qdev, FSC); + DUMP_REG(qdev, CSR); + DUMP_REG(qdev, ICB_RID); + DUMP_REG(qdev, ICB_L); + DUMP_REG(qdev, ICB_H); + DUMP_REG(qdev, CFG); + DUMP_REG(qdev, BIOS_ADDR); + DUMP_REG(qdev, STS); + DUMP_REG(qdev, INTR_EN); + DUMP_REG(qdev, INTR_MASK); + DUMP_REG(qdev, ISR1); + DUMP_REG(qdev, ISR2); + DUMP_REG(qdev, ISR3); + DUMP_REG(qdev, ISR4); + DUMP_REG(qdev, REV_ID); + DUMP_REG(qdev, FRC_ECC_ERR); + DUMP_REG(qdev, ERR_STS); + DUMP_REG(qdev, RAM_DBG_ADDR); + DUMP_REG(qdev, RAM_DBG_DATA); + DUMP_REG(qdev, ECC_ERR_CNT); + DUMP_REG(qdev, SEM); + DUMP_REG(qdev, GPIO_1); + DUMP_REG(qdev, GPIO_2); + DUMP_REG(qdev, GPIO_3); + DUMP_REG(qdev, XGMAC_ADDR); + DUMP_REG(qdev, XGMAC_DATA); + DUMP_REG(qdev, NIC_ETS); + DUMP_REG(qdev, CNA_ETS); + DUMP_REG(qdev, FLASH_ADDR); + DUMP_REG(qdev, FLASH_DATA); + DUMP_REG(qdev, CQ_STOP); + DUMP_REG(qdev, PAGE_TBL_RID); + DUMP_REG(qdev, WQ_PAGE_TBL_LO); + DUMP_REG(qdev, WQ_PAGE_TBL_HI); + DUMP_REG(qdev, CQ_PAGE_TBL_LO); + DUMP_REG(qdev, CQ_PAGE_TBL_HI); + DUMP_REG(qdev, COS_DFLT_CQ1); + DUMP_REG(qdev, COS_DFLT_CQ2); + DUMP_REG(qdev, SPLT_HDR); + DUMP_REG(qdev, FC_PAUSE_THRES); + DUMP_REG(qdev, NIC_PAUSE_THRES); + DUMP_REG(qdev, FC_ETHERTYPE); + DUMP_REG(qdev, FC_RCV_CFG); + DUMP_REG(qdev, NIC_RCV_CFG); + DUMP_REG(qdev, FC_COS_TAGS); + DUMP_REG(qdev, NIC_COS_TAGS); + DUMP_REG(qdev, MGMT_RCV_CFG); + DUMP_REG(qdev, XG_SERDES_ADDR); + DUMP_REG(qdev, XG_SERDES_DATA); + DUMP_REG(qdev, PRB_MX_ADDR); + DUMP_REG(qdev, PRB_MX_DATA); ql_dump_intr_states(qdev); ql_dump_xgmac_control_regs(qdev); ql_dump_ets_regs(qdev); @@ -1613,191 +1526,124 @@ void ql_dump_regs(struct ql_adapter *qdev) #endif #ifdef QL_STAT_DUMP + +#define DUMP_STAT(qdev, stat) \ + pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) + void ql_dump_stat(struct ql_adapter *qdev) { - printk(KERN_ERR "%s: Enter.\n", __func__); - printk(KERN_ERR "tx_pkts = %ld\n", - (unsigned long)qdev->nic_stats.tx_pkts); - printk(KERN_ERR "tx_bytes = %ld\n", - (unsigned long)qdev->nic_stats.tx_bytes); - printk(KERN_ERR "tx_mcast_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.tx_mcast_pkts); - printk(KERN_ERR "tx_bcast_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.tx_bcast_pkts); - printk(KERN_ERR "tx_ucast_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.tx_ucast_pkts); - printk(KERN_ERR "tx_ctl_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.tx_ctl_pkts); - printk(KERN_ERR "tx_pause_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.tx_pause_pkts); - printk(KERN_ERR "tx_64_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_64_pkt); - printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_65_to_127_pkt); - printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_128_to_255_pkt); - printk(KERN_ERR "tx_256_511_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_256_511_pkt); - printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt); - printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt); - printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt); - printk(KERN_ERR "tx_undersize_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_undersize_pkt); - printk(KERN_ERR "tx_oversize_pkt = %ld.\n", - (unsigned long)qdev->nic_stats.tx_oversize_pkt); - printk(KERN_ERR "rx_bytes = %ld.\n", - (unsigned long)qdev->nic_stats.rx_bytes); - printk(KERN_ERR "rx_bytes_ok = 
%ld.\n", - (unsigned long)qdev->nic_stats.rx_bytes_ok); - printk(KERN_ERR "rx_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_pkts); - printk(KERN_ERR "rx_pkts_ok = %ld.\n", - (unsigned long)qdev->nic_stats.rx_pkts_ok); - printk(KERN_ERR "rx_bcast_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_bcast_pkts); - printk(KERN_ERR "rx_mcast_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_mcast_pkts); - printk(KERN_ERR "rx_ucast_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_ucast_pkts); - printk(KERN_ERR "rx_undersize_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_undersize_pkts); - printk(KERN_ERR "rx_oversize_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_oversize_pkts); - printk(KERN_ERR "rx_jabber_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_jabber_pkts); - printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts); - printk(KERN_ERR "rx_drop_events = %ld.\n", - (unsigned long)qdev->nic_stats.rx_drop_events); - printk(KERN_ERR "rx_fcerr_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_fcerr_pkts); - printk(KERN_ERR "rx_align_err = %ld.\n", - (unsigned long)qdev->nic_stats.rx_align_err); - printk(KERN_ERR "rx_symbol_err = %ld.\n", - (unsigned long)qdev->nic_stats.rx_symbol_err); - printk(KERN_ERR "rx_mac_err = %ld.\n", - (unsigned long)qdev->nic_stats.rx_mac_err); - printk(KERN_ERR "rx_ctl_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_ctl_pkts); - printk(KERN_ERR "rx_pause_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_pause_pkts); - printk(KERN_ERR "rx_64_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_64_pkts); - printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_65_to_127_pkts); - printk(KERN_ERR "rx_128_255_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_128_255_pkts); - printk(KERN_ERR "rx_256_511_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_256_511_pkts); - printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts); - printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts); - printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts); - printk(KERN_ERR "rx_len_err_pkts = %ld.\n", - (unsigned long)qdev->nic_stats.rx_len_err_pkts); + pr_err("%s: Enter\n", __func__); + DUMP_STAT(qdev, tx_pkts); + DUMP_STAT(qdev, tx_bytes); + DUMP_STAT(qdev, tx_mcast_pkts); + DUMP_STAT(qdev, tx_bcast_pkts); + DUMP_STAT(qdev, tx_ucast_pkts); + DUMP_STAT(qdev, tx_ctl_pkts); + DUMP_STAT(qdev, tx_pause_pkts); + DUMP_STAT(qdev, tx_64_pkt); + DUMP_STAT(qdev, tx_65_to_127_pkt); + DUMP_STAT(qdev, tx_128_to_255_pkt); + DUMP_STAT(qdev, tx_256_511_pkt); + DUMP_STAT(qdev, tx_512_to_1023_pkt); + DUMP_STAT(qdev, tx_1024_to_1518_pkt); + DUMP_STAT(qdev, tx_1519_to_max_pkt); + DUMP_STAT(qdev, tx_undersize_pkt); + DUMP_STAT(qdev, tx_oversize_pkt); + DUMP_STAT(qdev, rx_bytes); + DUMP_STAT(qdev, rx_bytes_ok); + DUMP_STAT(qdev, rx_pkts); + DUMP_STAT(qdev, rx_pkts_ok); + DUMP_STAT(qdev, rx_bcast_pkts); + DUMP_STAT(qdev, rx_mcast_pkts); + DUMP_STAT(qdev, rx_ucast_pkts); + DUMP_STAT(qdev, rx_undersize_pkts); + DUMP_STAT(qdev, rx_oversize_pkts); + DUMP_STAT(qdev, rx_jabber_pkts); + DUMP_STAT(qdev, rx_undersize_fcerr_pkts); + DUMP_STAT(qdev, rx_drop_events); + DUMP_STAT(qdev, rx_fcerr_pkts); + DUMP_STAT(qdev, rx_align_err); + DUMP_STAT(qdev, rx_symbol_err); + DUMP_STAT(qdev, rx_mac_err); + DUMP_STAT(qdev, rx_ctl_pkts); + DUMP_STAT(qdev, rx_pause_pkts); + 
DUMP_STAT(qdev, rx_64_pkts); + DUMP_STAT(qdev, rx_65_to_127_pkts); + DUMP_STAT(qdev, rx_128_255_pkts); + DUMP_STAT(qdev, rx_256_511_pkts); + DUMP_STAT(qdev, rx_512_to_1023_pkts); + DUMP_STAT(qdev, rx_1024_to_1518_pkts); + DUMP_STAT(qdev, rx_1519_to_max_pkts); + DUMP_STAT(qdev, rx_len_err_pkts); }; #endif #ifdef QL_DEV_DUMP + +#define DUMP_QDEV_FIELD(qdev, type, field) \ + pr_err("qdev->%-24s = " type "\n", #field, qdev->field) +#define DUMP_QDEV_DMA_FIELD(qdev, field) \ + pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) +#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ + pr_err("%s[%d].%s = " type "\n", \ + #array, index, #field, qdev->array[index].field); void ql_dump_qdev(struct ql_adapter *qdev) { int i; - printk(KERN_ERR PFX "qdev->flags = %lx.\n", - qdev->flags); - printk(KERN_ERR PFX "qdev->vlgrp = %p.\n", - qdev->vlgrp); - printk(KERN_ERR PFX "qdev->pdev = %p.\n", - qdev->pdev); - printk(KERN_ERR PFX "qdev->ndev = %p.\n", - qdev->ndev); - printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n", - qdev->chip_rev_id); - printk(KERN_ERR PFX "qdev->reg_base = %p.\n", - qdev->reg_base); - printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n", - qdev->doorbell_area); - printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n", - qdev->doorbell_area_size); - printk(KERN_ERR PFX "msg_enable = %x.\n", - qdev->msg_enable); - printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n", - qdev->rx_ring_shadow_reg_area); - printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n", - (unsigned long long) qdev->rx_ring_shadow_reg_dma); - printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n", - qdev->tx_ring_shadow_reg_area); - printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n", - (unsigned long long) qdev->tx_ring_shadow_reg_dma); - printk(KERN_ERR PFX "qdev->intr_count = %d.\n", - qdev->intr_count); + DUMP_QDEV_FIELD(qdev, "%lx", flags); + DUMP_QDEV_FIELD(qdev, "%p", vlgrp); + DUMP_QDEV_FIELD(qdev, "%p", pdev); + DUMP_QDEV_FIELD(qdev, "%p", ndev); + DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); + DUMP_QDEV_FIELD(qdev, "%p", reg_base); + DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); + DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); + DUMP_QDEV_FIELD(qdev, "%x", msg_enable); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); if (qdev->msi_x_entry) for (i = 0; i < qdev->intr_count; i++) { - printk(KERN_ERR PFX - "msi_x_entry.[%d]vector = %d.\n", i, - qdev->msi_x_entry[i].vector); - printk(KERN_ERR PFX - "msi_x_entry.[%d]entry = %d.\n", i, - qdev->msi_x_entry[i].entry); + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); } for (i = 0; i < qdev->intr_count; i++) { - printk(KERN_ERR PFX - "intr_context[%d].qdev = %p.\n", i, - qdev->intr_context[i].qdev); - printk(KERN_ERR PFX - "intr_context[%d].intr = %d.\n", i, - qdev->intr_context[i].intr); - printk(KERN_ERR PFX - "intr_context[%d].hooked = %d.\n", i, - qdev->intr_context[i].hooked); - printk(KERN_ERR PFX - "intr_context[%d].intr_en_mask = 0x%08x.\n", i, - qdev->intr_context[i].intr_en_mask); - printk(KERN_ERR PFX - "intr_context[%d].intr_dis_mask = 0x%08x.\n", i, - qdev->intr_context[i].intr_dis_mask); - printk(KERN_ERR PFX - "intr_context[%d].intr_read_mask = 0x%08x.\n", i, - qdev->intr_context[i].intr_read_mask); + DUMP_QDEV_ARRAY(qdev, "%p", 
intr_context, i, qdev); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); } - printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count); - printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count); - printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size); - printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem); - printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count); - printk(KERN_ERR PFX "qdev->tx_ring = %p.\n", - qdev->tx_ring); - printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n", - qdev->rss_ring_count); - printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring); - printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n", - qdev->default_rx_queue); - printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n", - qdev->xg_sem_mask); - printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n", - qdev->port_link_up); - printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n", - qdev->port_init); - + DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); + DUMP_QDEV_FIELD(qdev, "%p", ring_mem); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring); + DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring); + DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); + DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); } #endif #ifdef QL_CB_DUMP void ql_dump_wqicb(struct wqicb *wqicb) { - printk(KERN_ERR PFX "Dumping wqicb stuff...\n"); - printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len)); - printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags)); - printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n", + pr_err("Dumping wqicb stuff...\n"); + pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); + pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); + pr_err("wqicb->cq_id_rss = %d\n", le16_to_cpu(wqicb->cq_id_rss)); - printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid)); - printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n", + pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); + pr_err("wqicb->wq_addr = 0x%llx\n", (unsigned long long) le64_to_cpu(wqicb->addr)); - printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n", + pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); } @@ -1805,40 +1651,34 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring) { if (tx_ring == NULL) return; - printk(KERN_ERR PFX - "===================== Dumping tx_ring %d ===============.\n", + pr_err("===================== Dumping tx_ring %d ===============\n", tx_ring->wq_id); - printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base); - printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n", + pr_err("tx_ring->base = %p\n", tx_ring->wq_base); + pr_err("tx_ring->base_dma = 0x%llx\n", (unsigned long long) tx_ring->wq_base_dma); - printk(KERN_ERR PFX - "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n", + pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", tx_ring->cnsmr_idx_sh_reg, tx_ring->cnsmr_idx_sh_reg ? 
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); - printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size); - printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len); - printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n", - tx_ring->prod_idx_db_reg); - printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n", - tx_ring->valid_db_reg); - printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx); - printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id); - printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id); - printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q); - printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n", - atomic_read(&tx_ring->tx_count)); + pr_err("tx_ring->size = %d\n", tx_ring->wq_size); + pr_err("tx_ring->len = %d\n", tx_ring->wq_len); + pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); + pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); + pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); + pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); + pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); + pr_err("tx_ring->q = %p\n", tx_ring->q); + pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); } void ql_dump_ricb(struct ricb *ricb) { int i; - printk(KERN_ERR PFX - "===================== Dumping ricb ===============.\n"); - printk(KERN_ERR PFX "Dumping ricb stuff...\n"); + pr_err("===================== Dumping ricb ===============\n"); + pr_err("Dumping ricb stuff...\n"); - printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f); - printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n", + pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); + pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", ricb->flags & RSS_L6K ? "RSS_L6K " : "", ricb->flags & RSS_LI ? "RSS_LI " : "", @@ -1848,44 +1688,44 @@ void ql_dump_ricb(struct ricb *ricb) ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", ricb->flags & RSS_RT6 ? 
"RSS_RT6 " : ""); - printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask)); + pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); for (i = 0; i < 16; i++) - printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i, + pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, le32_to_cpu(ricb->hash_cq_id[i])); for (i = 0; i < 10; i++) - printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i, + pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, le32_to_cpu(ricb->ipv6_hash_key[i])); for (i = 0; i < 4; i++) - printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i, + pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, le32_to_cpu(ricb->ipv4_hash_key[i])); } void ql_dump_cqicb(struct cqicb *cqicb) { - printk(KERN_ERR PFX "Dumping cqicb stuff...\n"); + pr_err("Dumping cqicb stuff...\n"); - printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect); - printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags); - printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len)); - printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n", + pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); + pr_err("cqicb->flags = %x\n", cqicb->flags); + pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); + pr_err("cqicb->addr = 0x%llx\n", (unsigned long long) le64_to_cpu(cqicb->addr)); - printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n", + pr_err("cqicb->prod_idx_addr = 0x%llx\n", (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); - printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n", + pr_err("cqicb->pkt_delay = 0x%.04x\n", le16_to_cpu(cqicb->pkt_delay)); - printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n", + pr_err("cqicb->irq_delay = 0x%.04x\n", le16_to_cpu(cqicb->irq_delay)); - printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n", + pr_err("cqicb->lbq_addr = 0x%llx\n", (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); - printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n", + pr_err("cqicb->lbq_buf_size = 0x%.04x\n", le16_to_cpu(cqicb->lbq_buf_size)); - printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n", + pr_err("cqicb->lbq_len = 0x%.04x\n", le16_to_cpu(cqicb->lbq_len)); - printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n", + pr_err("cqicb->sbq_addr = 0x%llx\n", (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); - printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n", + pr_err("cqicb->sbq_buf_size = 0x%.04x\n", le16_to_cpu(cqicb->sbq_buf_size)); - printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n", + pr_err("cqicb->sbq_len = 0x%.04x\n", le16_to_cpu(cqicb->sbq_len)); } @@ -1893,100 +1733,85 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring) { if (rx_ring == NULL) return; - printk(KERN_ERR PFX - "===================== Dumping rx_ring %d ===============.\n", + pr_err("===================== Dumping rx_ring %d ===============\n", rx_ring->cq_id); - printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n", + pr_err("Dumping rx_ring %d, type = %s%s%s\n", rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", rx_ring->type == RX_Q ? 
"INBOUND_COMPLETIONS" : ""); - printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb); - printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base); - printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n", + pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); + pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); + pr_err("rx_ring->cq_base_dma = %llx\n", (unsigned long long) rx_ring->cq_base_dma); - printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size); - printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len); - printk(KERN_ERR PFX - "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n", + pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); + pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); + pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); - printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n", + pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", (unsigned long long) rx_ring->prod_idx_sh_reg_dma); - printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n", + pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", rx_ring->cnsmr_idx_db_reg); - printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx); - printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry); - printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n", - rx_ring->valid_db_reg); + pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); + pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); + pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); - printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base); - printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n", + pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); + pr_err("rx_ring->lbq_base_dma = %llx\n", (unsigned long long) rx_ring->lbq_base_dma); - printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n", + pr_err("rx_ring->lbq_base_indirect = %p\n", rx_ring->lbq_base_indirect); - printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n", + pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", (unsigned long long) rx_ring->lbq_base_indirect_dma); - printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq); - printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len); - printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size); - printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n", + pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); + pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); + pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); + pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", rx_ring->lbq_prod_idx_db_reg); - printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n", - rx_ring->lbq_prod_idx); - printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n", - rx_ring->lbq_curr_idx); - printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n", - rx_ring->lbq_clean_idx); - printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n", - rx_ring->lbq_free_cnt); - printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n", - rx_ring->lbq_buf_size); - - printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base); - printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n", + pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); + pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); + pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); + pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); + pr_err("rx_ring->lbq_buf_size = %d\n", 
rx_ring->lbq_buf_size); + + pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); + pr_err("rx_ring->sbq_base_dma = %llx\n", (unsigned long long) rx_ring->sbq_base_dma); - printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n", + pr_err("rx_ring->sbq_base_indirect = %p\n", rx_ring->sbq_base_indirect); - printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n", + pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", (unsigned long long) rx_ring->sbq_base_indirect_dma); - printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq); - printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len); - printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size); - printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n", + pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); + pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); + pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); + pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", rx_ring->sbq_prod_idx_db_reg); - printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n", - rx_ring->sbq_prod_idx); - printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n", - rx_ring->sbq_curr_idx); - printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n", - rx_ring->sbq_clean_idx); - printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n", - rx_ring->sbq_free_cnt); - printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n", - rx_ring->sbq_buf_size); - printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id); - printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq); - printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu); - printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev); + pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); + pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); + pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); + pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); + pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); + pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); + pr_err("rx_ring->irq = %d\n", rx_ring->irq); + pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); + pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); } void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) { void *ptr; - printk(KERN_ERR PFX "%s: Enter.\n", __func__); + pr_err("%s: Enter\n", __func__); ptr = kmalloc(size, GFP_ATOMIC); if (ptr == NULL) { - printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n", - __func__); + pr_err("%s: Couldn't allocate a buffer\n", __func__); return; } if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { - printk(KERN_ERR "%s: Failed to upload control block!\n", - __func__); + pr_err("%s: Failed to upload control block!\n", __func__); goto fail_it; } switch (bit) { @@ -2000,8 +1825,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) ql_dump_ricb((struct ricb *)ptr); break; default: - printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n", - __func__, bit); + pr_err("%s: Invalid bit value = %x\n", __func__, bit); break; } fail_it: @@ -2012,27 +1836,27 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) #ifdef QL_OB_DUMP void ql_dump_tx_desc(struct tx_buf_desc *tbd) { - printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", + pr_err("tbd->addr = 0x%llx\n", le64_to_cpu((u64) tbd->addr)); - printk(KERN_ERR PFX "tbd->len = %d\n", + pr_err("tbd->len = %d\n", le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - printk(KERN_ERR PFX "tbd->flags = %s %s\n", + pr_err("tbd->flags = %s %s\n", tbd->len & TX_DESC_C ? "C" : ".", tbd->len & TX_DESC_E ? 
"E" : "."); tbd++; - printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", + pr_err("tbd->addr = 0x%llx\n", le64_to_cpu((u64) tbd->addr)); - printk(KERN_ERR PFX "tbd->len = %d\n", + pr_err("tbd->len = %d\n", le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - printk(KERN_ERR PFX "tbd->flags = %s %s\n", + pr_err("tbd->flags = %s %s\n", tbd->len & TX_DESC_C ? "C" : ".", tbd->len & TX_DESC_E ? "E" : "."); tbd++; - printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", + pr_err("tbd->addr = 0x%llx\n", le64_to_cpu((u64) tbd->addr)); - printk(KERN_ERR PFX "tbd->len = %d\n", + pr_err("tbd->len = %d\n", le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - printk(KERN_ERR PFX "tbd->flags = %s %s\n", + pr_err("tbd->flags = %s %s\n", tbd->len & TX_DESC_C ? "C" : ".", tbd->len & TX_DESC_E ? "E" : "."); @@ -2045,38 +1869,38 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) struct tx_buf_desc *tbd; u16 frame_len; - printk(KERN_ERR PFX "%s\n", __func__); - printk(KERN_ERR PFX "opcode = %s\n", + pr_err("%s\n", __func__); + pr_err("opcode = %s\n", (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); - printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n", + pr_err("flags1 = %s %s %s %s %s\n", ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); - printk(KERN_ERR PFX "flags2 = %s %s %s\n", + pr_err("flags2 = %s %s %s\n", ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); - printk(KERN_ERR PFX "flags3 = %s %s %s\n", + pr_err("flags3 = %s %s %s\n", ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); - printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid); - printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx); - printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); + pr_err("tid = %x\n", ob_mac_iocb->tid); + pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); + pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { - printk(KERN_ERR PFX "frame_len = %d\n", + pr_err("frame_len = %d\n", le32_to_cpu(ob_mac_tso_iocb->frame_len)); - printk(KERN_ERR PFX "mss = %d\n", + pr_err("mss = %d\n", le16_to_cpu(ob_mac_tso_iocb->mss)); - printk(KERN_ERR PFX "prot_hdr_len = %d\n", + pr_err("prot_hdr_len = %d\n", le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); - printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n", + pr_err("hdr_offset = 0x%.04x\n", le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); } else { - printk(KERN_ERR PFX "frame_len = %d\n", + pr_err("frame_len = %d\n", le16_to_cpu(ob_mac_iocb->frame_len)); frame_len = le16_to_cpu(ob_mac_iocb->frame_len); } @@ -2086,9 +1910,9 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) { - printk(KERN_ERR PFX "%s\n", __func__); - printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode); - printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n", + pr_err("%s\n", __func__); + pr_err("opcode = %d\n", ob_mac_rsp->opcode); + pr_err("flags = %s %s %s %s %s %s %s\n", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? 
"OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", @@ -2096,16 +1920,16 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); - printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid); + pr_err("tid = %x\n", ob_mac_rsp->tid); } #endif #ifdef QL_IB_DUMP void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) { - printk(KERN_ERR PFX "%s\n", __func__); - printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode); - printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n", + pr_err("%s\n", __func__); + pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); + pr_err("flags1 = %s%s%s%s%s%s\n", ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", @@ -2114,7 +1938,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : ""); if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) - printk(KERN_ERR PFX "%s%s%s Multicast.\n", + pr_err("%s%s%s Multicast\n", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == @@ -2122,7 +1946,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n", + pr_err("flags2 = %s%s%s%s%s\n", (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", @@ -2130,7 +1954,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) - printk(KERN_ERR PFX "%s%s%s%s%s error.\n", + pr_err("%s%s%s%s%s error\n", (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == @@ -2142,12 +1966,12 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); - printk(KERN_ERR PFX "flags3 = %s%s.\n", + pr_err("flags3 = %s%s\n", ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n", + pr_err("RSS flags = %s%s%s%s\n", ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == @@ -2157,26 +1981,26 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == IB_MAC_IOCB_RSP_M_TCP_V6) ? 
"TCP/IPv6 RSS" : ""); - printk(KERN_ERR PFX "data_len = %d\n", + pr_err("data_len = %d\n", le32_to_cpu(ib_mac_rsp->data_len)); - printk(KERN_ERR PFX "data_addr = 0x%llx\n", + pr_err("data_addr = 0x%llx\n", (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - printk(KERN_ERR PFX "rss = %x\n", + pr_err("rss = %x\n", le32_to_cpu(ib_mac_rsp->rss)); if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) - printk(KERN_ERR PFX "vlan_id = %x\n", + pr_err("vlan_id = %x\n", le16_to_cpu(ib_mac_rsp->vlan_id)); - printk(KERN_ERR PFX "flags4 = %s%s%s.\n", + pr_err("flags4 = %s%s%s\n", ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : ""); if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { - printk(KERN_ERR PFX "hdr length = %d.\n", + pr_err("hdr length = %d\n", le32_to_cpu(ib_mac_rsp->hdr_len)); - printk(KERN_ERR PFX "hdr addr = 0x%llx.\n", + pr_err("hdr addr = 0x%llx\n", (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); } } diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index d10bcefc0e45442aae6c468b622d9c9651249e30..8d63f69b27d912de7dca94d980276963f25c7fe7 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -574,6 +574,22 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ break; } + case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_IP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ { value = RT_IDX_DST_DFLT_Q | /* dest */ @@ -1521,7 +1537,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_err(qdev, drv, qdev->ndev, + netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); rx_ring->rx_errors++; goto err_out; @@ -1618,7 +1634,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_err(qdev, drv, qdev->ndev, + netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); dev_kfree_skb_any(skb); rx_ring->rx_errors++; @@ -1677,7 +1693,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) skb->data; if (!(iph->frag_off & - cpu_to_be16(IP_MF|IP_OFFSET))) { + ntohs(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, @@ -1939,7 +1955,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_err(qdev, drv, qdev->ndev, + netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); dev_kfree_skb_any(skb); rx_ring->rx_errors++; @@ -1997,7 +2013,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, /* Unfragmented ipv4 UDP frame. 
*/ struct iphdr *iph = (struct iphdr *) skb->data; if (!(iph->frag_off & - cpu_to_be16(IP_MF|IP_OFFSET))) { + ntohs(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); @@ -3587,10 +3603,20 @@ static int ql_route_initialize(struct ql_adapter *qdev) if (status) return status; - status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); + status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, + RT_IDX_IP_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for IP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, + RT_IDX_TU_CSUM_ERR, 1); if (status) { netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for error packets.\n"); + "Failed to init routing register " + "for TCP/UDP CSUM error packets.\n"); goto exit; } status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); @@ -3919,6 +3945,11 @@ static int ql_adapter_up(struct ql_adapter *qdev) if ((ql_read32(qdev, STS) & qdev->port_init) && (ql_read32(qdev, STS) & qdev->port_link_up)) ql_link_on(qdev); + /* Restore rx mode. */ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + ql_enable_interrupts(qdev); ql_enable_all_completion_interrupts(qdev); netif_tx_start_all_queues(qdev->ndev); @@ -4204,7 +4235,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device return &ndev->stats; } -static void qlge_set_multicast_list(struct net_device *ndev) +void qlge_set_multicast_list(struct net_device *ndev) { struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); struct netdev_hw_addr *ha; diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 3c00462a5d22fe8ef405878a49da7e91963c0f6e..f84e8570c7cb79d4c161e90ccee1d420b55d256c 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -606,23 +606,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) return status; } -int ql_mb_sys_err(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 0; - - mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR; - - status = ql_mailbox_command(qdev, mbcp); - return status; -} - /* Get MPI firmware version. This will be used for * driver banner and for ethtool info. * Returns zero on success. 
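The qlge debug changes above collapse long runs of hand-written printk(KERN_ERR ...) statements into stringizing helpers such as DUMP_STAT(), DUMP_QDEV_FIELD() and DUMP_QDEV_ARRAY(): the preprocessor's # operator turns the field argument into the printed label, so the text can never drift out of sync with the member actually being dumped. The following standalone sketch shows the same pattern in isolation; the struct, its fields and the dump_stat() macro are illustrative only, not the driver's definitions (the real macros print via pr_err() against qdev->nic_stats rather than printf()):

	/* Minimal sketch of the stringification pattern used by the
	 * DUMP_*() macros above; names here are illustrative, not the
	 * qlge driver's own definitions. */
	#include <stdio.h>

	struct nic_stats {		/* hypothetical stats block */
		unsigned long tx_pkts;
		unsigned long rx_pkts;
		unsigned long rx_align_err;
	};

	/* #field expands to the strings "tx_pkts", "rx_pkts", ... so the
	 * label always matches the member being printed. */
	#define dump_stat(s, field) \
		printf("%-16s = %lu\n", #field, (unsigned long)(s)->field)

	int main(void)
	{
		struct nic_stats stats = {
			.tx_pkts = 42, .rx_pkts = 40, .rx_align_err = 1,
		};

		dump_stat(&stats, tx_pkts);
		dump_stat(&stats, rx_pkts);
		dump_stat(&stats, rx_align_err);
		return 0;
	}
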
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 9a251acf5ab8dfb42d9db1ef1f6e3810f71f98e5..142c381e1d73e0644eac3df74fea212385ad1115 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -44,12 +44,13 @@ #include #include #include +#include #include #define DRV_NAME "r6040" -#define DRV_VERSION "0.25" -#define DRV_RELDATE "20Aug2009" +#define DRV_VERSION "0.26" +#define DRV_RELDATE "30May2010" /* PHY CHIP Address */ #define PHY1_ADDR 1 /* For MAC1 */ @@ -179,7 +180,6 @@ struct r6040_descriptor { struct r6040_private { spinlock_t lock; /* driver lock */ - struct timer_list timer; struct pci_dev *pdev; struct r6040_descriptor *rx_insert_ptr; struct r6040_descriptor *rx_remove_ptr; @@ -189,13 +189,15 @@ struct r6040_private { struct r6040_descriptor *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; - u16 tx_free_desc, phy_addr, phy_mode; + u16 tx_free_desc, phy_addr; u16 mcr0, mcr1; - u16 switch_sig; struct net_device *dev; - struct mii_if_info mii_if; + struct mii_bus *mii_bus; struct napi_struct napi; void __iomem *base; + struct phy_device *phydev; + int old_link; + int old_duplex; }; static char version[] __devinitdata = KERN_INFO DRV_NAME @@ -238,20 +240,30 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val } } -static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg) +static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg) { + struct net_device *dev = bus->priv; struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - return (r6040_phy_read(ioaddr, lp->phy_addr, reg)); + return r6040_phy_read(ioaddr, phy_addr, reg); } -static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val) +static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr, + int reg, u16 value) { + struct net_device *dev = bus->priv; struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - r6040_phy_write(ioaddr, lp->phy_addr, reg, val); + r6040_phy_write(ioaddr, phy_addr, reg, value); + + return 0; +} + +static int r6040_mdiobus_reset(struct mii_bus *bus) +{ + return 0; } static void r6040_free_txbufs(struct net_device *dev) @@ -408,10 +420,9 @@ static void r6040_tx_timeout(struct net_device *dev) void __iomem *ioaddr = priv->base; netdev_warn(dev, "transmit timed out, int enable %4.4x " - "status %4.4x, PHY status %4.4x\n", + "status %4.4x\n", ioread16(ioaddr + MIER), - ioread16(ioaddr + MISR), - r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); + ioread16(ioaddr + MISR)); dev->stats.tx_errors++; @@ -463,9 +474,6 @@ static int r6040_close(struct net_device *dev) struct r6040_private *lp = netdev_priv(dev); struct pci_dev *pdev = lp->pdev; - /* deleted timer */ - del_timer_sync(&lp->timer); - spin_lock_irq(&lp->lock); napi_disable(&lp->napi); netif_stop_queue(dev); @@ -495,64 +503,14 @@ static int r6040_close(struct net_device *dev) return 0; } -/* Status of PHY CHIP */ -static int r6040_phy_mode_chk(struct net_device *dev) -{ - struct r6040_private *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - int phy_dat; - - /* PHY Link Status Check */ - phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1); - if (!(phy_dat & 0x4)) - phy_dat = 0x8000; /* Link Failed, full duplex */ - - /* PHY Chip Auto-Negotiation Status */ - phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1); - if (phy_dat & 0x0020) { - /* Auto Negotiation Mode */ - phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5); - phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4); - if (phy_dat & 
0x140) - /* Force full duplex */ - phy_dat = 0x8000; - else - phy_dat = 0; - } else { - /* Force Mode */ - phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0); - if (phy_dat & 0x100) - phy_dat = 0x8000; - else - phy_dat = 0x0000; - } - - return phy_dat; -}; - -static void r6040_set_carrier(struct mii_if_info *mii) -{ - if (r6040_phy_mode_chk(mii->dev)) { - /* autoneg is off: Link is always assumed to be up */ - if (!netif_carrier_ok(mii->dev)) - netif_carrier_on(mii->dev); - } else - r6040_phy_mode_chk(mii->dev); -} - static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct r6040_private *lp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(rq); - int rc; - if (!netif_running(dev)) + if (!lp->phydev) return -EINVAL; - spin_lock_irq(&lp->lock); - rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL); - spin_unlock_irq(&lp->lock); - r6040_set_carrier(&lp->mii_if); - return rc; + + return phy_mii_ioctl(lp->phydev, rq, cmd); } static int r6040_rx(struct net_device *dev, int limit) @@ -751,26 +709,6 @@ static int r6040_up(struct net_device *dev) if (ret) return ret; - /* Read the PHY ID */ - lp->switch_sig = r6040_phy_read(ioaddr, 0, 2); - - if (lp->switch_sig == ICPLUS_PHY_ID) { - r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */ - lp->phy_mode = 0x8000; - } else { - /* PHY Mode Check */ - r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP); - r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE); - - if (PHY_MODE == 0x3100) - lp->phy_mode = r6040_phy_mode_chk(dev); - else - lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; - } - - /* Set duplex mode */ - lp->mcr0 |= lp->phy_mode; - /* improve performance (by RDC guys) */ r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); @@ -783,35 +721,6 @@ static int r6040_up(struct net_device *dev) return 0; } -/* - A periodic timer routine - Polling PHY Chip Link Status -*/ -static void r6040_timer(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct r6040_private *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - u16 phy_mode; - - /* Polling PHY Chip Status */ - if (PHY_MODE == 0x3100) - phy_mode = r6040_phy_mode_chk(dev); - else - phy_mode = (PHY_MODE & 0x0100) ? 
0x8000:0x0; - - if (phy_mode != lp->phy_mode) { - lp->phy_mode = phy_mode; - lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode; - iowrite16(lp->mcr0, ioaddr); - } - - /* Timer active again */ - mod_timer(&lp->timer, round_jiffies(jiffies + HZ)); - - /* Check media */ - mii_check_media(&lp->mii_if, 1, 1); -} /* Read/set MAC address routines */ static void r6040_mac_address(struct net_device *dev) @@ -873,10 +782,6 @@ static int r6040_open(struct net_device *dev) napi_enable(&lp->napi); netif_start_queue(dev); - /* set and active a timer process */ - setup_timer(&lp->timer, r6040_timer, (unsigned long) dev); - if (lp->switch_sig != ICPLUS_PHY_ID) - mod_timer(&lp->timer, jiffies + HZ); return 0; } @@ -1015,40 +920,22 @@ static void netdev_get_drvinfo(struct net_device *dev, static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct r6040_private *rp = netdev_priv(dev); - int rc; - - spin_lock_irq(&rp->lock); - rc = mii_ethtool_gset(&rp->mii_if, cmd); - spin_unlock_irq(&rp->lock); - return rc; + return phy_ethtool_gset(rp->phydev, cmd); } static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct r6040_private *rp = netdev_priv(dev); - int rc; - - spin_lock_irq(&rp->lock); - rc = mii_ethtool_sset(&rp->mii_if, cmd); - spin_unlock_irq(&rp->lock); - r6040_set_carrier(&rp->mii_if); - - return rc; -} - -static u32 netdev_get_link(struct net_device *dev) { struct r6040_private *rp = netdev_priv(dev); - return mii_link_ok(&rp->mii_if); + return phy_ethtool_sset(rp->phydev, cmd); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, .get_settings = netdev_get_settings, .set_settings = netdev_set_settings, - .get_link = netdev_get_link, + .get_link = ethtool_op_get_link, }; static const struct net_device_ops r6040_netdev_ops = { @@ -1067,6 +954,79 @@ static const struct net_device_ops r6040_netdev_ops = { #endif }; +static void r6040_adjust_link(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + struct phy_device *phydev = lp->phydev; + int status_changed = 0; + void __iomem *ioaddr = lp->base; + + BUG_ON(!phydev); + + if (lp->old_link != phydev->link) { + status_changed = 1; + lp->old_link = phydev->link; + } + + /* reflect duplex change */ + if (phydev->link && (lp->old_duplex != phydev->duplex)) { + lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0); + iowrite16(lp->mcr0, ioaddr); + + status_changed = 1; + lp->old_duplex = phydev->duplex; + } + + if (status_changed) { + pr_info("%s: link %s", dev->name, phydev->link ? + "UP" : "DOWN"); + if (phydev->link) + pr_cont(" - %d/%s", phydev->speed, + DUPLEX_FULL == phydev->duplex ? 
"full" : "half"); + pr_cont("\n"); + } +} + +static int r6040_mii_probe(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + struct phy_device *phydev = NULL; + + phydev = phy_find_first(lp->mii_bus); + if (!phydev) { + dev_err(&lp->pdev->dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, + 0, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(&lp->pdev->dev, "could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + lp->phydev = phydev; + lp->old_link = 0; + lp->old_duplex = -1; + + dev_info(&lp->pdev->dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + + return 0; +} + static int __devinit r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1077,6 +1037,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, static int card_idx = -1; int bar = 0; u16 *adrp; + int i; printk("%s\n", version); @@ -1163,7 +1124,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, /* Init RDC private data */ lp->mcr0 = 0x1002; lp->phy_addr = phy_table[card_idx]; - lp->switch_sig = 0; /* The RDC-specific entries in the device structure. */ dev->netdev_ops = &r6040_netdev_ops; @@ -1171,28 +1131,54 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, dev->watchdog_timeo = TX_TIMEOUT; netif_napi_add(dev, &lp->napi, r6040_poll, 64); - lp->mii_if.dev = dev; - lp->mii_if.mdio_read = r6040_mdio_read; - lp->mii_if.mdio_write = r6040_mdio_write; - lp->mii_if.phy_id = lp->phy_addr; - lp->mii_if.phy_id_mask = 0x1f; - lp->mii_if.reg_num_mask = 0x1f; - - /* Check the vendor ID on the PHY, if 0xffff assume none attached */ - if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) { - dev_err(&pdev->dev, "Failed to detect an attached PHY\n"); - err = -ENODEV; + + lp->mii_bus = mdiobus_alloc(); + if (!lp->mii_bus) { + dev_err(&pdev->dev, "mdiobus_alloc() failed\n"); goto err_out_unmap; } + lp->mii_bus->priv = dev; + lp->mii_bus->read = r6040_mdiobus_read; + lp->mii_bus->write = r6040_mdiobus_write; + lp->mii_bus->reset = r6040_mdiobus_reset; + lp->mii_bus->name = "r6040_eth_mii"; + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx); + lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (!lp->mii_bus->irq) { + dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); + goto err_out_mdio; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + lp->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(lp->mii_bus); + if (err) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_out_mdio_irq; + } + + err = r6040_mii_probe(dev); + if (err) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto err_out_mdio_unregister; + } + /* Register net device. 
After this dev->name assign */ err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Failed to register net device\n"); - goto err_out_unmap; + goto err_out_mdio_unregister; } return 0; +err_out_mdio_unregister: + mdiobus_unregister(lp->mii_bus); +err_out_mdio_irq: + kfree(lp->mii_bus->irq); +err_out_mdio: + mdiobus_free(lp->mii_bus); err_out_unmap: pci_iounmap(pdev, ioaddr); err_out_free_res: @@ -1206,8 +1192,12 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, static void __devexit r6040_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); + struct r6040_private *lp = netdev_priv(dev); unregister_netdev(dev); + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); pci_release_regions(pdev); free_netdev(dev); pci_disable_device(pdev); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index cdc6a5c2e70d81955efec9d3c633a593ef943b16..35540411990d5867f1ed9d44164fc8b7e47a8a11 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -88,7 +88,7 @@ static const int multicast_filter_limit = 32; #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) #define RTL_R8(reg) readb (ioaddr + (reg)) #define RTL_R16(reg) readw (ioaddr + (reg)) -#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) +#define RTL_R32(reg) readl (ioaddr + (reg)) enum mac_version { RTL_GIGA_MAC_NONE = 0x00, diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 416669fd68c6bcbaf42b4ebc4962195713f1d459..3688325c11f5367c83d68f86e0d7f72b6c9b4cdc 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -1,6 +1,6 @@ /************************************************************************ * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC - * Copyright(c) 2002-2007 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 1d37f0c310ca4bd91d715129f24a11356ad41fa1..18bc5b718bbb51875dd5e2fbf14be6f626d0fa88 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -1,6 +1,6 @@ /************************************************************************ * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC - * Copyright(c) 2002-2007 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. * * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. @@ -38,7 +38,7 @@ * Tx descriptors that can be associated with each corresponding FIFO. * intr_type: This defines the type of interrupt. The values can be 0(INTA), * 2(MSI_X). Default value is '2(MSI_X)' - * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not. + * lro: Specifies whether to enable Large Receive Offload (LRO) or not. * Possible values '1' for enable '0' for disable. Default is '0' * lro_max_pkts: This parameter defines maximum number of packets can be * aggregated as a single large packet @@ -90,7 +90,7 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.26.25" +#define DRV_VERSION "2.0.26.26" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; @@ -496,7 +496,7 @@ S2IO_PARM_INT(rxsync_frequency, 3); /* Interrupt type. 
Values can be 0(INTA), 2(MSI_X) */ S2IO_PARM_INT(intr_type, 2); /* Large receive offload feature */ -static unsigned int lro_enable; +static unsigned int lro_enable = 1; module_param_named(lro, lro_enable, uint, 0); /* Max pkts to be aggregated by LRO at one time. If not specified, @@ -795,7 +795,6 @@ static int init_shared_mem(struct s2io_nic *nic) ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; ring->nic = nic; ring->ring_no = i; - ring->lro = lro_enable; blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1); /* Allocating all the Rx blocks */ @@ -5797,7 +5796,7 @@ static void s2io_vpd_read(struct s2io_nic *nic) { u8 *vpd_data; u8 data; - int i = 0, cnt, fail = 0; + int i = 0, cnt, len, fail = 0; int vpd_addr = 0x80; struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; @@ -5838,20 +5837,28 @@ static void s2io_vpd_read(struct s2io_nic *nic) if (!fail) { /* read serial number of adapter */ - for (cnt = 0; cnt < 256; cnt++) { + for (cnt = 0; cnt < 252; cnt++) { if ((vpd_data[cnt] == 'S') && - (vpd_data[cnt+1] == 'N') && - (vpd_data[cnt+2] < VPD_STRING_LEN)) { - memset(nic->serial_num, 0, VPD_STRING_LEN); - memcpy(nic->serial_num, &vpd_data[cnt + 3], - vpd_data[cnt+2]); - break; + (vpd_data[cnt+1] == 'N')) { + len = vpd_data[cnt+2]; + if (len < min(VPD_STRING_LEN, 256-cnt-2)) { + memcpy(nic->serial_num, + &vpd_data[cnt + 3], + len); + memset(nic->serial_num+len, + 0, + VPD_STRING_LEN-len); + break; + } } } } - if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) - memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); + if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) { + len = vpd_data[1]; + memcpy(nic->product_name, &vpd_data[3], len); + nic->product_name[len] = 0; + } kfree(vpd_data); swstats->mem_freed += 256; } @@ -6707,6 +6714,7 @@ static u32 s2io_ethtool_op_get_tso(struct net_device *dev) { return (dev->features & NETIF_F_TSO) != 0; } + static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) { if (data) @@ -6717,6 +6725,42 @@ static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) return 0; } +static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) +{ + struct s2io_nic *sp = netdev_priv(dev); + int rc = 0; + int changed = 0; + + if (data & ~ETH_FLAG_LRO) + return -EINVAL; + + if (data & ETH_FLAG_LRO) { + if (lro_enable) { + if (!(dev->features & NETIF_F_LRO)) { + dev->features |= NETIF_F_LRO; + changed = 1; + } + } else + rc = -EINVAL; + } else if (dev->features & NETIF_F_LRO) { + dev->features &= ~NETIF_F_LRO; + changed = 1; + } + + if (changed && netif_running(dev)) { + s2io_stop_all_tx_queue(sp); + s2io_card_down(sp); + sp->lro = !!(dev->features & NETIF_F_LRO); + rc = s2io_card_up(sp); + if (rc) + s2io_reset(sp); + else + s2io_start_all_tx_queue(sp); + } + + return rc; +} + static const struct ethtool_ops netdev_ethtool_ops = { .get_settings = s2io_ethtool_gset, .set_settings = s2io_ethtool_sset, @@ -6733,6 +6777,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_rx_csum = s2io_ethtool_get_rx_csum, .set_rx_csum = s2io_ethtool_set_rx_csum, .set_tx_csum = s2io_ethtool_op_set_tx_csum, + .set_flags = s2io_ethtool_set_flags, + .get_flags = ethtool_op_get_flags, .set_sg = ethtool_op_set_sg, .get_tso = s2io_ethtool_op_get_tso, .set_tso = s2io_ethtool_op_set_tso, @@ -7261,6 +7307,7 @@ static int s2io_card_up(struct s2io_nic *sp) struct ring_info *ring = &mac_control->rings[i]; ring->mtu = dev->mtu; + ring->lro = sp->lro; ret = fill_rx_buffers(sp, ring, 1); if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", @@ 
-7847,7 +7894,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Private member variable initialized to s2io NIC structure */ sp = netdev_priv(dev); - memset(sp, 0, sizeof(struct s2io_nic)); sp->dev = dev; sp->pdev = pdev; sp->high_dma_flag = dma_flag; @@ -8001,7 +8047,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) dev->netdev_ops = &s2io_netdev_ops; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - + if (lro_enable) + dev->features |= NETIF_F_LRO; dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; if (sp->high_dma_flag == true) dev->features |= NETIF_F_HIGHDMA; @@ -8159,7 +8206,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) goto register_failed; } s2io_vpd_read(sp); - DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n"); + DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n"); DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name, sp->product_name, pdev->revision); DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 7f3a53dcc6ef303ca91fa506703074195f549d11..0af0335339053f8512280077d66b0ab4f41a2941 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -1,6 +1,6 @@ /************************************************************************ * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC - * Copyright(c) 2002-2007 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 79eee3062083ab3b900da0c68e7df03cf5bffe9d..8e6bd45b9f311f4ea4c164d6d2528455cb64ae8e 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -2532,7 +2532,7 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev) || !sc->phy_dev) return -EINVAL; - return phy_mii_ioctl(sc->phy_dev, if_mii(rq), cmd); + return phy_mii_ioctl(sc->phy_dev, rq, cmd); } static int sbmac_close(struct net_device *dev) diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 156460527231b4899044b714f489cb9bc357cb56..ba674c5ca29e76a87da30259505b5fa7e0e43098 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -27,6 +27,7 @@ #include "nic.h" #include "mcdi.h" +#include "workarounds.h" /************************************************************************** * @@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = { #define EFX_MAX_MTU (9 * 1024) -/* RX slow fill workqueue. If memory allocation fails in the fast path, - * a work item is pushed onto this work queue to retry the allocation later, - * to avoid the NIC being starved of RX buffers. Since this is a per cpu - * workqueue, there is nothing to be gained in making it per NIC - */ -static struct workqueue_struct *refill_workqueue; - /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 
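The sfc changes that follow replace the driver-private EFX_LOG()/EFX_INFO()/EFX_ERR() macros with the generic netif_dbg()/netif_info()/netif_err() helpers, gated per device by a msg_enable bitmap whose default is supplied by the "debug" module parameter added in the next hunk. A rough standalone sketch of that bitmask-gated logging idea is below; the enum values, fake_nic structure and nic_dbg() macro are invented for illustration and are not the sfc driver's (or the kernel's) actual definitions:

	/* Sketch of message-class filtering: a per-device bitmap decides
	 * which classes of messages get printed.  Illustrative names only. */
	#include <stdio.h>

	enum {
		MSG_PROBE = 1 << 0,	/* device discovery */
		MSG_LINK  = 1 << 1,	/* link up/down */
		MSG_IFUP  = 1 << 2,	/* interface bring-up */
	};

	struct fake_nic {
		const char *name;
		unsigned int msg_enable;	/* plays the role of the per-device mask */
	};

	/* Print only when the caller's message class is enabled. */
	#define nic_dbg(nic, class, fmt, ...) \
		do { \
			if ((nic)->msg_enable & (class)) \
				printf("%s: " fmt, (nic)->name, ##__VA_ARGS__); \
		} while (0)

	int main(void)
	{
		/* Default mask, analogous to the "debug" module parameter. */
		struct fake_nic nic = {
			.name = "eth0",
			.msg_enable = MSG_PROBE | MSG_LINK,
		};

		nic_dbg(&nic, MSG_PROBE, "creating NIC\n");		/* printed */
		nic_dbg(&nic, MSG_IFUP, "starting chan %d\n", 0);	/* suppressed */
		return 0;
	}
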
@@ -195,6 +189,13 @@ module_param(irq_adapt_high_thresh, uint, 0644); MODULE_PARM_DESC(irq_adapt_high_thresh, "Threshold score for increasing IRQ moderation"); +static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW); +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); + /************************************************************************** * * Utility functions and prototypes @@ -278,16 +279,16 @@ static int efx_poll(struct napi_struct *napi, int budget) { struct efx_channel *channel = container_of(napi, struct efx_channel, napi_str); + struct efx_nic *efx = channel->efx; int spent; - EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", - channel->channel, raw_smp_processor_id()); + netif_vdbg(efx, intr, efx->net_dev, + "channel %d NAPI poll executing on CPU %d\n", + channel->channel, raw_smp_processor_id()); spent = efx_process_channel(channel, budget); if (spent < budget) { - struct efx_nic *efx = channel->efx; - if (channel->channel < efx->n_rx_channels && efx->irq_rx_adaptive && unlikely(++channel->irq_count == 1000)) { @@ -363,7 +364,8 @@ void efx_process_channel_now(struct efx_channel *channel) */ static int efx_probe_eventq(struct efx_channel *channel) { - EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "chan %d create event queue\n", channel->channel); return efx_nic_probe_eventq(channel); } @@ -371,7 +373,8 @@ static int efx_probe_eventq(struct efx_channel *channel) /* Prepare channel's event queue */ static void efx_init_eventq(struct efx_channel *channel) { - EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d init event queue\n", channel->channel); channel->eventq_read_ptr = 0; @@ -380,14 +383,16 @@ static void efx_init_eventq(struct efx_channel *channel) static void efx_fini_eventq(struct efx_channel *channel) { - EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d fini event queue\n", channel->channel); efx_nic_fini_eventq(channel); } static void efx_remove_eventq(struct efx_channel *channel) { - EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d remove event queue\n", channel->channel); efx_nic_remove_eventq(channel); } @@ -404,7 +409,8 @@ static int efx_probe_channel(struct efx_channel *channel) struct efx_rx_queue *rx_queue; int rc; - EFX_LOG(channel->efx, "creating channel %d\n", channel->channel); + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "creating channel %d\n", channel->channel); rc = efx_probe_eventq(channel); if (rc) @@ -474,12 +480,15 @@ static void efx_init_channels(struct efx_nic *efx) */ efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_hash_size + efx->type->rx_buffer_padding); - efx->rx_buffer_order = get_order(efx->rx_buffer_len); + efx->rx_buffer_order = get_order(efx->rx_buffer_len + + sizeof(struct efx_rx_page_state)); /* Initialise the channels */ efx_for_each_channel(channel, efx) { - EFX_LOG(channel->efx, "init chan %d\n", channel->channel); + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "init chan %d\n", channel->channel); 
efx_init_eventq(channel); @@ -506,7 +515,8 @@ static void efx_start_channel(struct efx_channel *channel) { struct efx_rx_queue *rx_queue; - EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); + netif_dbg(channel->efx, ifup, channel->efx->net_dev, + "starting chan %d\n", channel->channel); /* The interrupt handler for this channel may set work_pending * as soon as we enable it. Make sure it's cleared before @@ -515,11 +525,11 @@ static void efx_start_channel(struct efx_channel *channel) channel->enabled = true; smp_wmb(); - napi_enable(&channel->napi_str); - - /* Load up RX descriptors */ + /* Fill the queues before enabling NAPI */ efx_for_each_channel_rx_queue(rx_queue, channel) efx_fast_push_rx_descriptors(rx_queue); + + napi_enable(&channel->napi_str); } /* This disables event queue processing and packet transmission. @@ -528,21 +538,14 @@ static void efx_start_channel(struct efx_channel *channel) */ static void efx_stop_channel(struct efx_channel *channel) { - struct efx_rx_queue *rx_queue; - if (!channel->enabled) return; - EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); + netif_dbg(channel->efx, ifdown, channel->efx->net_dev, + "stop chan %d\n", channel->channel); channel->enabled = false; napi_disable(&channel->napi_str); - - /* Ensure that any worker threads have exited or will be no-ops */ - efx_for_each_channel_rx_queue(rx_queue, channel) { - spin_lock_bh(&rx_queue->add_lock); - spin_unlock_bh(&rx_queue->add_lock); - } } static void efx_fini_channels(struct efx_nic *efx) @@ -556,13 +559,24 @@ static void efx_fini_channels(struct efx_nic *efx) BUG_ON(efx->port_enabled); rc = efx_nic_flush_queues(efx); - if (rc) - EFX_ERR(efx, "failed to flush queues\n"); - else - EFX_LOG(efx, "successfully flushed all queues\n"); + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
*/ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } efx_for_each_channel(channel, efx) { - EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "shut down chan %d\n", channel->channel); efx_for_each_channel_rx_queue(rx_queue, channel) efx_fini_rx_queue(rx_queue); @@ -577,7 +591,8 @@ static void efx_remove_channel(struct efx_channel *channel) struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; - EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel); + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "destroy chan %d\n", channel->channel); efx_for_each_channel_rx_queue(rx_queue, channel) efx_remove_rx_queue(rx_queue); @@ -586,9 +601,9 @@ static void efx_remove_channel(struct efx_channel *channel) efx_remove_eventq(channel); } -void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) { - queue_delayed_work(refill_workqueue, &rx_queue->work, delay); + mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); } /************************************************************************** @@ -628,12 +643,13 @@ void efx_link_status_changed(struct efx_nic *efx) /* Status message for kernel log */ if (link_state->up) { - EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", - link_state->speed, link_state->fd ? "full" : "half", - efx->net_dev->mtu, - (efx->promiscuous ? " [PROMISC]" : "")); + netif_info(efx, link, efx->net_dev, + "link up at %uMbps %s-duplex (MTU %d)%s\n", + link_state->speed, link_state->fd ? "full" : "half", + efx->net_dev->mtu, + (efx->promiscuous ? 
" [PROMISC]" : "")); } else { - EFX_INFO(efx, "link down\n"); + netif_info(efx, link, efx->net_dev, "link down\n"); } } @@ -737,7 +753,7 @@ static int efx_probe_port(struct efx_nic *efx) { int rc; - EFX_LOG(efx, "create port\n"); + netif_dbg(efx, probe, efx->net_dev, "create port\n"); if (phy_flash_cfg) efx->phy_mode = PHY_MODE_SPECIAL; @@ -751,15 +767,16 @@ static int efx_probe_port(struct efx_nic *efx) if (is_valid_ether_addr(efx->mac_address)) { memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); } else { - EFX_ERR(efx, "invalid MAC address %pM\n", - efx->mac_address); + netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", + efx->mac_address); if (!allow_bad_hwaddr) { rc = -EINVAL; goto err; } random_ether_addr(efx->net_dev->dev_addr); - EFX_INFO(efx, "using locally-generated MAC %pM\n", - efx->net_dev->dev_addr); + netif_info(efx, probe, efx->net_dev, + "using locally-generated MAC %pM\n", + efx->net_dev->dev_addr); } return 0; @@ -773,7 +790,7 @@ static int efx_init_port(struct efx_nic *efx) { int rc; - EFX_LOG(efx, "init port\n"); + netif_dbg(efx, drv, efx->net_dev, "init port\n"); mutex_lock(&efx->mac_lock); @@ -804,7 +821,7 @@ static int efx_init_port(struct efx_nic *efx) static void efx_start_port(struct efx_nic *efx) { - EFX_LOG(efx, "start port\n"); + netif_dbg(efx, ifup, efx->net_dev, "start port\n"); BUG_ON(efx->port_enabled); mutex_lock(&efx->mac_lock); @@ -821,7 +838,7 @@ static void efx_start_port(struct efx_nic *efx) /* Prevent efx_mac_work() and efx_monitor() from working */ static void efx_stop_port(struct efx_nic *efx) { - EFX_LOG(efx, "stop port\n"); + netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); mutex_lock(&efx->mac_lock); efx->port_enabled = false; @@ -836,7 +853,7 @@ static void efx_stop_port(struct efx_nic *efx) static void efx_fini_port(struct efx_nic *efx) { - EFX_LOG(efx, "shut down port\n"); + netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); if (!efx->port_initialized) return; @@ -850,7 +867,7 @@ static void efx_fini_port(struct efx_nic *efx) static void efx_remove_port(struct efx_nic *efx) { - EFX_LOG(efx, "destroying port\n"); + netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); efx->type->remove_port(efx); } @@ -868,11 +885,12 @@ static int efx_init_io(struct efx_nic *efx) dma_addr_t dma_mask = efx->type->max_dma_mask; int rc; - EFX_LOG(efx, "initialising I/O\n"); + netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); rc = pci_enable_device(pci_dev); if (rc) { - EFX_ERR(efx, "failed to enable PCI device\n"); + netif_err(efx, probe, efx->net_dev, + "failed to enable PCI device\n"); goto fail1; } @@ -890,39 +908,45 @@ static int efx_init_io(struct efx_nic *efx) dma_mask >>= 1; } if (rc) { - EFX_ERR(efx, "could not find a suitable DMA mask\n"); + netif_err(efx, probe, efx->net_dev, + "could not find a suitable DMA mask\n"); goto fail2; } - EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask); + netif_dbg(efx, probe, efx->net_dev, + "using DMA mask %llx\n", (unsigned long long) dma_mask); rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); if (rc) { /* pci_set_consistent_dma_mask() is not *allowed* to * fail with a mask that pci_set_dma_mask() accepted, * but just in case... 
*/ - EFX_ERR(efx, "failed to set consistent DMA mask\n"); + netif_err(efx, probe, efx->net_dev, + "failed to set consistent DMA mask\n"); goto fail2; } efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); if (rc) { - EFX_ERR(efx, "request for memory BAR failed\n"); + netif_err(efx, probe, efx->net_dev, + "request for memory BAR failed\n"); rc = -EIO; goto fail3; } efx->membase = ioremap_nocache(efx->membase_phys, efx->type->mem_map_size); if (!efx->membase) { - EFX_ERR(efx, "could not map memory BAR at %llx+%x\n", - (unsigned long long)efx->membase_phys, - efx->type->mem_map_size); + netif_err(efx, probe, efx->net_dev, + "could not map memory BAR at %llx+%x\n", + (unsigned long long)efx->membase_phys, + efx->type->mem_map_size); rc = -ENOMEM; goto fail4; } - EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n", - (unsigned long long)efx->membase_phys, - efx->type->mem_map_size, efx->membase); + netif_dbg(efx, probe, efx->net_dev, + "memory BAR at %llx+%x (virtual %p)\n", + (unsigned long long)efx->membase_phys, + efx->type->mem_map_size, efx->membase); return 0; @@ -938,7 +962,7 @@ static int efx_init_io(struct efx_nic *efx) static void efx_fini_io(struct efx_nic *efx) { - EFX_LOG(efx, "shutting down I/O\n"); + netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); if (efx->membase) { iounmap(efx->membase); @@ -1002,9 +1026,11 @@ static void efx_probe_interrupts(struct efx_nic *efx) xentries[i].entry = i; rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); if (rc > 0) { - EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors" - " available (%d < %d).\n", rc, n_channels); - EFX_ERR(efx, "WARNING: Performance may be reduced.\n"); + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors" + " available (%d < %d).\n", rc, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); EFX_BUG_ON_PARANOID(rc >= n_channels); n_channels = rc; rc = pci_enable_msix(efx->pci_dev, xentries, @@ -1028,7 +1054,8 @@ static void efx_probe_interrupts(struct efx_nic *efx) } else { /* Fall back to single channel MSI */ efx->interrupt_mode = EFX_INT_MODE_MSI; - EFX_ERR(efx, "could not enable MSI-X\n"); + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); } } @@ -1041,7 +1068,8 @@ static void efx_probe_interrupts(struct efx_nic *efx) if (rc == 0) { efx->channel[0].irq = efx->pci_dev->irq; } else { - EFX_ERR(efx, "could not enable MSI\n"); + netif_err(efx, drv, efx->net_dev, + "could not enable MSI\n"); efx->interrupt_mode = EFX_INT_MODE_LEGACY; } } @@ -1093,9 +1121,10 @@ static void efx_set_channels(struct efx_nic *efx) static int efx_probe_nic(struct efx_nic *efx) { + size_t i; int rc; - EFX_LOG(efx, "creating NIC\n"); + netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); /* Carry out hardware-type specific initialisation */ rc = efx->type->probe(efx); @@ -1106,6 +1135,11 @@ static int efx_probe_nic(struct efx_nic *efx) * in MSI-X interrupts. 
*/ efx_probe_interrupts(efx); + if (efx->n_channels > 1) + get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = i % efx->n_rx_channels; + efx_set_channels(efx); efx->net_dev->real_num_tx_queues = efx->n_tx_channels; @@ -1117,7 +1151,7 @@ static int efx_probe_nic(struct efx_nic *efx) static void efx_remove_nic(struct efx_nic *efx) { - EFX_LOG(efx, "destroying NIC\n"); + netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); efx_remove_interrupts(efx); efx->type->remove(efx); @@ -1137,14 +1171,14 @@ static int efx_probe_all(struct efx_nic *efx) /* Create NIC */ rc = efx_probe_nic(efx); if (rc) { - EFX_ERR(efx, "failed to create NIC\n"); + netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); goto fail1; } /* Create port */ rc = efx_probe_port(efx); if (rc) { - EFX_ERR(efx, "failed to create port\n"); + netif_err(efx, probe, efx->net_dev, "failed to create port\n"); goto fail2; } @@ -1152,8 +1186,9 @@ static int efx_probe_all(struct efx_nic *efx) efx_for_each_channel(channel, efx) { rc = efx_probe_channel(channel); if (rc) { - EFX_ERR(efx, "failed to create channel %d\n", - channel->channel); + netif_err(efx, probe, efx->net_dev, + "failed to create channel %d\n", + channel->channel); goto fail3; } } @@ -1233,15 +1268,8 @@ static void efx_start_all(struct efx_nic *efx) * since we're holding the rtnl_lock at this point. */ static void efx_flush_all(struct efx_nic *efx) { - struct efx_rx_queue *rx_queue; - /* Make sure the hardware monitor is stopped */ cancel_delayed_work_sync(&efx->monitor_work); - - /* Ensure that all RX slow refills are complete. */ - efx_for_each_rx_queue(rx_queue, efx) - cancel_delayed_work_sync(&rx_queue->work); - /* Stop scheduled port reconfigurations */ cancel_work_sync(&efx->mac_work); } @@ -1356,8 +1384,9 @@ static void efx_monitor(struct work_struct *data) struct efx_nic *efx = container_of(data, struct efx_nic, monitor_work.work); - EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", - raw_smp_processor_id()); + netif_vdbg(efx, timer, efx->net_dev, + "hardware monitor executing on CPU %d\n", + raw_smp_processor_id()); BUG_ON(efx->type->monitor == NULL); /* If the mac_lock is already held then it is likely a port @@ -1464,8 +1493,8 @@ static int efx_net_open(struct net_device *net_dev) struct efx_nic *efx = netdev_priv(net_dev); EFX_ASSERT_RESET_SERIALISED(efx); - EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, - raw_smp_processor_id()); + netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); if (efx->state == STATE_DISABLED) return -EIO; @@ -1490,8 +1519,8 @@ static int efx_net_stop(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, - raw_smp_processor_id()); + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); if (efx->state != STATE_DISABLED) { /* Stop the device and flush all the channels */ @@ -1504,11 +1533,10 @@ static int efx_net_stop(struct net_device *net_dev) } /* Context: process, dev_base_lock or RTNL held, non-blocking. 
*/ -static struct net_device_stats *efx_net_stats(struct net_device *net_dev) +static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_mac_stats *mac_stats = &efx->mac_stats; - struct net_device_stats *stats = &net_dev->stats; spin_lock_bh(&efx->stats_lock); efx->type->update_stats(efx); @@ -1530,11 +1558,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) stats->tx_window_errors = mac_stats->tx_late_collision; stats->rx_errors = (stats->rx_length_errors + - stats->rx_over_errors + stats->rx_crc_errors + stats->rx_frame_errors + - stats->rx_fifo_errors + - stats->rx_missed_errors + mac_stats->rx_symbol_error); stats->tx_errors = (stats->tx_window_errors + mac_stats->tx_bad); @@ -1547,8 +1572,9 @@ static void efx_watchdog(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n", - efx->port_enabled); + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); } @@ -1567,7 +1593,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) efx_stop_all(efx); - EFX_LOG(efx, "changing MTU to %d\n", new_mtu); + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); efx_fini_channels(efx); @@ -1593,8 +1619,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) EFX_ASSERT_RESET_SERIALISED(efx); if (!is_valid_ether_addr(new_addr)) { - EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n", - new_addr); + netif_err(efx, drv, efx->net_dev, + "invalid ethernet MAC address requested: %pM\n", + new_addr); return -EINVAL; } @@ -1645,7 +1672,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, - .ndo_get_stats = efx_net_stats, + .ndo_get_stats64 = efx_net_stats, .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, @@ -1697,7 +1724,6 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; - SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); /* Clear MAC statistics */ @@ -1722,7 +1748,8 @@ static int efx_register_netdev(struct efx_nic *efx) rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); if (rc) { - EFX_ERR(efx, "failed to init net dev attributes\n"); + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); goto fail_registered; } @@ -1730,7 +1757,7 @@ static int efx_register_netdev(struct efx_nic *efx) fail_locked: rtnl_unlock(); - EFX_ERR(efx, "could not register net dev\n"); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); return rc; fail_registered: @@ -1795,7 +1822,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) rc = efx->type->init(efx); if (rc) { - EFX_ERR(efx, "failed to initialise NIC\n"); + netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); goto fail; } @@ -1807,7 +1834,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (rc) goto fail; if (efx->phy_op->reconfigure(efx)) - EFX_ERR(efx, "could not restore PHY settings\n"); + netif_err(efx, drv, efx->net_dev, + "could not restore PHY 
settings\n"); } efx->mac_op->reconfigure(efx); @@ -1840,13 +1868,14 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) int rc, rc2; bool disabled; - EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method)); + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", + RESET_TYPE(method)); efx_reset_down(efx, method); rc = efx->type->reset(efx, method); if (rc) { - EFX_ERR(efx, "failed to reset hardware\n"); + netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); goto out; } @@ -1871,10 +1900,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) if (disabled) { dev_close(efx->net_dev); - EFX_ERR(efx, "has been disabled\n"); + netif_err(efx, drv, efx->net_dev, "has been disabled\n"); efx->state = STATE_DISABLED; } else { - EFX_LOG(efx, "reset complete\n"); + netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); } return rc; } @@ -1886,10 +1915,14 @@ static void efx_reset_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); + if (efx->reset_pending == RESET_TYPE_NONE) + return; + /* If we're not RUNNING then don't reset. Leave the reset_pending * flag set so that efx_pci_probe_main will be retried */ if (efx->state != STATE_RUNNING) { - EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); + netif_info(efx, drv, efx->net_dev, + "scheduled reset quenched. NIC not RUNNING\n"); return; } @@ -1903,7 +1936,8 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) enum reset_type method; if (efx->reset_pending != RESET_TYPE_NONE) { - EFX_INFO(efx, "quenching already scheduled reset\n"); + netif_info(efx, drv, efx->net_dev, + "quenching already scheduled reset\n"); return; } @@ -1927,10 +1961,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) } if (method != type) - EFX_LOG(efx, "scheduling %s reset for %s\n", - RESET_TYPE(method), RESET_TYPE(type)); + netif_dbg(efx, drv, efx->net_dev, + "scheduling %s reset for %s\n", + RESET_TYPE(method), RESET_TYPE(type)); else - EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method)); + netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", + RESET_TYPE(method)); efx->reset_pending = method; @@ -2017,6 +2053,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, INIT_WORK(&efx->reset_work, efx_reset_work); INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); efx->pci_dev = pci_dev; + efx->msg_enable = debug; efx->state = STATE_INIT; efx->reset_pending = RESET_TYPE_NONE; strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); @@ -2052,8 +2089,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, rx_queue->queue = i; rx_queue->channel = &efx->channel[0]; /* for safety */ rx_queue->buffer = NULL; - spin_lock_init(&rx_queue->add_lock); - INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work); + setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, + (unsigned long)rx_queue); } efx->type = type; @@ -2136,7 +2173,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_pci_remove_main(efx); efx_fini_io(efx); - EFX_LOG(efx, "shutdown successful\n"); + netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); pci_set_drvdata(pci_dev, NULL); efx_fini_struct(efx); @@ -2161,13 +2198,15 @@ static int efx_pci_probe_main(struct efx_nic *efx) rc = efx->type->init(efx); if (rc) { - EFX_ERR(efx, "failed to initialise NIC\n"); + netif_err(efx, probe, efx->net_dev, + "failed to initialise NIC\n"); goto fail3; } rc = efx_init_port(efx); if (rc) { - EFX_ERR(efx, "failed to initialise port\n"); + 
netif_err(efx, probe, efx->net_dev, + "failed to initialise port\n"); goto fail4; } @@ -2223,11 +2262,13 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, NETIF_F_HIGHDMA | NETIF_F_TSO); efx = netdev_priv(net_dev); pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); rc = efx_init_struct(efx, type, pci_dev, net_dev); if (rc) goto fail1; - EFX_INFO(efx, "Solarflare Communications NIC detected\n"); + netif_info(efx, probe, efx->net_dev, + "Solarflare Communications NIC detected\n"); /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx); @@ -2265,7 +2306,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, } if (rc) { - EFX_ERR(efx, "Could not reset NIC\n"); + netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); goto fail4; } @@ -2277,7 +2318,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, if (rc) goto fail5; - EFX_LOG(efx, "initialisation successful\n"); + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); rtnl_lock(); efx_mtd_probe(efx); /* allowed to fail */ @@ -2293,7 +2334,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, efx_fini_struct(efx); fail1: WARN_ON(rc > 0); - EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); + netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); free_netdev(net_dev); return rc; } @@ -2332,6 +2373,9 @@ static int efx_pm_thaw(struct device *dev) efx->type->resume_wol(efx); + /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ + queue_work(reset_workqueue, &efx->reset_work); + return 0; } @@ -2394,7 +2438,7 @@ static struct dev_pm_ops efx_pm_ops = { }; static struct pci_driver efx_pci_driver = { - .name = EFX_DRIVER_NAME, + .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, @@ -2421,11 +2465,6 @@ static int __init efx_init_module(void) if (rc) goto err_notifier; - refill_workqueue = create_workqueue("sfc_refill"); - if (!refill_workqueue) { - rc = -ENOMEM; - goto err_refill; - } reset_workqueue = create_singlethread_workqueue("sfc_reset"); if (!reset_workqueue) { rc = -ENOMEM; @@ -2441,8 +2480,6 @@ static int __init efx_init_module(void) err_pci: destroy_workqueue(reset_workqueue); err_reset: - destroy_workqueue(refill_workqueue); - err_refill: unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; @@ -2454,7 +2491,6 @@ static void __exit efx_exit_module(void) pci_unregister_driver(&efx_pci_driver); destroy_workqueue(reset_workqueue); - destroy_workqueue(refill_workqueue); unregister_netdevice_notifier(&efx_netdev_notifier); } diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index ffd708c5304af0dd7d584c50b733babb1f6b36ad..060dc952a0fd062f93ce21642d3a8471299c86e8 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h @@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_rx_strategy(struct efx_channel *channel); extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); -extern void efx_rx_work(struct work_struct *data); +extern void efx_rx_slow_fill(unsigned long context); extern void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, bool checksummed); extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, unsigned int len, bool checksummed, bool discard); -extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); +extern void 
efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_RXQ_SIZE 1024 #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) @@ -106,8 +106,9 @@ extern unsigned int efx_monitor_interval; static inline void efx_schedule_channel(struct efx_channel *channel) { - EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n", - channel->channel, raw_smp_processor_id()); + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); channel->work_pending = true; napi_schedule(&channel->napi_str); diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 22026bfbc4c1e8adbef1bacf30079edb0bf2f1c3..fd19d6ab97a256259db66636f2856bc3bf8ebe26 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -218,8 +218,8 @@ int efx_ethtool_set_settings(struct net_device *net_dev, /* GMAC does not support 1000Mbps HD */ if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { - EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" - " setting\n"); + netif_dbg(efx, drv, efx->net_dev, + "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } @@ -234,7 +234,7 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, { struct efx_nic *efx = netdev_priv(net_dev); - strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) siena_print_fwver(efx, info->fw_version, @@ -242,6 +242,32 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } +static int efx_ethtool_get_regs_len(struct net_device *net_dev) +{ + return efx_nic_get_regs_len(netdev_priv(net_dev)); +} + +static void efx_ethtool_get_regs(struct net_device *net_dev, + struct ethtool_regs *regs, void *buf) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + regs->version = efx->type->revision; + efx_nic_get_regs(efx, buf); +} + +static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->msg_enable; +} + +static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) +{ + struct efx_nic *efx = netdev_priv(net_dev); + efx->msg_enable = msg_enable; +} + /** * efx_fill_test - fill in an individual self-test entry * @test_index: Index of the test @@ -443,12 +469,13 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, struct efx_mac_stats *mac_stats = &efx->mac_stats; struct efx_ethtool_stat *stat; struct efx_channel *channel; + struct rtnl_link_stats64 temp; int i; EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); /* Update MAC and NIC statistics */ - dev_get_stats(net_dev); + dev_get_stats(net_dev, &temp); /* Fill detailed statistics buffer */ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { @@ -520,6 +547,14 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) return efx->rx_checksum_enabled; } +static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH; + + return ethtool_op_set_flags(net_dev, data, supported); +} + static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { @@ -539,7 +574,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (!already_up) { rc = 
dev_open(efx->net_dev); if (rc) { - EFX_ERR(efx, "failed opening device.\n"); + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); goto fail2; } } @@ -551,9 +587,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (!already_up) dev_close(efx->net_dev); - EFX_LOG(efx, "%s %sline self-tests\n", - rc == 0 ? "passed" : "failed", - (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); fail2: fail1: @@ -679,8 +715,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, return -EOPNOTSUPP; if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { - EFX_ERR(efx, "invalid coalescing setting. " - "Only rx/tx_coalesce_usecs_irq are supported\n"); + netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. " + "Only rx/tx_coalesce_usecs_irq are supported\n"); return -EOPNOTSUPP; } @@ -692,8 +728,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, efx_for_each_tx_queue(tx_queue, efx) { if ((tx_queue->channel->channel < efx->n_rx_channels) && tx_usecs) { - EFX_ERR(efx, "Channel is shared. " - "Only RX coalescing may be set\n"); + netif_err(efx, drv, efx->net_dev, "Channel is shared. " + "Only RX coalescing may be set\n"); return -EOPNOTSUPP; } } @@ -721,13 +757,15 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, (pause->autoneg ? EFX_FC_AUTO : 0)); if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { - EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n"); + netif_dbg(efx, drv, efx->net_dev, + "Flow control unsupported: tx ON rx OFF\n"); rc = -EINVAL; goto out; } if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { - EFX_LOG(efx, "Autonegotiation is disabled\n"); + netif_dbg(efx, drv, efx->net_dev, + "Autonegotiation is disabled\n"); rc = -EINVAL; goto out; } @@ -758,8 +796,9 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx->phy_op->reconfigure(efx); if (rc) { - EFX_ERR(efx, "Unable to advertise requested flow " - "control setting\n"); + netif_err(efx, drv, efx->net_dev, + "Unable to advertise requested flow " + "control setting\n"); goto out; } } @@ -830,10 +869,101 @@ extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) return efx_reset(efx, method); } +static int +efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, void *rules __always_unused) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = efx->n_rx_channels; + return 0; + + case ETHTOOL_GRXFH: { + unsigned min_revision = 0; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + min_revision = EFX_REV_FALCON_B0; + break; + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + min_revision = EFX_REV_SIENA_A0; + break; + default: + break; + } + if (efx_nic_rev(efx) < min_revision) + info->data = 0; + return 0; + } + + default: + return -EOPNOTSUPP; + } +} + +static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, + struct ethtool_rxfh_indir *indir) +{ + 
struct efx_nic *efx = netdev_priv(net_dev); + size_t copy_size = + min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + return -EOPNOTSUPP; + + indir->size = ARRAY_SIZE(efx->rx_indir_table); + memcpy(indir->ring_index, efx->rx_indir_table, + copy_size * sizeof(indir->ring_index[0])); + return 0; +} + +static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, + const struct ethtool_rxfh_indir *indir) +{ + struct efx_nic *efx = netdev_priv(net_dev); + size_t i; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + return -EOPNOTSUPP; + + /* Validate size and indices */ + if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + if (indir->ring_index[i] >= efx->n_rx_channels) + return -EINVAL; + + memcpy(efx->rx_indir_table, indir->ring_index, + sizeof(efx->rx_indir_table)); + efx_nic_push_rx_indir_table(efx); + return 0; +} + const struct ethtool_ops efx_ethtool_ops = { .get_settings = efx_ethtool_get_settings, .set_settings = efx_ethtool_set_settings, .get_drvinfo = efx_ethtool_get_drvinfo, + .get_regs_len = efx_ethtool_get_regs_len, + .get_regs = efx_ethtool_get_regs, + .get_msglevel = efx_ethtool_get_msglevel, + .set_msglevel = efx_ethtool_set_msglevel, .nway_reset = efx_ethtool_nway_reset, .get_link = efx_ethtool_get_link, .get_eeprom_len = efx_ethtool_get_eeprom_len, @@ -854,7 +984,7 @@ const struct ethtool_ops efx_ethtool_ops = { /* Need to enable/disable TSO-IPv6 too */ .set_tso = efx_ethtool_set_tso, .get_flags = ethtool_op_get_flags, - .set_flags = ethtool_op_set_flags, + .set_flags = efx_ethtool_set_flags, .get_sset_count = efx_ethtool_get_sset_count, .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, @@ -863,4 +993,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_wol = efx_ethtool_get_wol, .set_wol = efx_ethtool_set_wol, .reset = efx_ethtool_reset, + .get_rxnfc = efx_ethtool_get_rxnfc, + .get_rxfh_indir = efx_ethtool_get_rxfh_indir, + .set_rxfh_indir = efx_ethtool_set_rxfh_indir, }; diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 655b697b45b2db80a147722a0d1c91bedb093672..4f9d33f3cca1899a9fa745af4cb49c734383031f 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -167,13 +167,15 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) * exit without having touched the hardware. */ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { - EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq, - raw_smp_processor_id()); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d not for me\n", irq, + raw_smp_processor_id()); return IRQ_NONE; } efx->last_irq_cpu = raw_smp_processor_id(); - EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); /* Determine interrupting queues, clear interrupt status * register and acknowledge the device interrupt. 
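As an aside to the hunks above and below: this patch replaces the driver's private EFX_LOG/EFX_ERR/EFX_TRACE/EFX_REGDUMP macros with the generic netif_dbg/netif_err/netif_info/netif_vdbg helpers, seeds efx->msg_enable in efx_init_struct(), and exposes the bitmask through the new get_msglevel/set_msglevel ethtool operations. The following is only a rough sketch of that mechanism, not code from the patch; the private structure is invented, and the helpers in fact require nothing more than a msg_enable field and a net_device pointer:

    #include <linux/netdevice.h>

    struct example_priv {
            struct net_device *net_dev;
            u32 msg_enable;                 /* NETIF_MSG_* bits */
    };

    static void example_log(struct example_priv *priv)
    {
            /* Emitted only when NETIF_MSG_HW is set in priv->msg_enable,
             * which userspace can adjust via ethtool's msglvl option. */
            netif_dbg(priv, hw, priv->net_dev, "example hardware debug\n");

            /* netif_err() is gated the same way, on the bit named by its
             * second argument (here NETIF_MSG_DRV). */
            netif_err(priv, drv, priv->net_dev, "example error\n");
    }
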
@@ -239,7 +241,8 @@ static int falcon_spi_wait(struct efx_nic *efx) if (!falcon_spi_poll(efx)) return 0; if (time_after_eq(jiffies, timeout)) { - EFX_ERR(efx, "timed out waiting for SPI\n"); + netif_err(efx, hw, efx->net_dev, + "timed out waiting for SPI\n"); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); @@ -333,9 +336,10 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) if (!(status & SPI_STATUS_NRDY)) return 0; if (time_after_eq(jiffies, timeout)) { - EFX_ERR(efx, "SPI write timeout on device %d" - " last status=0x%02x\n", - spi->device_id, status); + netif_err(efx, hw, efx->net_dev, + "SPI write timeout on device %d" + " last status=0x%02x\n", + spi->device_id, status); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); @@ -469,7 +473,8 @@ static void falcon_reset_macs(struct efx_nic *efx) udelay(10); } - EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); + netif_err(efx, hw, efx->net_dev, + "timed out waiting for XMAC core reset\n"); } } @@ -492,12 +497,13 @@ static void falcon_reset_macs(struct efx_nic *efx) if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { - EFX_LOG(efx, "Completed MAC reset after %d loops\n", - count); + netif_dbg(efx, hw, efx->net_dev, + "Completed MAC reset after %d loops\n", + count); break; } if (count > 20) { - EFX_ERR(efx, "MAC reset failed\n"); + netif_err(efx, hw, efx->net_dev, "MAC reset failed\n"); break; } count++; @@ -548,7 +554,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; efx_oword_t reg; - int link_speed; + int link_speed, isolate; + + isolate = (efx->reset_pending != RESET_TYPE_NONE); switch (link_state->speed) { case 10000: link_speed = 3; break; @@ -570,7 +578,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) * discarded. 
*/ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, - !link_state->up); + !link_state->up || isolate); } efx_writeo(efx, ®, FR_AB_MAC_CTRL); @@ -584,7 +592,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); /* Unisolate the MAC -> RX */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) - EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate); efx_writeo(efx, ®, FR_AZ_RX_CFG); } @@ -625,7 +633,8 @@ static void falcon_stats_complete(struct efx_nic *efx) rmb(); /* read the done flag before the stats */ efx->mac_op->update_stats(efx); } else { - EFX_ERR(efx, "timed out waiting for statistics\n"); + netif_err(efx, hw, efx->net_dev, + "timed out waiting for statistics\n"); } } @@ -715,16 +724,17 @@ static int falcon_gmii_wait(struct efx_nic *efx) if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { - EFX_ERR(efx, "error from GMII access " - EFX_OWORD_FMT"\n", - EFX_OWORD_VAL(md_stat)); + netif_err(efx, hw, efx->net_dev, + "error from GMII access " + EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(md_stat)); return -EIO; } return 0; } udelay(10); } - EFX_ERR(efx, "timed out waiting for GMII\n"); + netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); return -ETIMEDOUT; } @@ -736,7 +746,8 @@ static int falcon_mdio_write(struct net_device *net_dev, efx_oword_t reg; int rc; - EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", + netif_vdbg(efx, hw, efx->net_dev, + "writing MDIO %d register %d.%d with 0x%04x\n", prtad, devad, addr, value); mutex_lock(&efx->mdio_lock); @@ -810,8 +821,9 @@ static int falcon_mdio_read(struct net_device *net_dev, if (rc == 0) { efx_reado(efx, ®, FR_AB_MD_RXD); rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); - EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", - prtad, devad, addr, rc); + netif_vdbg(efx, hw, efx->net_dev, + "read from MDIO %d register %d.%d, got %04x\n", + prtad, devad, addr, rc); } else { /* Abort the read operation */ EFX_POPULATE_OWORD_2(reg, @@ -819,8 +831,9 @@ static int falcon_mdio_read(struct net_device *net_dev, FRF_AB_MD_GC, 1); efx_writeo(efx, ®, FR_AB_MD_CS); - EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", - prtad, devad, addr, rc); + netif_dbg(efx, hw, efx->net_dev, + "read from MDIO %d register %d.%d, got error %d\n", + prtad, devad, addr, rc); } out: @@ -871,7 +884,8 @@ static void falcon_switch_mac(struct efx_nic *efx) falcon_clock_mac(efx); - EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); + netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n", + EFX_IS10G(efx) ? 
'X' : 'G'); /* Not all macs support a mac-level link state */ efx->xmac_poll_required = false; falcon_reset_macs(efx); @@ -895,8 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx) efx->phy_op = &falcon_qt202x_phy_ops; break; default: - EFX_ERR(efx, "Unknown PHY type %d\n", - efx->phy_type); + netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", + efx->phy_type); return -ENODEV; } @@ -924,10 +938,11 @@ static int falcon_probe_port(struct efx_nic *efx) FALCON_MAC_STATS_SIZE); if (rc) return rc; - EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", - (u64)efx->stats_buffer.dma_addr, - efx->stats_buffer.addr, - (u64)virt_to_phys(efx->stats_buffer.addr)); + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64)efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64)virt_to_phys(efx->stats_buffer.addr)); return 0; } @@ -967,8 +982,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); mutex_unlock(&efx->spi_lock); if (rc) { - EFX_ERR(efx, "Failed to read %s\n", - efx->spi_flash ? "flash" : "EEPROM"); + netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", + efx->spi_flash ? "flash" : "EEPROM"); rc = -EIO; goto out; } @@ -978,11 +993,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) rc = -EINVAL; if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { - EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); + netif_err(efx, hw, efx->net_dev, + "NVRAM bad magic 0x%x\n", magic_num); goto out; } if (struct_ver < 2) { - EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver); + netif_err(efx, hw, efx->net_dev, + "NVRAM has ancient version 0x%x\n", struct_ver); goto out; } else if (struct_ver < 4) { word = &nvconfig->board_magic_num; @@ -995,7 +1012,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) csum += le16_to_cpu(*word); if (~csum & 0xffff) { - EFX_ERR(efx, "NVRAM has incorrect checksum\n"); + netif_err(efx, hw, efx->net_dev, + "NVRAM has incorrect checksum\n"); goto out; } @@ -1073,22 +1091,25 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) efx_oword_t glb_ctl_reg_ker; int rc; - EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method)); + netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n", + RESET_TYPE(method)); /* Initiate device reset */ if (method == RESET_TYPE_WORLD) { rc = pci_save_state(efx->pci_dev); if (rc) { - EFX_ERR(efx, "failed to backup PCI state of primary " - "function prior to hardware reset\n"); + netif_err(efx, drv, efx->net_dev, + "failed to backup PCI state of primary " + "function prior to hardware reset\n"); goto fail1; } if (efx_nic_is_dual_func(efx)) { rc = pci_save_state(nic_data->pci_dev2); if (rc) { - EFX_ERR(efx, "failed to backup PCI state of " - "secondary function prior to " - "hardware reset\n"); + netif_err(efx, drv, efx->net_dev, + "failed to backup PCI state of " + "secondary function prior to " + "hardware reset\n"); goto fail2; } } @@ -1113,7 +1134,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) } efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); - EFX_LOG(efx, "waiting for hardware reset\n"); + netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n"); schedule_timeout_uninterruptible(HZ / 20); /* Restore PCI configuration if needed */ @@ -1121,28 +1142,32 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) if 
(efx_nic_is_dual_func(efx)) { rc = pci_restore_state(nic_data->pci_dev2); if (rc) { - EFX_ERR(efx, "failed to restore PCI config for " - "the secondary function\n"); + netif_err(efx, drv, efx->net_dev, + "failed to restore PCI config for " + "the secondary function\n"); goto fail3; } } rc = pci_restore_state(efx->pci_dev); if (rc) { - EFX_ERR(efx, "failed to restore PCI config for the " - "primary function\n"); + netif_err(efx, drv, efx->net_dev, + "failed to restore PCI config for the " + "primary function\n"); goto fail4; } - EFX_LOG(efx, "successfully restored PCI config\n"); + netif_dbg(efx, drv, efx->net_dev, + "successfully restored PCI config\n"); } /* Assert that reset complete */ efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { rc = -ETIMEDOUT; - EFX_ERR(efx, "timed out waiting for hardware reset\n"); + netif_err(efx, hw, efx->net_dev, + "timed out waiting for hardware reset\n"); goto fail5; } - EFX_LOG(efx, "hardware reset complete\n"); + netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); return 0; @@ -1165,8 +1190,9 @@ static void falcon_monitor(struct efx_nic *efx) rc = falcon_board(efx)->type->monitor(efx); if (rc) { - EFX_ERR(efx, "Board sensor %s; shutting down PHY\n", - (rc == -ERANGE) ? "reported fault" : "failed"); + netif_err(efx, hw, efx->net_dev, + "Board sensor %s; shutting down PHY\n", + (rc == -ERANGE) ? "reported fault" : "failed"); efx->phy_mode |= PHY_MODE_LOW_POWER; rc = __efx_reconfigure_port(efx); WARN_ON(rc); @@ -1217,7 +1243,8 @@ static int falcon_reset_sram(struct efx_nic *efx) /* Wait for SRAM reset to complete */ count = 0; do { - EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count); + netif_dbg(efx, hw, efx->net_dev, + "waiting for SRAM reset (attempt %d)...\n", count); /* SRAM reset is slow; expect around 16ms */ schedule_timeout_uninterruptible(HZ / 50); @@ -1225,13 +1252,14 @@ static int falcon_reset_sram(struct efx_nic *efx) /* Check for reset complete */ efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { - EFX_LOG(efx, "SRAM reset complete\n"); + netif_dbg(efx, hw, efx->net_dev, + "SRAM reset complete\n"); return 0; } } while (++count < 20); /* wait upto 0.4 sec */ - EFX_ERR(efx, "timed out waiting for SRAM reset\n"); + netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); return -ETIMEDOUT; } @@ -1290,7 +1318,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) rc = falcon_read_nvram(efx, nvconfig); if (rc == -EINVAL) { - EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); + netif_err(efx, probe, efx->net_dev, + "NVRAM is invalid therefore using defaults\n"); efx->phy_type = PHY_TYPE_NONE; efx->mdio.prtad = MDIO_PRTAD_NONE; board_rev = 0; @@ -1324,7 +1353,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) /* Read the MAC addresses */ memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); - EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); + netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", + efx->phy_type, efx->mdio.prtad); rc = falcon_probe_board(efx, board_rev); if (rc) @@ -1353,14 +1383,16 @@ static void falcon_probe_spi_devices(struct efx_nic *efx) if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); - EFX_LOG(efx, "Booted from %s\n", - boot_dev == FFE_AB_SPI_DEVICE_FLASH ? 
"flash" : "EEPROM"); + netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n", + boot_dev == FFE_AB_SPI_DEVICE_FLASH ? + "flash" : "EEPROM"); } else { /* Disable VPD and set clock dividers to safe * values for initial programming. */ boot_dev = -1; - EFX_LOG(efx, "Booted from internal ASIC settings;" - " setting SPI config\n"); + netif_dbg(efx, probe, efx->net_dev, + "Booted from internal ASIC settings;" + " setting SPI config\n"); EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, /* 125 MHz / 7 ~= 20 MHz */ FRF_AB_EE_SF_CLOCK_DIV, 7, @@ -1394,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx) rc = -ENODEV; if (efx_nic_fpga_ver(efx) != 0) { - EFX_ERR(efx, "Falcon FPGA not supported\n"); + netif_err(efx, probe, efx->net_dev, + "Falcon FPGA not supported\n"); goto fail1; } @@ -1404,16 +1437,19 @@ static int falcon_probe_nic(struct efx_nic *efx) u8 pci_rev = efx->pci_dev->revision; if ((pci_rev == 0xff) || (pci_rev == 0)) { - EFX_ERR(efx, "Falcon rev A0 not supported\n"); + netif_err(efx, probe, efx->net_dev, + "Falcon rev A0 not supported\n"); goto fail1; } efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { - EFX_ERR(efx, "Falcon rev A1 1G not supported\n"); + netif_err(efx, probe, efx->net_dev, + "Falcon rev A1 1G not supported\n"); goto fail1; } if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { - EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); + netif_err(efx, probe, efx->net_dev, + "Falcon rev A1 PCI-X not supported\n"); goto fail1; } @@ -1427,7 +1463,8 @@ static int falcon_probe_nic(struct efx_nic *efx) } } if (!nic_data->pci_dev2) { - EFX_ERR(efx, "failed to find secondary function\n"); + netif_err(efx, probe, efx->net_dev, + "failed to find secondary function\n"); rc = -ENODEV; goto fail2; } @@ -1436,7 +1473,7 @@ static int falcon_probe_nic(struct efx_nic *efx) /* Now we can reset the NIC */ rc = falcon_reset_hw(efx, RESET_TYPE_ALL); if (rc) { - EFX_ERR(efx, "failed to reset NIC\n"); + netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } @@ -1446,9 +1483,11 @@ static int falcon_probe_nic(struct efx_nic *efx) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); - EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", - (u64)efx->irq_status.dma_addr, - efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (u64)efx->irq_status.dma_addr, + efx->irq_status.addr, + (u64)virt_to_phys(efx->irq_status.addr)); falcon_probe_spi_devices(efx); @@ -1472,7 +1511,8 @@ static int falcon_probe_nic(struct efx_nic *efx) rc = falcon_board(efx)->type->init(efx); if (rc) { - EFX_ERR(efx, "failed to initialise board\n"); + netif_err(efx, probe, efx->net_dev, + "failed to initialise board\n"); goto fail6; } @@ -1542,6 +1582,13 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); + + /* Enable hash insertion. This is broken for the + * 'Falcon' hash so also select Toeplitz TCP/IPv4 and + * IPv4 hashes. */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1); } /* Always enable XOFF signal from RX FIFO. We enable * or disable transmission of pause frames at the MAC. 
*/ @@ -1615,8 +1662,12 @@ static int falcon_init_nic(struct efx_nic *efx) falcon_init_rx_cfg(efx); - /* Set destination of both TX and RX Flush events */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + /* Set destination of both TX and RX Flush events */ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); efx_writeo(efx, &temp, FR_BZ_DP_CTRL); } @@ -1821,6 +1872,7 @@ struct efx_nic_type falcon_b0_nic_type = { .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy @@ -1828,7 +1880,7 @@ struct efx_nic_type falcon_b0_nic_type = { * channels */ .tx_dc_base = 0x130000, .rx_dc_base = 0x100000, - .offload_features = NETIF_F_IP_CSUM, + .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH, .reset_world_flags = ETH_RESET_IRQ, }; diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index c7a933a3292e6797b6bc55e87d8499268a63976a..3d950c2cf205965d6aafced30330acd235d48734 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c @@ -106,12 +106,17 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask) alarms1 &= mask; alarms2 &= mask >> 8; if (alarms1 || alarms2) { - EFX_ERR(efx, - "LM87 detected a hardware failure (status %02x:%02x)" - "%s%s\n", - alarms1, alarms2, - (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "", - (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : ""); + netif_err(efx, hw, efx->net_dev, + "LM87 detected a hardware failure (status %02x:%02x)" + "%s%s%s\n", + alarms1, alarms2, + (alarms1 & LM87_ALARM_TEMP_INT) ? + "; board is overheating" : "", + (alarms1 & LM87_ALARM_TEMP_EXT1) ? + "; controller is overheating" : "", + (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1) + || alarms2) ? + "; electrical fault" : ""); return -ERANGE; } @@ -243,7 +248,7 @@ static int sfe4001_poweron(struct efx_nic *efx) (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | (0 << P0_EN_1V0X_LBN)); if (rc != out) { - EFX_INFO(efx, "power-cycling PHY\n"); + netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n"); rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; @@ -269,7 +274,8 @@ static int sfe4001_poweron(struct efx_nic *efx) if (rc) goto fail_on; - EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i); + netif_info(efx, hw, efx->net_dev, + "waiting for DSP boot (attempt %d)...\n", i); /* In flash config mode, DSP does not turn on AFE, so * just wait 1 second. 
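Several hunks in this patch cooperate to add receive-side hashing: efx_probe_nic() fills rx_indir_table with a round-robin default and generates a random hash key when more than one channel is in use, falcon_init_rx_cfg() and falcon_init_nic() enable Toeplitz hash insertion and load the key into FR_BZ_RX_RSS_TKEY, and the new get/set_rxfh_indir ethtool operations let userspace read back or replace the table. Below is a stand-alone user-space sketch of the default fill and of the bounds check applied to user-supplied tables; the 128-entry table size and the channel count are assumptions made purely for illustration:

    #include <stdio.h>

    #define RX_INDIR_SIZE 128

    /* Reject any entry that points at a non-existent RX channel,
     * mirroring the check in efx_ethtool_set_rxfh_indir(). */
    static int validate_indir(const unsigned int *indir, size_t len,
                              unsigned int n_rx_channels)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if (indir[i] >= n_rx_channels)
                            return -1;
            return 0;
    }

    int main(void)
    {
            unsigned int indir[RX_INDIR_SIZE];
            unsigned int n_rx_channels = 4;    /* example value */
            size_t i;

            /* Default fill: cycle over the RX channels so the hardware
             * spreads flows evenly (indir[i] = i % n_rx_channels). */
            for (i = 0; i < RX_INDIR_SIZE; i++)
                    indir[i] = i % n_rx_channels;

            printf("valid=%d, first entries: %u %u %u %u %u\n",
                   validate_indir(indir, RX_INDIR_SIZE, n_rx_channels),
                   indir[0], indir[1], indir[2], indir[3], indir[4]);
            return 0;
    }
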
@@ -291,7 +297,7 @@ static int sfe4001_poweron(struct efx_nic *efx) } } - EFX_INFO(efx, "timed out waiting for DSP boot\n"); + netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n"); rc = -ETIMEDOUT; fail_on: sfe4001_poweroff(efx); @@ -377,7 +383,7 @@ static void sfe4001_fini(struct efx_nic *efx) { struct falcon_board *board = falcon_board(efx); - EFX_INFO(efx, "%s\n", __func__); + netif_info(efx, drv, efx->net_dev, "%s\n", __func__); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); sfe4001_poweroff(efx); @@ -461,7 +467,7 @@ static int sfe4001_init(struct efx_nic *efx) if (rc) goto fail_on; - EFX_INFO(efx, "PHY is powered on\n"); + netif_info(efx, hw, efx->net_dev, "PHY is powered on\n"); return 0; fail_on: @@ -493,7 +499,7 @@ static int sfn4111t_check_hw(struct efx_nic *efx) static void sfn4111t_fini(struct efx_nic *efx) { - EFX_INFO(efx, "%s\n", __func__); + netif_info(efx, drv, efx->net_dev, "%s\n", __func__); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); i2c_unregister_device(falcon_board(efx)->hwmon_client); @@ -742,13 +748,14 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info) board->type = &board_types[i]; if (board->type) { - EFX_INFO(efx, "board is %s rev %c%d\n", + netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n", (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) ? board->type->ref_model : board->type->gen_type, 'A' + board->major, board->minor); return 0; } else { - EFX_ERR(efx, "unknown board type %d\n", type_id); + netif_err(efx, probe, efx->net_dev, "unknown board type %d\n", + type_id); return -ENODEV; } } diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index c84a2ce2ccbbb905458d110172f2d272f00edf2a..bae656dd2c4ee800d161caf3d47255aca07dae40 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c @@ -81,7 +81,8 @@ int falcon_reset_xaui(struct efx_nic *efx) } udelay(10); } - EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); + netif_err(efx, hw, efx->net_dev, + "timed out waiting for XAUI/XGXS reset\n"); return -ETIMEDOUT; } @@ -256,7 +257,7 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) falcon_stop_nic_stats(efx); while (!mac_up && tries) { - EFX_LOG(efx, "bashing xaui\n"); + netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); falcon_reset_xaui(efx); udelay(200); diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index b89177c27f4af3fa75789931a43d8f1d0cb6e9f2..85a99fe87437e409b7c946244e0d039235681988 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h @@ -78,8 +78,9 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, { unsigned long flags __attribute__ ((unused)); - EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg, - EFX_OWORD_VAL(*value)); + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); spin_lock_irqsave(&efx->biu_lock, flags); #ifdef EFX_USE_QWORD_IO @@ -105,8 +106,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, unsigned int addr = index * sizeof(*value); unsigned long flags __attribute__ ((unused)); - EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n", - addr, EFX_QWORD_VAL(*value)); + netif_vdbg(efx, hw, efx->net_dev, + "writing SRAM address %x with " EFX_QWORD_FMT "\n", + addr, EFX_QWORD_VAL(*value)); spin_lock_irqsave(&efx->biu_lock, flags); #ifdef EFX_USE_QWORD_IO @@ -129,8 +131,9 @@ static inline void efx_sram_writeq(struct efx_nic 
*efx, void __iomem *membase, static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, unsigned int reg) { - EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n", - reg, EFX_DWORD_VAL(*value)); + netif_vdbg(efx, hw, efx->net_dev, + "writing partial register %x with "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); /* No lock required */ _efx_writed(efx, value->u32[0], reg); @@ -155,8 +158,9 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, value->u32[3] = _efx_readd(efx, reg + 12); spin_unlock_irqrestore(&efx->biu_lock, flags); - EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg, - EFX_OWORD_VAL(*value)); + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); } /* Read an 8-byte SRAM entry through supplied mapping, @@ -177,8 +181,9 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, #endif spin_unlock_irqrestore(&efx->biu_lock, flags); - EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n", - addr, EFX_QWORD_VAL(*value)); + netif_vdbg(efx, hw, efx->net_dev, + "read from SRAM address %x, got "EFX_QWORD_FMT"\n", + addr, EFX_QWORD_VAL(*value)); } /* Read dword from register that allows partial writes (sic) */ @@ -186,8 +191,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, unsigned int reg) { value->u32[0] = _efx_readd(efx, reg); - EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n", - reg, EFX_DWORD_VAL(*value)); + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); } /* Write to a register forming part of a table */ @@ -211,6 +217,13 @@ static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); } +/* Read from a dword register forming part of a table */ +static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg, unsigned int index) +{ + efx_readd(efx, value, reg + index * sizeof(efx_dword_t)); +} + /* Page-mapped register block size */ #define EFX_PAGE_BLOCK_SIZE 0x2000 diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 93cc3c1b9450b28cb0587617855818af8de580e5..3912b8fed912d5f2bb488acc4bdccf439b0c5fe7 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -168,11 +168,12 @@ static int efx_mcdi_poll(struct efx_nic *efx) error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); if (error && mcdi->resplen == 0) { - EFX_ERR(efx, "MC rebooted\n"); + netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); rc = EIO; } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { - EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", - respseq, mcdi->seqno); + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx seq 0x%x\n", + respseq, mcdi->seqno); rc = EIO; } else if (error) { efx_readd(efx, ®, pdu + 4); @@ -303,8 +304,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, /* The request has been cancelled */ --mcdi->credits; else - EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx " - "seq 0x%x\n", seqno, mcdi->seqno); + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx " + "seq 0x%x\n", seqno, mcdi->seqno); } else { mcdi->resprc = errno; mcdi->resplen = datalen; @@ -352,8 +354,9 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, ++mcdi->credits; spin_unlock_bh(&mcdi->iface_lock); - EFX_ERR(efx, "MC command 0x%x inlen %d mode %d 
timed out\n", - cmd, (int)inlen, mcdi->mode); + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); } else { size_t resplen; @@ -374,11 +377,13 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, } else if (cmd == MC_CMD_REBOOT && rc == -EIO) ; /* Don't reset if MC_CMD_REBOOT returns EIO */ else if (rc == -EIO || rc == -EINTR) { - EFX_ERR(efx, "MC fatal error %d\n", -rc); + netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", + -rc); efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); } else - EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n", - cmd, (int)inlen, -rc); + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d failed rc=%d\n", + cmd, (int)inlen, -rc); } efx_mcdi_release(mcdi); @@ -534,8 +539,9 @@ static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); state_txt = sensor_status_names[state]; - EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n", - monitor, name, state_txt, value); + netif_err(efx, hw, efx->net_dev, + "Sensor %d (%s) reports condition '%s' for raw value %d\n", + monitor, name, state_txt, value); } /* Called from falcon_process_eventq for MCDI events */ @@ -548,12 +554,13 @@ void efx_mcdi_process_event(struct efx_channel *channel, switch (code) { case MCDI_EVENT_CODE_BADSSERT: - EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data); + netif_err(efx, hw, efx->net_dev, + "MC watchdog or assertion failure at 0x%x\n", data); efx_mcdi_ev_death(efx, EINTR); break; case MCDI_EVENT_CODE_PMNOTICE: - EFX_INFO(efx, "MCDI PM event.\n"); + netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); break; case MCDI_EVENT_CODE_CMDDONE: @@ -570,10 +577,11 @@ void efx_mcdi_process_event(struct efx_channel *channel, efx_mcdi_sensor_event(efx, event); break; case MCDI_EVENT_CODE_SCHEDERR: - EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data); + netif_info(efx, hw, efx->net_dev, + "MC Scheduler error address=0x%x\n", data); break; case MCDI_EVENT_CODE_REBOOT: - EFX_INFO(efx, "MC Reboot\n"); + netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); efx_mcdi_ev_death(efx, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: @@ -581,7 +589,8 @@ void efx_mcdi_process_event(struct efx_channel *channel, break; default: - EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code); + netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", + code); } } @@ -627,7 +636,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -657,7 +666,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -695,7 +704,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", + __func__, rc, (int)outlen); return rc; } @@ -724,7 +734,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -749,8 +759,8 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 
*nvram_types_out) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", - __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); return rc; } @@ -781,7 +791,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -802,7 +812,7 @@ int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -827,7 +837,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -853,7 +863,7 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -877,7 +887,7 @@ int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -898,7 +908,7 @@ int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -948,9 +958,10 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx) return 0; fail2: - EFX_ERR(efx, "%s: failed type=%u\n", __func__, type); + netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", + __func__, type); fail1: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -994,14 +1005,15 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) ? 
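
For illustration, a minimal caller of one of the MCDI wrappers above (efx_example_* is not part of the patch). The wrapper already logs failures through netif_err(), so a caller only needs the return code:

	static void efx_example_log_nvram_types(struct efx_nic *efx)
	{
		u32 nvram_types;

		if (efx_mcdi_nvram_types(efx, &nvram_types) == 0)
			netif_info(efx, hw, efx->net_dev,
				   "NVRAM partition type bitmask: %#x\n",
				   nvram_types);
	}
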
"watchdog reset" : "unknown assertion"; - EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, - MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), - MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); + netif_err(efx, hw, efx->net_dev, + "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); /* Print out the registers */ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; for (index = 1; index < 32; index++) { - EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, + netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, MCDI_DWORD2(outbuf, ofst)); ofst += sizeof(efx_dword_t); } @@ -1050,14 +1062,16 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); } int efx_mcdi_reset_port(struct efx_nic *efx) { int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); if (rc) - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); return rc; } @@ -1075,7 +1089,7 @@ int efx_mcdi_reset_mc(struct efx_nic *efx) return 0; if (rc == 0) rc = -EIO; - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1108,7 +1122,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, fail: *id_out = -1; - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1143,7 +1157,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) fail: *id_out = -1; - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1163,7 +1177,7 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1179,7 +1193,7 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c index 39182631ac924a3938e3cbb39e1d28073c5cca55..f88f4bf986ff423d56ea6c8507b1254dc1dfec86 100644 --- a/drivers/net/sfc/mcdi_mac.c +++ b/drivers/net/sfc/mcdi_mac.c @@ -69,8 +69,8 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", - __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); return rc; } @@ -110,8 +110,8 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, return 0; fail: - EFX_ERR(efx, "%s: %s failed rc=%d\n", - __func__, enable ? "enable" : "disable", rc); + netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n", + __func__, enable ? 
"enable" : "disable", rc); return rc; } diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c index 6032c0e1f1f8bcd261d37fb89b69885e48635cea..0121e71702bf9a5183ece1870c544f409d44b05f 100644 --- a/drivers/net/sfc/mcdi_phy.c +++ b/drivers/net/sfc/mcdi_phy.c @@ -20,7 +20,7 @@ #include "nic.h" #include "selftest.h" -struct efx_mcdi_phy_cfg { +struct efx_mcdi_phy_data { u32 flags; u32 type; u32 supported_cap; @@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg { }; static int -efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg) +efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) { u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; size_t outlen; @@ -71,7 +71,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -97,7 +97,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -122,7 +122,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -150,7 +150,7 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -178,7 +178,7 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, return 0; fail: - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap) static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; enum efx_phy_mode mode, supported; u32 flags; @@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media) static int efx_mcdi_phy_probe(struct efx_nic *efx) { - struct efx_mcdi_phy_cfg *phy_data; + struct efx_mcdi_phy_data *phy_data; u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; u32 caps; int rc; @@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); return 0; @@ -405,7 +406,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) int efx_mcdi_phy_reconfigure(struct efx_nic *efx) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps = (efx->link_advertising ? 
ethtool_to_mcdi_cap(efx->link_advertising) : phy_cfg->forced_cap); @@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx, */ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 rmtadv; /* The link partner capabilities are only relevent if the @@ -465,8 +466,8 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) rmtadv |= ADVERTISED_Asym_Pause; if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) - EFX_ERR(efx, "warning: link partner doesn't support " - "pause frames"); + netif_err(efx, link, efx->net_dev, + "warning: link partner doesn't support pause frames"); } static bool efx_mcdi_phy_poll(struct efx_nic *efx) @@ -482,7 +483,8 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx) rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), NULL); if (rc) { - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); efx->link_state.up = false; } else { efx_mcdi_phy_decode_link( @@ -505,7 +507,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx) static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; int rc; @@ -525,7 +527,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), NULL); if (rc) { - EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); return; } ecmd->lp_advertising = @@ -535,7 +538,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps; int rc; @@ -674,7 +677,7 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned flags) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 mode; int rc; @@ -712,7 +715,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) { - struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { if (index == 0) diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index 0548fcbbdcd0982ec1b8d2ca9b278d935951d7bc..eeaf0bd64bd3c2873c74bb0e4664a44f478c4e8d 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c @@ -63,7 +63,8 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) /* Read MMD STATUS2 to check it is responding. 
*/ status = efx_mdio_read(efx, mmd, MDIO_STAT2); if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) { - EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd); + netif_err(efx, hw, efx->net_dev, + "PHY MMD %d not responding.\n", mmd); return -EIO; } } @@ -72,12 +73,14 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) status = efx_mdio_read(efx, mmd, MDIO_STAT1); if (status & MDIO_STAT1_FAULT) { if (fault_fatal) { - EFX_ERR(efx, "PHY MMD %d reporting fatal" - " fault: status %x\n", mmd, status); + netif_err(efx, hw, efx->net_dev, + "PHY MMD %d reporting fatal" + " fault: status %x\n", mmd, status); return -EIO; } else { - EFX_LOG(efx, "PHY MMD %d reporting status" - " %x (expected)\n", mmd, status); + netif_dbg(efx, hw, efx->net_dev, + "PHY MMD %d reporting status" + " %x (expected)\n", mmd, status); } } return 0; @@ -103,8 +106,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask) if (mask & 1) { stat = efx_mdio_read(efx, mmd, MDIO_CTRL1); if (stat < 0) { - EFX_ERR(efx, "failed to read status of" - " MMD %d\n", mmd); + netif_err(efx, hw, efx->net_dev, + "failed to read status of" + " MMD %d\n", mmd); return -EIO; } if (stat & MDIO_CTRL1_RESET) @@ -119,8 +123,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask) msleep(spintime); } if (in_reset != 0) { - EFX_ERR(efx, "not all MMDs came out of reset in time." - " MMDs still in reset: %x\n", in_reset); + netif_err(efx, hw, efx->net_dev, + "not all MMDs came out of reset in time." + " MMDs still in reset: %x\n", in_reset); rc = -ETIMEDOUT; } return rc; @@ -142,16 +147,18 @@ int efx_mdio_check_mmds(struct efx_nic *efx, devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1); devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2); if (devs1 < 0 || devs2 < 0) { - EFX_ERR(efx, "failed to read devices present\n"); + netif_err(efx, hw, efx->net_dev, + "failed to read devices present\n"); return -EIO; } devices = devs1 | (devs2 << 16); if ((devices & mmd_mask) != mmd_mask) { - EFX_ERR(efx, "required MMDs not present: got %x, " - "wanted %x\n", devices, mmd_mask); + netif_err(efx, hw, efx->net_dev, + "required MMDs not present: got %x, wanted %x\n", + devices, mmd_mask); return -ENODEV; } - EFX_TRACE(efx, "Devices present: %x\n", devices); + netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices); /* Check all required MMDs are responding and happy. 
*/ while (mmd_mask) { @@ -219,7 +226,7 @@ static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, { int stat = efx_mdio_read(efx, mmd, MDIO_STAT1); - EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n", + netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n", mmd, lpower); if (stat & MDIO_STAT1_LPOWERABLE) { @@ -349,8 +356,8 @@ int efx_mdio_test_alive(struct efx_nic *efx) if ((physid1 == 0x0000) || (physid1 == 0xffff) || (physid2 == 0x0000) || (physid2 == 0xffff)) { - EFX_ERR(efx, "no MDIO PHY present with ID %d\n", - efx->mdio.prtad); + netif_err(efx, hw, efx->net_dev, + "no MDIO PHY present with ID %d\n", efx->mdio.prtad); rc = -EINVAL; } else { rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index f89e719296031194a6953eba58daebe66b08164b..75791d3d4963f0a1a445c1fc269b938bd3499ccb 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h @@ -51,7 +51,8 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx) sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN); if (!sync) - EFX_LOG(efx, "XGXS lane status: %x\n", lane_status); + netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n", + lane_status); return sync; } diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c index f3ac7f30b5e718e3178dbdcf2797b5a0d13703d7..02e54b4f701fe7dba48c7bb851fab668a62d6126 100644 --- a/drivers/net/sfc/mtd.c +++ b/drivers/net/sfc/mtd.c @@ -15,7 +15,6 @@ #include #include -#define EFX_DRIVER_NAME "sfc_mtd" #include "net_driver.h" #include "spi.h" #include "efx.h" @@ -71,8 +70,10 @@ static int siena_mtd_probe(struct efx_nic *efx); /* SPI utilities */ -static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) +static int +efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) { + struct efx_mtd *efx_mtd = part->mtd.priv; const struct efx_spi_device *spi = efx_mtd->spi; struct efx_nic *efx = efx_mtd->efx; u8 status; @@ -92,7 +93,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible) if (signal_pending(current)) return -EINTR; } - EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name); + pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name); return -ETIMEDOUT; } @@ -131,8 +132,10 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi) return 0; } -static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) +static int +efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) { + struct efx_mtd *efx_mtd = part->mtd.priv; const struct efx_spi_device *spi = efx_mtd->spi; struct efx_nic *efx = efx_mtd->efx; unsigned pos, block_len; @@ -156,7 +159,7 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len) NULL, 0); if (rc) return rc; - rc = efx_spi_slow_wait(efx_mtd, false); + rc = efx_spi_slow_wait(part, false); /* Verify the entire region has been wiped */ memset(empty, 0xff, sizeof(empty)); @@ -198,13 +201,14 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) static void efx_mtd_sync(struct mtd_info *mtd) { + struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); struct efx_mtd *efx_mtd = mtd->priv; - struct efx_nic *efx = efx_mtd->efx; int rc; rc = efx_mtd->ops->sync(mtd); if (rc) - EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc); + pr_err("%s: %s sync failed (%d)\n", + part->name, efx_mtd->name, rc); } static void efx_mtd_remove_partition(struct efx_mtd_partition *part) @@ -338,7 +342,7 @@ static 
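
A hedged sketch of a caller for the MDIO helpers above (the real self-test code lives in selftest.c, which is not part of this excerpt, and efx_example_* is an illustrative name):

	static int efx_example_check_phy(struct efx_nic *efx)
	{
		int rc = efx_mdio_test_alive(efx);

		if (rc == 0 && !efx_mdio_phyxgxs_lane_sync(efx))
			netif_dbg(efx, hw, efx->net_dev,
				  "XGXS lanes not yet aligned\n");
		return rc;
	}
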
int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) rc = mutex_lock_interruptible(&efx->spi_lock); if (rc) return rc; - rc = efx_spi_erase(efx_mtd, part->offset + start, len); + rc = efx_spi_erase(part, part->offset + start, len); mutex_unlock(&efx->spi_lock); return rc; } @@ -363,12 +367,13 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, static int falcon_mtd_sync(struct mtd_info *mtd) { + struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); struct efx_mtd *efx_mtd = mtd->priv; struct efx_nic *efx = efx_mtd->efx; int rc; mutex_lock(&efx->spi_lock); - rc = efx_spi_slow_wait(efx_mtd, true); + rc = efx_spi_slow_wait(part, true); mutex_unlock(&efx->spi_lock); return rc; } diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 4762c91cb587f03c19eddd58abef2ea449a2e941..64e7caa4bbb5444798ef133df0ad54ef2c411863 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -13,11 +13,16 @@ #ifndef EFX_NET_DRIVER_H #define EFX_NET_DRIVER_H +#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG) +#define DEBUG +#endif + #include #include #include #include #include +#include #include #include #include @@ -34,9 +39,7 @@ * Build definitions * **************************************************************************/ -#ifndef EFX_DRIVER_NAME -#define EFX_DRIVER_NAME "sfc" -#endif + #define EFX_DRIVER_VERSION "3.0" #ifdef EFX_ENABLE_DEBUG @@ -47,35 +50,6 @@ #define EFX_WARN_ON_PARANOID(x) do {} while (0) #endif -/* Un-rate-limited logging */ -#define EFX_ERR(efx, fmt, args...) \ -dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) - -#define EFX_INFO(efx, fmt, args...) \ -dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) - -#ifdef EFX_ENABLE_DEBUG -#define EFX_LOG(efx, fmt, args...) \ -dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) -#else -#define EFX_LOG(efx, fmt, args...) \ -dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) -#endif - -#define EFX_TRACE(efx, fmt, args...) do {} while (0) - -#define EFX_REGDUMP(efx, fmt, args...) do {} while (0) - -/* Rate-limited logging */ -#define EFX_ERR_RL(efx, fmt, args...) \ -do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0) - -#define EFX_INFO_RL(efx, fmt, args...) \ -do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) - -#define EFX_LOG_RL(efx, fmt, args...) \ -do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) - /************************************************************************** * * Efx data structures @@ -221,7 +195,6 @@ struct efx_tx_queue { * If both this and skb are %NULL, the buffer slot is currently free. * @data: Pointer to ethernet header * @len: Buffer length, in bytes. - * @unmap_addr: DMA address to unmap */ struct efx_rx_buffer { dma_addr_t dma_addr; @@ -229,7 +202,24 @@ struct efx_rx_buffer { struct page *page; char *data; unsigned int len; - dma_addr_t unmap_addr; +}; + +/** + * struct efx_rx_page_state - Page-based rx buffer state + * + * Inserted at the start of every page allocated for receive buffers. + * Used to facilitate sharing dma mappings between recycled rx buffers + * and those passed up to the kernel. + * + * @refcnt: Number of struct efx_rx_buffer's referencing this page. + * When refcnt falls to zero, the page is unmapped for dma + * @dma_addr: The dma address of this page. 
+ */ +struct efx_rx_page_state { + unsigned refcnt; + dma_addr_t dma_addr; + + unsigned int __pad[0] ____cacheline_aligned; }; /** @@ -242,10 +232,6 @@ struct efx_rx_buffer { * @added_count: Number of buffers added to the receive queue. * @notified_count: Number of buffers given to NIC (<= @added_count). * @removed_count: Number of buffers removed from the receive queue. - * @add_lock: Receive queue descriptor add spin lock. - * This lock must be held in order to add buffers to the RX - * descriptor ring (rxd and buffer) and to update added_count (but - * not removed_count). * @max_fill: RX descriptor maximum fill level (<= ring size) * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill * (<= @max_fill) @@ -259,12 +245,7 @@ struct efx_rx_buffer { * overflow was observed. It should never be set. * @alloc_page_count: RX allocation strategy counter. * @alloc_skb_count: RX allocation strategy counter. - * @work: Descriptor push work thread - * @buf_page: Page for next RX buffer. - * We can use a single page for multiple RX buffers. This tracks - * the remaining space in the allocation. - * @buf_dma_addr: Page's DMA address. - * @buf_data: Page's host address. + * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). * @flushed: Use when handling queue flushing */ struct efx_rx_queue { @@ -277,7 +258,6 @@ struct efx_rx_queue { int added_count; int notified_count; int removed_count; - spinlock_t add_lock; unsigned int max_fill; unsigned int fast_fill_trigger; unsigned int fast_fill_limit; @@ -285,12 +265,9 @@ struct efx_rx_queue { unsigned int min_overfill; unsigned int alloc_page_count; unsigned int alloc_skb_count; - struct delayed_work work; + struct timer_list slow_fill; unsigned int slow_fill_count; - struct page *buf_page; - dma_addr_t buf_dma_addr; - char *buf_data; enum efx_flush_state flushed; }; @@ -336,7 +313,7 @@ enum efx_rx_alloc_method { * @eventq: Event queue buffer * @eventq_read_ptr: Event queue read pointer * @last_eventq_read_ptr: Last event queue read pointer value. - * @eventq_magic: Event queue magic value for driver-generated test events + * @magic_count: Event queue test event count * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors @@ -367,7 +344,7 @@ struct efx_channel { struct efx_special_buffer eventq; unsigned int eventq_read_ptr; unsigned int last_eventq_read_ptr; - unsigned int eventq_magic; + unsigned int magic_count; unsigned int irq_count; unsigned int irq_mod_score; @@ -658,6 +635,7 @@ union efx_multicast_hash { * @interrupt_mode: Interrupt mode * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues * @irq_rx_moderation: IRQ moderation time for RX event queues + * @msg_enable: Log message enable flags * @state: Device state flag. Serialised by the rtnl_lock. 
* @reset_pending: Pending reset method (normally RESET_TYPE_NONE) * @tx_queue: TX DMA queues @@ -669,6 +647,7 @@ union efx_multicast_hash { * @n_tx_channels: Number of channels used for TX * @rx_buffer_len: RX buffer length * @rx_buffer_order: Order (log2) of number of pages for each RX buffer + * @rx_indir_table: Indirection table for RSS * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired * @irq_status: Interrupt status buffer @@ -740,6 +719,7 @@ struct efx_nic { enum efx_int_mode interrupt_mode; bool irq_rx_adaptive; unsigned int irq_rx_moderation; + u32 msg_enable; enum nic_state state; enum reset_type reset_pending; @@ -754,6 +734,8 @@ struct efx_nic { unsigned n_tx_channels; unsigned int rx_buffer_len; unsigned int rx_buffer_order; + u8 rx_hash_key[40]; + u32 rx_indir_table[128]; unsigned int_error_count; unsigned long int_error_expire; @@ -866,7 +848,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @evq_ptr_tbl_base: Event queue pointer table base address * @evq_rptr_tbl_base: Event queue read-pointer table base address * @max_dma_mask: Maximum possible DMA mask - * @rx_buffer_padding: Padding added to each RX buffer + * @rx_buffer_hash_size: Size of hash at start of RX buffer + * @rx_buffer_padding: Size of padding at end of RX buffer * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. * @phys_addr_channels: Number of channels with physically addressed @@ -910,6 +893,7 @@ struct efx_nic_type { unsigned int evq_ptr_tbl_base; unsigned int evq_rptr_tbl_base; u64 max_dma_mask; + unsigned int rx_buffer_hash_size; unsigned int rx_buffer_padding; unsigned int max_interrupt_mode; unsigned int phys_addr_channels; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 5d3aaec58556b5ba8a47f66dad535899149afdda..f595d920c7c46b323f0a778e58c0984a19839be6 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); /* Depth of RX flush request fifo */ #define EFX_RX_FLUSH_COUNT 4 +/* Generated event code for efx_generate_test_event() */ +#define EFX_CHANNEL_MAGIC_TEST(_channel) \ + (0x00010100 + (_channel)->channel) + +/* Generated event code for efx_generate_fill_event() */ +#define EFX_CHANNEL_MAGIC_FILL(_channel) \ + (0x00010200 + (_channel)->channel) + /************************************************************************** * * Solarstorm hardware access @@ -171,9 +179,10 @@ int efx_nic_test_registers(struct efx_nic *efx, return 0; fail: - EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT - " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), - EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); + netif_err(efx, hw, efx->net_dev, + "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT + " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), + EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); return -EIO; } @@ -206,8 +215,9 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) for (i = 0; i < buffer->entries; i++) { index = buffer->index + i; dma_addr = buffer->dma_addr + (i * 4096); - EFX_LOG(efx, "mapping special buffer %d at %llx\n", - index, (unsigned long long)dma_addr); + netif_dbg(efx, probe, efx->net_dev, + "mapping special buffer %d at %llx\n", + index, (unsigned long long)dma_addr); EFX_POPULATE_QWORD_3(buf_desc, FRF_AZ_BUF_ADR_REGION, 0, FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, @@ -227,8 +237,8 @@ 
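
The new @msg_enable field is what the netif_err()/netif_dbg()/netif_vdbg() calls above test against. As a hedged sketch (the sfc driver's actual module parameter and default bits are not shown in this excerpt, and efx_example_* is illustrative), a driver typically seeds it with netif_msg_init():

	static int debug = -1;	/* -1 means "use the default bits below" */
	module_param(debug, int, 0);
	MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

	static void efx_example_init_msg_enable(struct efx_nic *efx)
	{
		efx->msg_enable = netif_msg_init(debug,
						 NETIF_MSG_DRV | NETIF_MSG_PROBE |
						 NETIF_MSG_LINK | NETIF_MSG_HW);
		/* netif_err(efx, hw, ...) then logs only while NETIF_MSG_HW
		 * is set; netif_dbg() additionally needs DEBUG or dynamic
		 * debug, and netif_vdbg() needs VERBOSE_DEBUG. */
	}
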
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) if (!buffer->entries) return; - EFX_LOG(efx, "unmapping special buffers %d-%d\n", - buffer->index, buffer->index + buffer->entries - 1); + netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", + buffer->index, buffer->index + buffer->entries - 1); EFX_POPULATE_OWORD_4(buf_tbl_upd, FRF_AZ_BUF_UPD_CMD, 0, @@ -268,11 +278,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, buffer->index = efx->next_buffer_table; efx->next_buffer_table += buffer->entries; - EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, - buffer->index + buffer->entries - 1, - (u64)buffer->dma_addr, len, - buffer->addr, (u64)virt_to_phys(buffer->addr)); + netif_dbg(efx, probe, efx->net_dev, + "allocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->dma_addr, len, + buffer->addr, (u64)virt_to_phys(buffer->addr)); return 0; } @@ -283,11 +294,12 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) if (!buffer->addr) return; - EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, - buffer->index + buffer->entries - 1, - (u64)buffer->dma_addr, buffer->len, - buffer->addr, (u64)virt_to_phys(buffer->addr)); + netif_dbg(efx, hw, efx->net_dev, + "deallocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->dma_addr, buffer->len, + buffer->addr, (u64)virt_to_phys(buffer->addr)); pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, buffer->dma_addr); @@ -547,9 +559,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; bool iscsi_digest_en = is_b0; - EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", - rx_queue->queue, rx_queue->rxd.index, - rx_queue->rxd.index + rx_queue->rxd.entries - 1); + netif_dbg(efx, hw, efx->net_dev, + "RX queue %d ring in special buffers %d-%d\n", + rx_queue->queue, rx_queue->rxd.index, + rx_queue->rxd.index + rx_queue->rxd.entries - 1); rx_queue->flushed = FLUSH_NONE; @@ -686,9 +699,10 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) EFX_WORKAROUND_10727(efx)) { efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); } else { - EFX_ERR(efx, "channel %d unexpected TX event " - EFX_QWORD_FMT"\n", channel->channel, - EFX_QWORD_VAL(*event)); + netif_err(efx, tx_err, efx->net_dev, + "channel %d unexpected TX event " + EFX_QWORD_FMT"\n", channel->channel, + EFX_QWORD_VAL(*event)); } return tx_packets; @@ -751,20 +765,21 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, * to a FIFO overflow. */ #ifdef EFX_ENABLE_DEBUG - if (rx_ev_other_err) { - EFX_INFO_RL(efx, " RX queue %d unexpected RX event " - EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", - rx_queue->queue, EFX_QWORD_VAL(*event), - rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", - rx_ev_ip_hdr_chksum_err ? - " [IP_HDR_CHKSUM_ERR]" : "", - rx_ev_tcp_udp_chksum_err ? - " [TCP_UDP_CHKSUM_ERR]" : "", - rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", - rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", - rx_ev_drib_nib ? " [DRIB_NIB]" : "", - rx_ev_tobe_disc ? " [TOBE_DISC]" : "", - rx_ev_pause_frm ? 
" [PAUSE]" : ""); + if (rx_ev_other_err && net_ratelimit()) { + netif_dbg(efx, rx_err, efx->net_dev, + " RX queue %d unexpected RX event " + EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", + rx_queue->queue, EFX_QWORD_VAL(*event), + rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", + rx_ev_ip_hdr_chksum_err ? + " [IP_HDR_CHKSUM_ERR]" : "", + rx_ev_tcp_udp_chksum_err ? + " [TCP_UDP_CHKSUM_ERR]" : "", + rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", + rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", + rx_ev_drib_nib ? " [DRIB_NIB]" : "", + rx_ev_tobe_disc ? " [TOBE_DISC]" : "", + rx_ev_pause_frm ? " [PAUSE]" : ""); } #endif } @@ -778,8 +793,9 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) expected = rx_queue->removed_count & EFX_RXQ_MASK; dropped = (index - expected) & EFX_RXQ_MASK; - EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", - dropped, index, expected); + netif_info(efx, rx_err, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, index, expected); efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); @@ -850,6 +866,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) checksummed, discard); } +static void +efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + unsigned code; + + code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); + if (code == EFX_CHANNEL_MAGIC_TEST(channel)) + ++channel->magic_count; + else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here */ + efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); + else + netif_dbg(efx, hw, efx->net_dev, "channel %d received " + "generated event "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(*event)); +} + /* Global events are basically PHY events */ static void efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) @@ -873,8 +909,9 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { - EFX_ERR(efx, "channel %d seen global RX_RESET " - "event. Resetting.\n", channel->channel); + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen global RX_RESET event. Resetting.\n", + channel->channel); atomic_inc(&efx->rx_reset); efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? 
@@ -883,9 +920,10 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) } if (!handled) - EFX_ERR(efx, "channel %d unknown global event " - EFX_QWORD_FMT "\n", channel->channel, - EFX_QWORD_VAL(*event)); + netif_err(efx, hw, efx->net_dev, + "channel %d unknown global event " + EFX_QWORD_FMT "\n", channel->channel, + EFX_QWORD_VAL(*event)); } static void @@ -900,31 +938,35 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) switch (ev_sub_code) { case FSE_AZ_TX_DESCQ_FLS_DONE_EV: - EFX_TRACE(efx, "channel %d TXQ %d flushed\n", - channel->channel, ev_sub_data); + netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", + channel->channel, ev_sub_data); break; case FSE_AZ_RX_DESCQ_FLS_DONE_EV: - EFX_TRACE(efx, "channel %d RXQ %d flushed\n", - channel->channel, ev_sub_data); + netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", + channel->channel, ev_sub_data); break; case FSE_AZ_EVQ_INIT_DONE_EV: - EFX_LOG(efx, "channel %d EVQ %d initialised\n", - channel->channel, ev_sub_data); + netif_dbg(efx, hw, efx->net_dev, + "channel %d EVQ %d initialised\n", + channel->channel, ev_sub_data); break; case FSE_AZ_SRM_UPD_DONE_EV: - EFX_TRACE(efx, "channel %d SRAM update done\n", - channel->channel); + netif_vdbg(efx, hw, efx->net_dev, + "channel %d SRAM update done\n", channel->channel); break; case FSE_AZ_WAKE_UP_EV: - EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", - channel->channel, ev_sub_data); + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RXQ %d wakeup event\n", + channel->channel, ev_sub_data); break; case FSE_AZ_TIMER_EV: - EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", - channel->channel, ev_sub_data); + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RX queue %d timer expired\n", + channel->channel, ev_sub_data); break; case FSE_AA_RX_RECOVER_EV: - EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen DRIVER RX_RESET event. " "Resetting.\n", channel->channel); atomic_inc(&efx->rx_reset); efx_schedule_reset(efx, @@ -933,19 +975,22 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) RESET_TYPE_DISABLE); break; case FSE_BZ_RX_DSC_ERROR_EV: - EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." - " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); + netif_err(efx, rx_err, efx->net_dev, + "RX DMA Q %d reports descriptor fetch error." + " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); break; case FSE_BZ_TX_DSC_ERROR_EV: - EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." - " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); + netif_err(efx, tx_err, efx->net_dev, + "TX DMA Q %d reports descriptor fetch error." 
+ " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); break; default: - EFX_TRACE(efx, "channel %d unknown driver event code %d " - "data %04x\n", channel->channel, ev_sub_code, - ev_sub_data); + netif_vdbg(efx, hw, efx->net_dev, + "channel %d unknown driver event code %d " + "data %04x\n", channel->channel, ev_sub_code, + ev_sub_data); break; } } @@ -968,8 +1013,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) /* End of events */ break; - EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n", - channel->channel, EFX_QWORD_VAL(event)); + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d event is "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(event)); /* Clear this event by marking it all ones */ EFX_SET_QWORD(*p_event); @@ -993,11 +1039,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) } break; case FSE_AZ_EV_CODE_DRV_GEN_EV: - channel->eventq_magic = EFX_QWORD_FIELD( - event, FSF_AZ_DRV_GEN_EV_MAGIC); - EFX_LOG(channel->efx, "channel %d received generated " - "event "EFX_QWORD_FMT"\n", channel->channel, - EFX_QWORD_VAL(event)); + efx_handle_generated_event(channel, &event); break; case FSE_AZ_EV_CODE_GLOBAL_EV: efx_handle_global_event(channel, &event); @@ -1009,9 +1051,10 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) efx_mcdi_process_event(channel, &event); break; default: - EFX_ERR(channel->efx, "channel %d unknown event type %d" - " (data " EFX_QWORD_FMT ")\n", channel->channel, - ev_code, EFX_QWORD_VAL(event)); + netif_err(channel->efx, hw, channel->efx->net_dev, + "channel %d unknown event type %d (data " + EFX_QWORD_FMT ")\n", channel->channel, + ev_code, EFX_QWORD_VAL(event)); } } @@ -1036,9 +1079,10 @@ void efx_nic_init_eventq(struct efx_channel *channel) efx_oword_t reg; struct efx_nic *efx = channel->efx; - EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", - channel->channel, channel->eventq.index, - channel->eventq.index + channel->eventq.entries - 1); + netif_dbg(efx, hw, efx->net_dev, + "channel %d event queue in special buffers %d-%d\n", + channel->channel, channel->eventq.index, + channel->eventq.index + channel->eventq.entries - 1); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { EFX_POPULATE_OWORD_3(reg, @@ -1088,12 +1132,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel) } -/* Generates a test event on the event queue. 
A subsequent call to - * process_eventq() should pick up the event and place the value of - * "magic" into channel->eventq_magic; - */ -void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic) +void efx_nic_generate_test_event(struct efx_channel *channel) { + unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); + efx_qword_t test_event; + + EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, + FSE_AZ_EV_CODE_DRV_GEN_EV, + FSF_AZ_DRV_GEN_EV_MAGIC, magic); + efx_generate_event(channel, &test_event); +} + +void efx_nic_generate_fill_event(struct efx_channel *channel) +{ + unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); efx_qword_t test_event; EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, @@ -1208,20 +1260,19 @@ int efx_nic_flush_queues(struct efx_nic *efx) * leading to a reset, or fake up success anyway */ efx_for_each_tx_queue(tx_queue, efx) { if (tx_queue->flushed != FLUSH_DONE) - EFX_ERR(efx, "tx queue %d flush command timed out\n", - tx_queue->queue); + netif_err(efx, hw, efx->net_dev, + "tx queue %d flush command timed out\n", + tx_queue->queue); tx_queue->flushed = FLUSH_DONE; } efx_for_each_rx_queue(rx_queue, efx) { if (rx_queue->flushed != FLUSH_DONE) - EFX_ERR(efx, "rx queue %d flush command timed out\n", - rx_queue->queue); + netif_err(efx, hw, efx->net_dev, + "rx queue %d flush command timed out\n", + rx_queue->queue); rx_queue->flushed = FLUSH_DONE; } - if (EFX_WORKAROUND_7803(efx)) - return 0; - return -ETIMEDOUT; } @@ -1290,10 +1341,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); - EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " - EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), - EFX_OWORD_VAL(fatal_intr), - error ? "disabling bus mastering" : "no recognised error"); + netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " + EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), + EFX_OWORD_VAL(fatal_intr), + error ? "disabling bus mastering" : "no recognised error"); /* If this is a memory parity error dump which blocks are offending */ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || @@ -1301,8 +1352,9 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) if (mem_perr) { efx_oword_t reg; efx_reado(efx, ®, FR_AZ_MEM_STAT); - EFX_ERR(efx, "SYSTEM ERROR: memory parity error " - EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(reg)); } /* Disable both devices */ @@ -1319,11 +1371,13 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) jiffies + EFX_INT_ERROR_EXPIRE * HZ; } if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { - EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - reset scheduled\n"); efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); } else { - EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen." - "NIC will be disabled\n"); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - max number of errors seen." 
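
A simplified sketch of how a self-test path could use the split test/fill event generators together with the new @magic_count counter (the wait here is reduced to a sleep and assumes the IRQ/NAPI path is running; it is not the driver's actual self-test code):

	static int efx_example_wait_for_test_event(struct efx_channel *channel)
	{
		unsigned int count_before = channel->magic_count;

		efx_nic_generate_test_event(channel);
		msleep(10);	/* give the event queue time to be serviced */
		return (channel->magic_count == count_before) ? -ETIMEDOUT : 0;
	}
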
+ "NIC will be disabled\n"); efx_schedule_reset(efx, RESET_TYPE_DISABLE); } @@ -1386,8 +1440,9 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) if (result == IRQ_HANDLED) { efx->last_irq_cpu = raw_smp_processor_id(); - EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); } return result; @@ -1408,8 +1463,9 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) int syserr; efx->last_irq_cpu = raw_smp_processor_id(); - EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); /* Check to see if we have a serious error condition */ if (channel->channel == efx->fatal_irq_level) { @@ -1428,22 +1484,21 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) /* Setup RSS indirection table. * This maps from the hash value of the packet to RXQ */ -static void efx_setup_rss_indir_table(struct efx_nic *efx) +void efx_nic_push_rx_indir_table(struct efx_nic *efx) { - int i = 0; - unsigned long offset; + size_t i = 0; efx_dword_t dword; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return; - for (offset = FR_BZ_RX_INDIRECTION_TBL; - offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; - offset += 0x10) { + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, - i % efx->n_rx_channels); - efx_writed(efx, &dword, offset); - i++; + efx->rx_indir_table[i]); + efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); } } @@ -1465,8 +1520,9 @@ int efx_nic_init_interrupt(struct efx_nic *efx) rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, efx->name, efx); if (rc) { - EFX_ERR(efx, "failed to hook legacy IRQ %d\n", - efx->pci_dev->irq); + netif_err(efx, drv, efx->net_dev, + "failed to hook legacy IRQ %d\n", + efx->pci_dev->irq); goto fail1; } return 0; @@ -1478,7 +1534,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx) IRQF_PROBE_SHARED, /* Not shared */ channel->name, channel); if (rc) { - EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); + netif_err(efx, drv, efx->net_dev, + "failed to hook IRQ %d\n", channel->irq); goto fail2; } } @@ -1576,7 +1633,7 @@ void efx_nic_init_common(struct efx_nic *efx) EFX_INVERT_OWORD(temp); efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); - efx_setup_rss_indir_table(efx); + efx_nic_push_rx_indir_table(efx); /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
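
For illustration, a default fill of the new efx->rx_indir_table before pushing it to hardware, spreading flows across RX channels the same way the removed loop wrote to the register directly (efx_example_* is not part of the patch):

	static void efx_example_init_rx_indir_table(struct efx_nic *efx)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
			efx->rx_indir_table[i] = i % efx->n_rx_channels;
		efx_nic_push_rx_indir_table(efx);
	}
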
@@ -1598,3 +1655,269 @@ void efx_nic_init_common(struct efx_nic *efx) EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); } + +/* Register dump */ + +#define REGISTER_REVISION_A 1 +#define REGISTER_REVISION_B 2 +#define REGISTER_REVISION_C 3 +#define REGISTER_REVISION_Z 3 /* latest revision */ + +struct efx_nic_reg { + u32 offset:24; + u32 min_revision:2, max_revision:2; +}; + +#define REGISTER(name, min_rev, max_rev) { \ + FR_ ## min_rev ## max_rev ## _ ## name, \ + REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ +} +#define REGISTER_AA(name) REGISTER(name, A, A) +#define REGISTER_AB(name) REGISTER(name, A, B) +#define REGISTER_AZ(name) REGISTER(name, A, Z) +#define REGISTER_BB(name) REGISTER(name, B, B) +#define REGISTER_BZ(name) REGISTER(name, B, Z) +#define REGISTER_CZ(name) REGISTER(name, C, Z) + +static const struct efx_nic_reg efx_nic_regs[] = { + REGISTER_AZ(ADR_REGION), + REGISTER_AZ(INT_EN_KER), + REGISTER_BZ(INT_EN_CHAR), + REGISTER_AZ(INT_ADR_KER), + REGISTER_BZ(INT_ADR_CHAR), + /* INT_ACK_KER is WO */ + /* INT_ISR0 is RC */ + REGISTER_AZ(HW_INIT), + REGISTER_CZ(USR_EV_CFG), + REGISTER_AB(EE_SPI_HCMD), + REGISTER_AB(EE_SPI_HADR), + REGISTER_AB(EE_SPI_HDATA), + REGISTER_AB(EE_BASE_PAGE), + REGISTER_AB(EE_VPD_CFG0), + /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ + /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ + /* PCIE_CORE_INDIRECT is indirect */ + REGISTER_AB(NIC_STAT), + REGISTER_AB(GPIO_CTL), + REGISTER_AB(GLB_CTL), + /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ + REGISTER_BZ(DP_CTRL), + REGISTER_AZ(MEM_STAT), + REGISTER_AZ(CS_DEBUG), + REGISTER_AZ(ALTERA_BUILD), + REGISTER_AZ(CSR_SPARE), + REGISTER_AB(PCIE_SD_CTL0123), + REGISTER_AB(PCIE_SD_CTL45), + REGISTER_AB(PCIE_PCS_CTL_STAT), + /* DEBUG_DATA_OUT is not used */ + /* DRV_EV is WO */ + REGISTER_AZ(EVQ_CTL), + REGISTER_AZ(EVQ_CNT1), + REGISTER_AZ(EVQ_CNT2), + REGISTER_AZ(BUF_TBL_CFG), + REGISTER_AZ(SRM_RX_DC_CFG), + REGISTER_AZ(SRM_TX_DC_CFG), + REGISTER_AZ(SRM_CFG), + /* BUF_TBL_UPD is WO */ + REGISTER_AZ(SRM_UPD_EVQ), + REGISTER_AZ(SRAM_PARITY), + REGISTER_AZ(RX_CFG), + REGISTER_BZ(RX_FILTER_CTL), + /* RX_FLUSH_DESCQ is WO */ + REGISTER_AZ(RX_DC_CFG), + REGISTER_AZ(RX_DC_PF_WM), + REGISTER_BZ(RX_RSS_TKEY), + /* RX_NODESC_DROP is RC */ + REGISTER_AA(RX_SELF_RST), + /* RX_DEBUG, RX_PUSH_DROP are not used */ + REGISTER_CZ(RX_RSS_IPV6_REG1), + REGISTER_CZ(RX_RSS_IPV6_REG2), + REGISTER_CZ(RX_RSS_IPV6_REG3), + /* TX_FLUSH_DESCQ is WO */ + REGISTER_AZ(TX_DC_CFG), + REGISTER_AA(TX_CHKSM_CFG), + REGISTER_AZ(TX_CFG), + /* TX_PUSH_DROP is not used */ + REGISTER_AZ(TX_RESERVED), + REGISTER_BZ(TX_PACE), + /* TX_PACE_DROP_QID is RC */ + REGISTER_BB(TX_VLAN), + REGISTER_BZ(TX_IPFIL_PORTEN), + REGISTER_AB(MD_TXD), + REGISTER_AB(MD_RXD), + REGISTER_AB(MD_CS), + REGISTER_AB(MD_PHY_ADR), + REGISTER_AB(MD_ID), + /* MD_STAT is RC */ + REGISTER_AB(MAC_STAT_DMA), + REGISTER_AB(MAC_CTRL), + REGISTER_BB(GEN_MODE), + REGISTER_AB(MAC_MC_HASH_REG0), + REGISTER_AB(MAC_MC_HASH_REG1), + REGISTER_AB(GM_CFG1), + REGISTER_AB(GM_CFG2), + /* GM_IPG and GM_HD are not used */ + REGISTER_AB(GM_MAX_FLEN), + /* GM_TEST is not used */ + REGISTER_AB(GM_ADR1), + REGISTER_AB(GM_ADR2), + REGISTER_AB(GMF_CFG0), + REGISTER_AB(GMF_CFG1), + REGISTER_AB(GMF_CFG2), + REGISTER_AB(GMF_CFG3), + REGISTER_AB(GMF_CFG4), + REGISTER_AB(GMF_CFG5), + REGISTER_BB(TX_SRC_MAC_CTL), + REGISTER_AB(XM_ADR_LO), + REGISTER_AB(XM_ADR_HI), + REGISTER_AB(XM_GLB_CFG), + REGISTER_AB(XM_TX_CFG), + 
REGISTER_AB(XM_RX_CFG), + REGISTER_AB(XM_MGT_INT_MASK), + REGISTER_AB(XM_FC), + REGISTER_AB(XM_PAUSE_TIME), + REGISTER_AB(XM_TX_PARAM), + REGISTER_AB(XM_RX_PARAM), + /* XM_MGT_INT_MSK (note no 'A') is RC */ + REGISTER_AB(XX_PWR_RST), + REGISTER_AB(XX_SD_CTL), + REGISTER_AB(XX_TXDRV_CTL), + /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ + /* XX_CORE_STAT is partly RC */ +}; + +struct efx_nic_reg_table { + u32 offset:24; + u32 min_revision:2, max_revision:2; + u32 step:6, rows:21; +}; + +#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ + offset, \ + REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ + step, rows \ +} +#define REGISTER_TABLE(name, min_rev, max_rev) \ + REGISTER_TABLE_DIMENSIONS( \ + name, FR_ ## min_rev ## max_rev ## _ ## name, \ + min_rev, max_rev, \ + FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ + FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) +#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) +#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) +#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) +#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) +#define REGISTER_TABLE_BB_CZ(name) \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ + FR_BZ_ ## name ## _STEP, \ + FR_BB_ ## name ## _ROWS), \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ + FR_BZ_ ## name ## _STEP, \ + FR_CZ_ ## name ## _ROWS) +#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) + +static const struct efx_nic_reg_table efx_nic_reg_tables[] = { + /* DRIVER is not used */ + /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ + REGISTER_TABLE_BB(TX_IPFIL_TBL), + REGISTER_TABLE_BB(TX_SRC_MAC_TBL), + REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), + REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), + REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), + /* The register buffer is allocated with slab, so we can't + * reasonably read all of the buffer table (up to 8MB!). + * However this driver will only use a few entries. Reading + * 1K entries allows for some expansion of queue count and + * size before we need to change the version. 
*/ + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, + A, A, 8, 1024), + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, + B, Z, 8, 1024), + /* RX_FILTER_TBL{0,1} is huge and not used by this driver */ + REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), + REGISTER_TABLE_BB_CZ(TIMER_TBL), + REGISTER_TABLE_BB_CZ(TX_PACE_TBL), + REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), + /* TX_FILTER_TBL0 is huge and not used by this driver */ + REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), + REGISTER_TABLE_CZ(MC_TREG_SMEM), + /* MSIX_PBA_TABLE is not mapped */ + /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ +}; + +size_t efx_nic_get_regs_len(struct efx_nic *efx) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + size_t len = 0; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) + len += sizeof(efx_oword_t); + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) + if (efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision) + len += table->rows * min_t(size_t, table->step, 16); + + return len; +} + +void efx_nic_get_regs(struct efx_nic *efx, void *buf) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) { + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) { + efx_reado(efx, (efx_oword_t *)buf, reg->offset); + buf += sizeof(efx_oword_t); + } + } + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) { + size_t size, i; + + if (!(efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision)) + continue; + + size = min_t(size_t, table->step, 16); + + for (i = 0; i < table->rows; i++) { + switch (table->step) { + case 4: /* 32-bit register or SRAM */ + efx_readd_table(efx, buf, table->offset, i); + break; + case 8: /* 64-bit SRAM */ + efx_sram_readq(efx, + efx->membase + table->offset, + buf, i); + break; + case 16: /* 128-bit register */ + efx_reado_table(efx, buf, table->offset, i); + break; + case 32: /* 128-bit register, interleaved */ + efx_reado_table(efx, buf, table->offset, 2 * i); + break; + default: + WARN_ON(1); + return; + } + buf += size; + } + } +} diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index bbc2c0c2f8430d15f386fc93807bc8b86c33f245..0438dc98722d2b49e074c5370b44ae8b9857f916 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -142,7 +142,6 @@ struct siena_nic_data { u32 fw_build; struct efx_mcdi_iface mcdi; int wol_filter_id; - u8 ipv6_rss_key[40]; }; extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); @@ -190,8 +189,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh; /* Interrupts and test events */ extern int efx_nic_init_interrupt(struct efx_nic *efx); extern void efx_nic_enable_interrupts(struct efx_nic *efx); -extern void efx_nic_generate_test_event(struct efx_channel *channel, - unsigned int magic); +extern void efx_nic_generate_test_event(struct efx_channel *channel); +extern void efx_nic_generate_fill_event(struct efx_channel *channel); extern void efx_nic_generate_interrupt(struct efx_nic *efx); extern void efx_nic_disable_interrupts(struct efx_nic *efx); extern void efx_nic_fini_interrupt(struct efx_nic *efx); @@ 
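
A hedged sketch of how the new register-dump helpers would typically be exposed through ethtool -d (the driver's real ethtool.c is not part of this excerpt, so the efx_example_* names and the version choice are assumptions; the usual ethtool declarations are assumed to be included):

	static int efx_example_get_regs_len(struct net_device *net_dev)
	{
		struct efx_nic *efx = netdev_priv(net_dev);

		return efx_nic_get_regs_len(efx);
	}

	static void efx_example_get_regs(struct net_device *net_dev,
					 struct ethtool_regs *regs, void *buf)
	{
		struct efx_nic *efx = netdev_priv(net_dev);

		regs->version = efx->type->revision;
		efx_nic_get_regs(efx, buf);
	}
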
-208,6 +207,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx); extern void falcon_setup_xaui(struct efx_nic *efx); extern int falcon_reset_xaui(struct efx_nic *efx); extern void efx_nic_init_common(struct efx_nic *efx); +extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len); @@ -222,6 +222,9 @@ extern int efx_nic_test_registers(struct efx_nic *efx, const struct efx_nic_register_test *regs, size_t n_regs); +extern size_t efx_nic_get_regs_len(struct efx_nic *efx); +extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); + /************************************************************************** * * Falcon MAC stats diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index e077bef08a504258a4e37778c1778600cd640f92..68813d1d85f30eaac48648b7bb42b6e32ecaa34e 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c @@ -91,9 +91,10 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx) if (time_after(jiffies, timeout)) { /* Some cables have EEPROMs that conflict with the * PHY's on-board EEPROM so it cannot load firmware */ - EFX_ERR(efx, "If an SFP+ direct attach cable is" - " connected, please check that it complies" - " with the SFP+ specification\n"); + netif_err(efx, hw, efx->net_dev, + "If an SFP+ direct attach cable is" + " connected, please check that it complies" + " with the SFP+ specification\n"); return -ETIMEDOUT; } msleep(QT2025C_HEARTB_WAIT); @@ -145,7 +146,8 @@ static int qt2025c_wait_reset(struct efx_nic *efx) /* Bug 17689: occasionally heartbeat starts but firmware status * code never progresses beyond 0x00. Try again, once, after * restarting execution of the firmware image. */ - EFX_LOG(efx, "bashing QT2025C microcontroller\n"); + netif_dbg(efx, hw, efx->net_dev, + "bashing QT2025C microcontroller\n"); qt2025c_restart_firmware(efx); rc = qt2025c_wait_heartbeat(efx); if (rc != 0) @@ -165,11 +167,12 @@ static void qt2025c_firmware_id(struct efx_nic *efx) for (i = 0; i < sizeof(firmware_id); i++) firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_PRODUCT_CODE_1 + i); - EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", - (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], - firmware_id[3] >> 4, firmware_id[3] & 0xf, - firmware_id[4], firmware_id[5], - firmware_id[6], firmware_id[7], firmware_id[8]); + netif_info(efx, probe, efx->net_dev, + "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", + (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], + firmware_id[3] >> 4, firmware_id[3] & 0xf, + firmware_id[4], firmware_id[5], + firmware_id[6], firmware_id[7], firmware_id[8]); phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | ((firmware_id[3] & 0x0f) << 16) | (firmware_id[4] << 8) | firmware_id[5]; @@ -198,7 +201,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx) } if (time_after_eq(jiffies, phy_data->bug17190_timer)) { - EFX_LOG(efx, "bashing QT2025C PMA/PMD\n"); + netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n"); efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK, true); msleep(100); @@ -231,7 +234,8 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx) reg = efx_mdio_read(efx, 1, 0xc319); if ((reg & 0x0038) == phy_op_mode) return 0; - EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode); + netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n", + phy_op_mode); /* This sequence replicates the 
register writes configured in the boot * EEPROM (including the differences between board revisions), except @@ -287,8 +291,9 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx) /* Wait for the microcontroller to be ready again */ rc = qt2025c_wait_reset(efx); if (rc < 0) { - EFX_ERR(efx, "PHY microcontroller reset during mode switch " - "timed out\n"); + netif_err(efx, hw, efx->net_dev, + "PHY microcontroller reset during mode switch " + "timed out\n"); return rc; } @@ -324,7 +329,7 @@ static int qt202x_reset_phy(struct efx_nic *efx) return 0; fail: - EFX_ERR(efx, "PHY reset timed out\n"); + netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n"); return rc; } @@ -353,14 +358,15 @@ static int qt202x_phy_init(struct efx_nic *efx) rc = qt202x_reset_phy(efx); if (rc) { - EFX_ERR(efx, "PHY init failed\n"); + netif_err(efx, probe, efx->net_dev, "PHY init failed\n"); return rc; } devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); - EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", - devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), - efx_mdio_id_rev(devid)); + netif_info(efx, probe, efx->net_dev, + "PHY ID reg %x (OUI %06x model %02x revision %x)\n", + devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), + efx_mdio_id_rev(devid)); if (efx->phy_type == PHY_TYPE_QT2025C) qt2025c_firmware_id(efx); diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index e308818b9f5512e8679c9ac486ea141daa6285a9..799c461ce7b850c91fbdbff627c4993880a43c43 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -25,6 +25,9 @@ /* Number of RX descriptors pushed at once. */ #define EFX_RX_BATCH 8 +/* Maximum size of a buffer sharing a page */ +#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state)) + /* Size of buffer allocated for skb header area. */ #define EFX_SKB_HEADERS 64u @@ -98,155 +101,151 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) return PAGE_SIZE << efx->rx_buffer_order; } +static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 + return __le32_to_cpup((const __le32 *)(buf->data - 4)); +#else + const u8 *data = (const u8 *)(buf->data - 4); + return ((u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24); +#endif +} /** - * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation + * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers * * @rx_queue: Efx RX queue - * @rx_buf: RX buffer structure to populate * - * This allocates memory for a new receive buffer, maps it for DMA, - * and populates a struct efx_rx_buffer with the relevant - * information. Return a negative error code or 0 on success. + * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a + * struct efx_rx_buffer for each one. Return a negative error code or 0 + * on success. May fail having only inserted fewer than EFX_RX_BATCH + * buffers. 
*/ -static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf) +static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; struct net_device *net_dev = efx->net_dev; + struct efx_rx_buffer *rx_buf; int skb_len = efx->rx_buffer_len; + unsigned index, count; - rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); - if (unlikely(!rx_buf->skb)) - return -ENOMEM; + for (count = 0; count < EFX_RX_BATCH; ++count) { + index = rx_queue->added_count & EFX_RXQ_MASK; + rx_buf = efx_rx_buffer(rx_queue, index); - /* Adjust the SKB for padding and checksum */ - skb_reserve(rx_buf->skb, NET_IP_ALIGN); - rx_buf->len = skb_len - NET_IP_ALIGN; - rx_buf->data = (char *)rx_buf->skb->data; - rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; + rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); + if (unlikely(!rx_buf->skb)) + return -ENOMEM; + rx_buf->page = NULL; - rx_buf->dma_addr = pci_map_single(efx->pci_dev, - rx_buf->data, rx_buf->len, - PCI_DMA_FROMDEVICE); + /* Adjust the SKB for padding and checksum */ + skb_reserve(rx_buf->skb, NET_IP_ALIGN); + rx_buf->len = skb_len - NET_IP_ALIGN; + rx_buf->data = (char *)rx_buf->skb->data; + rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; + + rx_buf->dma_addr = pci_map_single(efx->pci_dev, + rx_buf->data, rx_buf->len, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(efx->pci_dev, + rx_buf->dma_addr))) { + dev_kfree_skb_any(rx_buf->skb); + rx_buf->skb = NULL; + return -EIO; + } - if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { - dev_kfree_skb_any(rx_buf->skb); - rx_buf->skb = NULL; - return -EIO; + ++rx_queue->added_count; + ++rx_queue->alloc_skb_count; } return 0; } /** - * efx_init_rx_buffer_page - create new RX buffer using page-based allocation + * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers * * @rx_queue: Efx RX queue - * @rx_buf: RX buffer structure to populate * - * This allocates memory for a new receive buffer, maps it for DMA, - * and populates a struct efx_rx_buffer with the relevant - * information. Return a negative error code or 0 on success. + * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA, + * and populates struct efx_rx_buffers for each one. Return a negative error + * code or 0 on success. If a single page can be split between two buffers, + * then the page will either be inserted fully, or not at at all. */ -static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf) +static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; - int bytes, space, offset; - - bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; - - /* If there is space left in the previously allocated page, - * then use it. 
Otherwise allocate a new one */ - rx_buf->page = rx_queue->buf_page; - if (rx_buf->page == NULL) { - dma_addr_t dma_addr; - - rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, - efx->rx_buffer_order); - if (unlikely(rx_buf->page == NULL)) + struct efx_rx_buffer *rx_buf; + struct page *page; + void *page_addr; + struct efx_rx_page_state *state; + dma_addr_t dma_addr; + unsigned index, count; + + /* We can split a page between two buffers */ + BUILD_BUG_ON(EFX_RX_BATCH & 1); + + for (count = 0; count < EFX_RX_BATCH; ++count) { + page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, + efx->rx_buffer_order); + if (unlikely(page == NULL)) return -ENOMEM; - - dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, - 0, efx_rx_buf_size(efx), + dma_addr = pci_map_page(efx->pci_dev, page, 0, + efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { - __free_pages(rx_buf->page, efx->rx_buffer_order); - rx_buf->page = NULL; + __free_pages(page, efx->rx_buffer_order); return -EIO; } - - rx_queue->buf_page = rx_buf->page; - rx_queue->buf_dma_addr = dma_addr; - rx_queue->buf_data = (page_address(rx_buf->page) + - EFX_PAGE_IP_ALIGN); - } - - rx_buf->len = bytes; - rx_buf->data = rx_queue->buf_data; - offset = efx_rx_buf_offset(rx_buf); - rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; - - /* Try to pack multiple buffers per page */ - if (efx->rx_buffer_order == 0) { - /* The next buffer starts on the next 512 byte boundary */ - rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); - offset += ((bytes + 0x1ff) & ~0x1ff); - - space = efx_rx_buf_size(efx) - offset; - if (space >= bytes) { - /* Refs dropped on kernel releasing each skb */ - get_page(rx_queue->buf_page); - goto out; + page_addr = page_address(page); + state = page_addr; + state->refcnt = 0; + state->dma_addr = dma_addr; + + page_addr += sizeof(struct efx_rx_page_state); + dma_addr += sizeof(struct efx_rx_page_state); + + split: + index = rx_queue->added_count & EFX_RXQ_MASK; + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; + rx_buf->skb = NULL; + rx_buf->page = page; + rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN; + rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; + ++rx_queue->added_count; + ++rx_queue->alloc_page_count; + ++state->refcnt; + + if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) { + /* Use the second half of the page */ + get_page(page); + dma_addr += (PAGE_SIZE >> 1); + page_addr += (PAGE_SIZE >> 1); + ++count; + goto split; } } - /* This is the final RX buffer for this page, so mark it for - * unmapping */ - rx_queue->buf_page = NULL; - rx_buf->unmap_addr = rx_queue->buf_dma_addr; - - out: return 0; } -/* This allocates memory for a new receive buffer, maps it for DMA, - * and populates a struct efx_rx_buffer with the relevant - * information. 
- */ -static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *new_rx_buf) -{ - int rc = 0; - - if (rx_queue->channel->rx_alloc_push_pages) { - new_rx_buf->skb = NULL; - rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf); - rx_queue->alloc_page_count++; - } else { - new_rx_buf->page = NULL; - rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf); - rx_queue->alloc_skb_count++; - } - - if (unlikely(rc < 0)) - EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__, - rx_queue->queue, rc); - return rc; -} - static void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf) { if (rx_buf->page) { + struct efx_rx_page_state *state; + EFX_BUG_ON_PARANOID(rx_buf->skb); - if (rx_buf->unmap_addr) { - pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, + + state = page_address(rx_buf->page); + if (--state->refcnt == 0) { + pci_unmap_page(efx->pci_dev, + state->dma_addr, efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); - rx_buf->unmap_addr = 0; } } else if (likely(rx_buf->skb)) { pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, @@ -273,31 +272,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, efx_free_rx_buffer(rx_queue->efx, rx_buf); } +/* Attempt to resurrect the other receive buffer that used to share this page, + * which had previously been passed up to the kernel and freed. */ +static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ + struct efx_rx_page_state *state = page_address(rx_buf->page); + struct efx_rx_buffer *new_buf; + unsigned fill_level, index; + + /* +1 because efx_rx_packet() incremented removed_count. +1 because + * we'd like to insert an additional descriptor whilst leaving + * EFX_RXD_HEAD_ROOM for the non-recycle path */ + fill_level = (rx_queue->added_count - rx_queue->removed_count + 2); + if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) { + /* We could place "state" on a list, and drain the list in + * efx_fast_push_rx_descriptors(). For now, this will do. */ + return; + } + + ++state->refcnt; + get_page(rx_buf->page); + + index = rx_queue->added_count & EFX_RXQ_MASK; + new_buf = efx_rx_buffer(rx_queue, index); + new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); + new_buf->skb = NULL; + new_buf->page = rx_buf->page; + new_buf->data = (void *) + ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1)); + new_buf->len = rx_buf->len; + ++rx_queue->added_count; +} + +/* Recycle the given rx buffer directly back into the rx_queue. There is + * always room to add this buffer, because we've just popped a buffer. */ +static void efx_recycle_rx_buffer(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel]; + struct efx_rx_buffer *new_buf; + unsigned index; + + if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && + page_count(rx_buf->page) == 1) + efx_resurrect_rx_buffer(rx_queue, rx_buf); + + index = rx_queue->added_count & EFX_RXQ_MASK; + new_buf = efx_rx_buffer(rx_queue, index); + + memcpy(new_buf, rx_buf, sizeof(*new_buf)); + rx_buf->page = NULL; + rx_buf->skb = NULL; + ++rx_queue->added_count; +} + /** * efx_fast_push_rx_descriptors - push new RX descriptors quickly * @rx_queue: RX descriptor queue - * @retry: Recheck the fill level * This will aim to fill the RX descriptor queue up to * @rx_queue->@fast_fill_limit. If there is insufficient atomic - * memory to do so, the caller should retry. 
+ * memory to do so, a slow fill will be scheduled. + * + * The caller must provide serialisation (none is used here). In practise, + * this means this function must run from the NAPI handler, or be called + * when NAPI is disabled. */ -static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, - int retry) +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) { - struct efx_rx_buffer *rx_buf; - unsigned fill_level, index; - int i, space, rc = 0; + struct efx_channel *channel = rx_queue->channel; + unsigned fill_level; + int space, rc = 0; - /* Calculate current fill level. Do this outside the lock, - * because most of the time we'll end up not wanting to do the - * fill anyway. - */ + /* Calculate current fill level, and exit if we don't need to fill */ fill_level = (rx_queue->added_count - rx_queue->removed_count); EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); - - /* Don't fill if we don't need to */ if (fill_level >= rx_queue->fast_fill_trigger) - return 0; + goto out; /* Record minimum fill level */ if (unlikely(fill_level < rx_queue->min_fill)) { @@ -305,99 +357,47 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, rx_queue->min_fill = fill_level; } - /* Acquire RX add lock. If this lock is contended, then a fast - * fill must already be in progress (e.g. in the refill - * tasklet), so we don't need to do anything - */ - if (!spin_trylock_bh(&rx_queue->add_lock)) - return -1; - - retry: - /* Recalculate current fill level now that we have the lock */ - fill_level = (rx_queue->added_count - rx_queue->removed_count); - EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); space = rx_queue->fast_fill_limit - fill_level; if (space < EFX_RX_BATCH) - goto out_unlock; + goto out; - EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" - " level %d to level %d using %s allocation\n", - rx_queue->queue, fill_level, rx_queue->fast_fill_limit, - rx_queue->channel->rx_alloc_push_pages ? "page" : "skb"); + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filling descriptor ring from" + " level %d to level %d using %s allocation\n", + rx_queue->queue, fill_level, rx_queue->fast_fill_limit, + channel->rx_alloc_push_pages ? "page" : "skb"); do { - for (i = 0; i < EFX_RX_BATCH; ++i) { - index = rx_queue->added_count & EFX_RXQ_MASK; - rx_buf = efx_rx_buffer(rx_queue, index); - rc = efx_init_rx_buffer(rx_queue, rx_buf); - if (unlikely(rc)) - goto out; - ++rx_queue->added_count; + if (channel->rx_alloc_push_pages) + rc = efx_init_rx_buffers_page(rx_queue); + else + rc = efx_init_rx_buffers_skb(rx_queue); + if (unlikely(rc)) { + /* Ensure that we don't leave the rx queue empty */ + if (rx_queue->added_count == rx_queue->removed_count) + efx_schedule_slow_fill(rx_queue); + goto out; } } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); - EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring " - "to level %d\n", rx_queue->queue, - rx_queue->added_count - rx_queue->removed_count); + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filled descriptor ring " + "to level %d\n", rx_queue->queue, + rx_queue->added_count - rx_queue->removed_count); out: - /* Send write pointer to card. */ - efx_nic_notify_rx_desc(rx_queue); - - /* If the fast fill is running inside from the refill tasklet, then - * for SMP systems it may be running on a different CPU to - * RX event processing, which means that the fill level may now be - * out of date. 
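[Note on the refill rework above] The new page-based refill packs a small struct efx_rx_page_state plus up to two half-page receive buffers into each allocated page, and efx_resurrect_rx_buffer() later finds a buffer's sibling simply by XORing its DMA address with half the page size. A standalone sketch of that address arithmetic, assuming a 4096-byte page and 2-byte IP alignment; the state struct and the constants/values below are illustrative stand-ins, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096u          /* assumed page size */
#define FAKE_IP_ALIGN  2u             /* assumed EFX_PAGE_IP_ALIGN */

/* Stand-in for the per-page bookkeeping kept at the start of each page. */
struct rx_page_state {
	unsigned refcnt;
	uint64_t dma_addr;
};

/* Largest buffer that still lets two buffers share one page,
 * mirroring the EFX_RX_HALF_PAGE definition above. */
#define RX_HALF_PAGE ((FAKE_PAGE_SIZE >> 1) - sizeof(struct rx_page_state))

int main(void)
{
	unsigned rx_buffer_len = 1600;      /* example buffer size */
	uint64_t page_dma = 0x12340000;     /* pretend DMA address of the page */

	/* First buffer: just past the state header, plus IP alignment. */
	uint64_t buf0 = page_dma + sizeof(struct rx_page_state) + FAKE_IP_ALIGN;
	/* Second buffer: same offset within the upper half of the page. */
	uint64_t buf1 = buf0 + (FAKE_PAGE_SIZE >> 1);

	if (rx_buffer_len <= RX_HALF_PAGE)
		printf("page holds two buffers: %#llx and %#llx\n",
		       (unsigned long long)buf0, (unsigned long long)buf1);

	/* Given either buffer, the sibling is found by flipping the
	 * half-page bit, as efx_resurrect_rx_buffer() does. */
	printf("sibling of buf1 is %#llx (buf0 again)\n",
	       (unsigned long long)(buf1 ^ (FAKE_PAGE_SIZE >> 1)));
	return 0;
}

The XOR works because the first buffer always sits in the lower half of the page, so the two addresses differ only in the PAGE_SIZE/2 bit; the shared refcnt in the page state then decides when the page is finally unmapped.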
*/ - if (unlikely(retry && (rc == 0))) - goto retry; - - out_unlock: - spin_unlock_bh(&rx_queue->add_lock); - - return rc; -} - -/** - * efx_fast_push_rx_descriptors - push new RX descriptors quickly - * @rx_queue: RX descriptor queue - * - * This will aim to fill the RX descriptor queue up to - * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so, - * it will schedule a work item to immediately continue the fast fill - */ -void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) -{ - int rc; - - rc = __efx_fast_push_rx_descriptors(rx_queue, 0); - if (unlikely(rc)) { - /* Schedule the work item to run immediately. The hope is - * that work is immediately pending to free some memory - * (e.g. an RX event or TX completion) - */ - efx_schedule_slow_fill(rx_queue, 0); - } + if (rx_queue->notified_count != rx_queue->added_count) + efx_nic_notify_rx_desc(rx_queue); } -void efx_rx_work(struct work_struct *data) +void efx_rx_slow_fill(unsigned long context) { - struct efx_rx_queue *rx_queue; - int rc; - - rx_queue = container_of(data, struct efx_rx_queue, work.work); - - if (unlikely(!rx_queue->channel->enabled)) - return; - - EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU " - "%d\n", rx_queue->queue, raw_smp_processor_id()); + struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; + struct efx_channel *channel = rx_queue->channel; + /* Post an event to cause NAPI to run and refill the queue */ + efx_nic_generate_fill_event(channel); ++rx_queue->slow_fill_count; - /* Push new RX descriptors, allowing at least 1 jiffy for - * the kernel to free some more memory. */ - rc = __efx_fast_push_rx_descriptors(rx_queue, 1); - if (rc) - efx_schedule_slow_fill(rx_queue, 1); } static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, @@ -417,10 +417,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, *discard = true; if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { - EFX_ERR_RL(efx, " RX queue %d seriously overlength " - "RX event (0x%x > 0x%x+0x%x). Leaking\n", - rx_queue->queue, len, max_len, - efx->type->rx_buffer_padding); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + " RX queue %d seriously overlength " + "RX event (0x%x > 0x%x+0x%x). Leaking\n", + rx_queue->queue, len, max_len, + efx->type->rx_buffer_padding); /* If this buffer was skb-allocated, then the meta * data at the end of the skb will be trashed. So * we have no choice but to leak the fragment. 
@@ -428,8 +430,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, *leak_packet = (rx_buf->skb != NULL); efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); } else { - EFX_ERR_RL(efx, " RX queue %d overlength RX event " - "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + " RX queue %d overlength RX event " + "(0x%x > 0x%x)\n", + rx_queue->queue, len, max_len); } rx_queue->channel->n_rx_overlength++; @@ -449,6 +454,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel, /* Pass the skb/page into the LRO engine */ if (rx_buf->page) { + struct efx_nic *efx = channel->efx; struct page *page = rx_buf->page; struct sk_buff *skb; @@ -461,6 +467,9 @@ static void efx_rx_packet_lro(struct efx_channel *channel, return; } + if (efx->net_dev->features & NETIF_F_RXHASH) + skb->rxhash = efx_rx_buf_hash(rx_buf); + skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->frags[0].page_offset = efx_rx_buf_offset(rx_buf); @@ -498,6 +507,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, unsigned int len, bool checksummed, bool discard) { struct efx_nic *efx = rx_queue->efx; + struct efx_channel *channel = rx_queue->channel; struct efx_rx_buffer *rx_buf; bool leak_packet = false; @@ -516,21 +526,23 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, efx_rx_packet__check_len(rx_queue, rx_buf, len, &discard, &leak_packet); - EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n", - rx_queue->queue, index, - (unsigned long long)rx_buf->dma_addr, len, - (checksummed ? " [SUMMED]" : ""), - (discard ? " [DISCARD]" : "")); + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received id %x at %llx+%x %s%s\n", + rx_queue->queue, index, + (unsigned long long)rx_buf->dma_addr, len, + (checksummed ? " [SUMMED]" : ""), + (discard ? " [DISCARD]" : "")); /* Discard packet, if instructed to do so */ if (unlikely(discard)) { if (unlikely(leak_packet)) - rx_queue->channel->n_skbuff_leaks++; + channel->n_skbuff_leaks++; else - /* We haven't called efx_unmap_rx_buffer yet, - * so fini the entire rx_buffer here */ - efx_fini_rx_buffer(rx_queue, rx_buf); - return; + efx_recycle_rx_buffer(channel, rx_buf); + + /* Don't hold off the previous receive */ + rx_buf = NULL; + goto out; } /* Release card resources - assumes all RX buffers consumed in-order @@ -547,6 +559,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, * prefetched into cache. */ rx_buf->len = len; +out: if (rx_queue->channel->rx_pkt) __efx_rx_packet(rx_queue->channel, rx_queue->channel->rx_pkt, @@ -562,6 +575,9 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_nic *efx = channel->efx; struct sk_buff *skb; + rx_buf->data += efx->type->rx_buffer_hash_size; + rx_buf->len -= efx->type->rx_buffer_hash_size; + /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here */ @@ -574,8 +590,12 @@ void __efx_rx_packet(struct efx_channel *channel, if (rx_buf->skb) { prefetch(skb_shinfo(rx_buf->skb)); + skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size); skb_put(rx_buf->skb, rx_buf->len); + if (efx->net_dev->features & NETIF_F_RXHASH) + rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf); + /* Move past the ethernet header. 
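[Note on the RX hash handling above] With rx_buffer_hash_size set (0x10 in the Siena entry later in this patch), the hardware prepends a prefix to every received frame; __efx_rx_packet() advances data/len past it, so efx_rx_buf_hash() can pick the Toeplitz hash out of the four bytes immediately before the payload. A small sketch of that extraction using the byte-wise fallback path; the buffer contents and hash value are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define RX_HASH_PREFIX_LEN 16   /* assumed: rx_buffer_hash_size = 0x10 */

/* Hash is read from the four bytes immediately before the payload,
 * little-endian, matching the byte-wise fallback in efx_rx_buf_hash(). */
static uint32_t rx_buf_hash(const uint8_t *payload)
{
	const uint8_t *p = payload - 4;

	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* [16-byte prefix written by the NIC][Ethernet frame...] */
	uint8_t dma_buffer[RX_HASH_PREFIX_LEN + 14] = { 0 };
	const uint8_t *payload = dma_buffer + RX_HASH_PREFIX_LEN;

	/* Pretend the NIC stored hash 0xdeadbeef at the end of the prefix. */
	dma_buffer[12] = 0xef;
	dma_buffer[13] = 0xbe;
	dma_buffer[14] = 0xad;
	dma_buffer[15] = 0xde;

	printf("rxhash = %#x\n", rx_buf_hash(payload));   /* 0xdeadbeef */
	return 0;
}

The #if in efx_rx_buf_hash() takes the single-word read when unaligned loads are cheap or NET_IP_ALIGN keeps the pointer 4-byte aligned; the byte-wise form above is the portable fallback.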
rx_buf->data still points * at the ethernet header */ rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, @@ -633,7 +653,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) unsigned int rxq_size; int rc; - EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); + netif_dbg(efx, probe, efx->net_dev, + "creating RX queue %d\n", rx_queue->queue); /* Allocate RX buffers */ rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); @@ -653,7 +674,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) { unsigned int max_fill, trigger, limit; - EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "initialising RX queue %d\n", rx_queue->queue); /* Initialise ptr fields */ rx_queue->added_count = 0; @@ -680,8 +702,10 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) int i; struct efx_rx_buffer *rx_buf; - EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "shutting down RX queue %d\n", rx_queue->queue); + del_timer_sync(&rx_queue->slow_fill); efx_nic_fini_rx(rx_queue); /* Release RX buffers NB start at index 0 not current HW ptr */ @@ -691,21 +715,12 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) efx_fini_rx_buffer(rx_queue, rx_buf); } } - - /* For a page that is part-way through splitting into RX buffers */ - if (rx_queue->buf_page != NULL) { - pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, - efx_rx_buf_size(rx_queue->efx), - PCI_DMA_FROMDEVICE); - __free_pages(rx_queue->buf_page, - rx_queue->efx->rx_buffer_order); - rx_queue->buf_page = NULL; - } } void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) { - EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "destroying RX queue %d\n", rx_queue->queue); efx_nic_remove_rx(rx_queue); diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index 371e86cc090f3c93eb1fad2775012af54ef0547f..85f015f005d5c12afecdf1a1db7fe91fbdd8c3f1 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c @@ -38,7 +38,7 @@ struct efx_loopback_payload { struct udphdr udp; __be16 iteration; const char msg[64]; -} __attribute__ ((packed)); +} __packed; /* Loopback test source MAC address */ static const unsigned char payload_source[ETH_ALEN] = { @@ -123,7 +123,7 @@ static int efx_test_interrupts(struct efx_nic *efx, { struct efx_channel *channel; - EFX_LOG(efx, "testing interrupts\n"); + netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); tests->interrupt = -1; /* Reset interrupt flag */ @@ -142,16 +142,17 @@ static int efx_test_interrupts(struct efx_nic *efx, efx_nic_generate_interrupt(efx); /* Wait for arrival of test interrupt. 
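[Note on the logging conversion throughout this patch] Most hunks replace the driver-private EFX_LOG/EFX_TRACE/EFX_ERR macros with the generic netif_dbg/netif_info/netif_err helpers, which only emit a message when the matching class bit (drv, probe, hw, rx_err, ...) is set in the device's msg_enable mask. A simplified userspace model of that gating; the bit values and the macro below are illustrative, not the kernel's NETIF_MSG_* definitions:

#include <stdio.h>

/* Illustrative message-class bits; the kernel's NETIF_MSG_* values differ. */
enum {
	MSG_DRV    = 1 << 0,
	MSG_PROBE  = 1 << 1,
	MSG_HW     = 1 << 2,
	MSG_RX_ERR = 1 << 3,
};

struct fake_efx {
	unsigned msg_enable;     /* set via ethtool msglvl in a real driver */
	const char *name;
};

#define netif_msg(efx, bit, fmt, ...)					\
	do {								\
		if ((efx)->msg_enable & (bit))				\
			printf("%s: " fmt, (efx)->name, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	struct fake_efx efx = {
		.msg_enable = MSG_PROBE | MSG_RX_ERR,
		.name = "eth0",
	};

	netif_msg(&efx, MSG_PROBE, "creating RX queue %d\n", 0);   /* printed */
	netif_msg(&efx, MSG_HW, "ignoring RX flow control thresholds\n"); /* suppressed */
	return 0;
}

In the real macros the test is done through netif_msg_drv(), netif_msg_hw() and friends, and msg_enable is typically adjusted at runtime through ethtool's msglvl setting.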
*/ - EFX_LOG(efx, "waiting for test interrupt\n"); + netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); schedule_timeout_uninterruptible(HZ / 10); if (efx->last_irq_cpu >= 0) goto success; - EFX_ERR(efx, "timed out waiting for interrupt\n"); + netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); return -ETIMEDOUT; success: - EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx), + netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", + INT_MODE(efx), efx->last_irq_cpu); tests->interrupt = 1; return 0; @@ -161,23 +162,18 @@ static int efx_test_interrupts(struct efx_nic *efx, static int efx_test_eventq_irq(struct efx_channel *channel, struct efx_self_tests *tests) { - unsigned int magic, count; - - /* Channel specific code, limited to 20 bits */ - magic = (0x00010150 + channel->channel); - EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", - channel->channel, magic); + struct efx_nic *efx = channel->efx; + unsigned int magic_count, count; tests->eventq_dma[channel->channel] = -1; tests->eventq_int[channel->channel] = -1; tests->eventq_poll[channel->channel] = -1; - /* Reset flag and zero magic word */ + magic_count = channel->magic_count; channel->efx->last_irq_cpu = -1; - channel->eventq_magic = 0; smp_wmb(); - efx_nic_generate_test_event(channel, magic); + efx_nic_generate_test_event(channel); /* Wait for arrival of interrupt */ count = 0; @@ -187,33 +183,36 @@ static int efx_test_eventq_irq(struct efx_channel *channel, if (channel->work_pending) efx_process_channel_now(channel); - if (channel->eventq_magic == magic) + if (channel->magic_count != magic_count) goto eventq_ok; } while (++count < 2); - EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", - channel->channel); + netif_err(efx, drv, efx->net_dev, + "channel %d timed out waiting for event queue\n", + channel->channel); /* See if interrupt arrived */ if (channel->efx->last_irq_cpu >= 0) { - EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " - "during event queue test\n", channel->channel, - raw_smp_processor_id()); + netif_err(efx, drv, efx->net_dev, + "channel %d saw interrupt on CPU%d " + "during event queue test\n", channel->channel, + raw_smp_processor_id()); tests->eventq_int[channel->channel] = 1; } /* Check to see if event was received even if interrupt wasn't */ efx_process_channel_now(channel); - if (channel->eventq_magic == magic) { - EFX_ERR(channel->efx, "channel %d event was generated, but " - "failed to trigger an interrupt\n", channel->channel); + if (channel->magic_count != magic_count) { + netif_err(efx, drv, efx->net_dev, + "channel %d event was generated, but " + "failed to trigger an interrupt\n", channel->channel); tests->eventq_dma[channel->channel] = 1; } return -ETIMEDOUT; eventq_ok: - EFX_LOG(channel->efx, "channel %d event queue passed\n", - channel->channel); + netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n", + channel->channel); tests->eventq_dma[channel->channel] = 1; tests->eventq_int[channel->channel] = 1; tests->eventq_poll[channel->channel] = 1; @@ -266,51 +265,57 @@ void efx_loopback_rx_packet(struct efx_nic *efx, /* Check that header exists */ if (pkt_len < sizeof(received->header)) { - EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " - "test\n", pkt_len, LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); goto err; } /* Check that the ethernet header exists */ 
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { - EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); goto err; } /* Check packet length */ if (pkt_len != sizeof(*payload)) { - EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " - "%s loopback test\n", pkt_len, (int)sizeof(*payload), - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); goto err; } /* Check that IP header matches */ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { - EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); goto err; } /* Check that msg and padding matches */ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { - EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); goto err; } /* Check that iteration matches */ if (received->iteration != payload->iteration) { - EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " - "%s loopback test\n", ntohs(received->iteration), - ntohs(payload->iteration), LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); goto err; } /* Increase correct RX count */ - EFX_TRACE(efx, "got loopback RX in %s loopback test\n", - LOOPBACK_MODE(efx)); + netif_vdbg(efx, drv, efx->net_dev, + "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); atomic_inc(&state->rx_good); return; @@ -318,10 +323,10 @@ void efx_loopback_rx_packet(struct efx_nic *efx, err: #ifdef EFX_ENABLE_DEBUG if (atomic_read(&state->rx_bad) == 0) { - EFX_ERR(efx, "received packet:\n"); + netif_err(efx, drv, efx->net_dev, "received packet:\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, buf_ptr, pkt_len, 0); - EFX_ERR(efx, "expected packet:\n"); + netif_err(efx, drv, efx->net_dev, "expected packet:\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, &state->payload, sizeof(state->payload), 0); } @@ -402,9 +407,11 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) netif_tx_unlock_bh(efx->net_dev); if (rc != NETDEV_TX_OK) { - EFX_ERR(efx, "TX queue %d could not transmit packet %d " - "of %d in %s loopback test\n", tx_queue->queue, - i + 1, state->packet_count, LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "TX queue %d could not transmit packet %d of " + "%d in %s loopback test\n", tx_queue->queue, + i + 1, state->packet_count, + LOOPBACK_MODE(efx)); /* Defer cleaning up the other skbs for the caller */ kfree_skb(skb); @@ -460,20 +467,22 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, /* Don't free the skbs; they will be picked up on TX * overflow or channel teardown. 
*/ - EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " - "TX completion events in %s loopback test\n", - tx_queue->queue, tx_done, state->packet_count, - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->queue, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); rc = -ETIMEDOUT; /* Allow to fall through so we see the RX errors as well */ } /* We may always be up to a flush away from our desired packet total */ if (rx_good != state->packet_count) { - EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " - "received packets in %s loopback test\n", - tx_queue->queue, rx_good, state->packet_count, - LOOPBACK_MODE(efx)); + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->queue, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); rc = -ETIMEDOUT; /* Fall through */ } @@ -505,9 +514,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, return -ENOMEM; state->flush = false; - EFX_LOG(efx, "TX queue %d testing %s loopback with %d " - "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), - state->packet_count); + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d testing %s loopback with %d packets\n", + tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); efx_iterate_state(efx); begin_rc = efx_begin_loopback(tx_queue); @@ -531,9 +541,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, } } - EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " - "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), - state->packet_count); + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); return 0; } @@ -545,7 +556,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, static int efx_wait_for_link(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; - int count; + int count, link_up_count = 0; bool link_up; for (count = 0; count < 40; count++) { @@ -567,8 +578,12 @@ static int efx_wait_for_link(struct efx_nic *efx) link_up = !efx->mac_op->check_fault(efx); mutex_unlock(&efx->mac_lock); - if (link_up) - return 0; + if (link_up) { + if (++link_up_count == 2) + return 0; + } else { + link_up_count = 0; + } } return -ETIMEDOUT; @@ -604,15 +619,17 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, rc = __efx_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); if (rc) { - EFX_ERR(efx, "unable to move into %s loopback\n", - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "unable to move into %s loopback\n", + LOOPBACK_MODE(efx)); goto out; } rc = efx_wait_for_link(efx); if (rc) { - EFX_ERR(efx, "loopback %s never came up\n", - LOOPBACK_MODE(efx)); + netif_err(efx, drv, efx->net_dev, + "loopback %s never came up\n", + LOOPBACK_MODE(efx)); goto out; } @@ -720,7 +737,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, rc_reset = rc; if (rc_reset) { - EFX_ERR(efx, "Unable to recover from chip test\n"); + netif_err(efx, drv, efx->net_dev, + "Unable to recover from chip test\n"); efx_schedule_reset(efx, RESET_TYPE_DISABLE); return rc_reset; } diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index f2b1e6180753536336921b90f53f62c8675fda72..3fab030f8ab5508aa191dd5cc2d1b7b69a935fd8 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ 
-118,10 +118,11 @@ static int siena_probe_port(struct efx_nic *efx) MC_CMD_MAC_NSTATS * sizeof(u64)); if (rc) return rc; - EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", - (u64)efx->stats_buffer.dma_addr, - efx->stats_buffer.addr, - (u64)virt_to_phys(efx->stats_buffer.addr)); + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64)efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64)virt_to_phys(efx->stats_buffer.addr)); efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); @@ -216,7 +217,8 @@ static int siena_probe_nic(struct efx_nic *efx) efx->nic_data = nic_data; if (efx_nic_fpga_ver(efx) != 0) { - EFX_ERR(efx, "Siena FPGA not supported\n"); + netif_err(efx, probe, efx->net_dev, + "Siena FPGA not supported\n"); rc = -ENODEV; goto fail1; } @@ -233,8 +235,8 @@ static int siena_probe_nic(struct efx_nic *efx) rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); if (rc) { - EFX_ERR(efx, "Failed to read MCPU firmware version - " - "rc %d\n", rc); + netif_err(efx, probe, efx->net_dev, + "Failed to read MCPU firmware version - rc %d\n", rc); goto fail1; /* MCPU absent? */ } @@ -242,17 +244,19 @@ static int siena_probe_nic(struct efx_nic *efx) * filter settings. We must do this before we reset the NIC */ rc = efx_mcdi_drv_attach(efx, true, &already_attached); if (rc) { - EFX_ERR(efx, "Unable to register driver with MCPU\n"); + netif_err(efx, probe, efx->net_dev, + "Unable to register driver with MCPU\n"); goto fail2; } if (already_attached) /* Not a fatal error */ - EFX_ERR(efx, "Host already registered with MCPU\n"); + netif_err(efx, probe, efx->net_dev, + "Host already registered with MCPU\n"); /* Now we can reset the NIC */ rc = siena_reset_hw(efx, RESET_TYPE_ALL); if (rc) { - EFX_ERR(efx, "failed to reset NIC\n"); + netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } @@ -264,24 +268,23 @@ static int siena_probe_nic(struct efx_nic *efx) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); - EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", - (unsigned long long)efx->irq_status.dma_addr, - efx->irq_status.addr, - (unsigned long long)virt_to_phys(efx->irq_status.addr)); + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (unsigned long long)efx->irq_status.dma_addr, + efx->irq_status.addr, + (unsigned long long)virt_to_phys(efx->irq_status.addr)); /* Read in the non-volatile configuration */ rc = siena_probe_nvconfig(efx); if (rc == -EINVAL) { - EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n"); + netif_err(efx, probe, efx->net_dev, + "NVRAM is invalid therefore using defaults\n"); efx->phy_type = PHY_TYPE_NONE; efx->mdio.prtad = MDIO_PRTAD_NONE; } else if (rc) { goto fail5; } - get_random_bytes(&nic_data->ipv6_rss_key, - sizeof(nic_data->ipv6_rss_key)); - return 0; fail5: @@ -301,7 +304,6 @@ static int siena_probe_nic(struct efx_nic *efx) */ static int siena_init_nic(struct efx_nic *efx) { - struct siena_nic_data *nic_data = efx->nic_data; efx_oword_t temp; int rc; @@ -326,25 +328,36 @@ static int siena_init_nic(struct efx_nic *efx) efx_reado(efx, &temp, FR_AZ_RX_CFG); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); + /* Enable hash insertion. This is broken for the 'Falcon' hash + * if IPv6 hashing is also enabled, so also select Toeplitz + * TCP/IPv4 and IPv4 hashes. 
*/ + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); efx_writeo(efx, &temp, FR_AZ_RX_CFG); + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + /* Enable IPv6 RSS */ - BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) != + BUILD_BUG_ON(sizeof(efx->rx_hash_key) < 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); - memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp)); + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); - memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp)); + memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); - memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp), + memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) /* No MCDI operation has been defined to set thresholds */ - EFX_ERR(efx, "ignoring RX flow control thresholds\n"); + netif_err(efx, hw, efx->net_dev, + "ignoring RX flow control thresholds\n"); /* Enable event logging */ rc = efx_mcdi_log_ctrl(efx, true, false, 0); @@ -565,7 +578,8 @@ static int siena_set_wol(struct efx_nic *efx, u32 type) return 0; fail: - EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc); + netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", + __func__, type, rc); return rc; } @@ -628,6 +642,7 @@ struct efx_nic_type siena_a0_nic_type = { .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy @@ -635,6 +650,7 @@ struct efx_nic_type siena_a0_nic_type = { * channels */ .tx_dc_base = 0x88000, .rx_dc_base = 0x68000, - .offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH), .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, }; diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index f21efe7bd316e3618664cbcd0f1e94ef42a97ea9..6791be90c2fe095f213e8d94e3f5077f0e8053da 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -228,7 +228,8 @@ int sft9001_wait_boot(struct efx_nic *efx) boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_BOOT_STATUS_REG); if (boot_stat >= 0) { - EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat); + netif_dbg(efx, hw, efx->net_dev, + "PHY boot status = %#x\n", boot_stat); switch (boot_stat & ((1 << PCS_BOOT_FATAL_ERROR_LBN) | (3 << PCS_BOOT_PROGRESS_LBN) | @@ -463,10 +464,11 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok) reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN; } else { reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN; - EFX_ERR(efx, "appears to be plugged into a port" - " that is not 10GBASE-T capable. The PHY" - " supports 10GBASE-T ONLY, so no link can" - " be established\n"); + netif_err(efx, link, efx->net_dev, + "appears to be plugged into a port" + " that is not 10GBASE-T capable. 
The PHY" + " supports 10GBASE-T ONLY, so no link can" + " be established\n"); } efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 6bb12a87ef2d5f4292f4343f41e6ed010fd5f14f..c6942da2c99af75b6749c7b94edf7b8204cdc676 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -42,7 +42,7 @@ void efx_stop_queue(struct efx_channel *channel) return; spin_lock_bh(&channel->tx_stop_lock); - EFX_TRACE(efx, "stop TX queue\n"); + netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n"); atomic_inc(&channel->tx_stop_count); netif_tx_stop_queue( @@ -64,7 +64,7 @@ void efx_wake_queue(struct efx_channel *channel) local_bh_disable(); if (atomic_dec_and_lock(&channel->tx_stop_count, &channel->tx_stop_lock)) { - EFX_TRACE(efx, "waking TX queue\n"); + netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n"); netif_tx_wake_queue( netdev_get_tx_queue( efx->net_dev, @@ -94,8 +94,9 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, if (buffer->skb) { dev_kfree_skb_any((struct sk_buff *) buffer->skb); buffer->skb = NULL; - EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x " - "complete\n", tx_queue->queue, read_ptr); + netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, + "TX queue %d transmission id %x complete\n", + tx_queue->queue, tx_queue->read_count); } } @@ -300,9 +301,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) return NETDEV_TX_OK; pci_err: - EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d " - "fragments for DMA\n", tx_queue->queue, skb->len, - skb_shinfo(skb)->nr_frags + 1); + netif_err(efx, tx_err, efx->net_dev, + " TX queue %d could not map skb with %d bytes %d " + "fragments for DMA\n", tx_queue->queue, skb->len, + skb_shinfo(skb)->nr_frags + 1); /* Mark the packet as transmitted, and free the SKB ourselves */ dev_kfree_skb_any(skb); @@ -354,9 +356,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, while (read_ptr != stop_index) { struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; if (unlikely(buffer->len == 0)) { - EFX_ERR(tx_queue->efx, "TX queue %d spurious TX " - "completion id %x\n", tx_queue->queue, - read_ptr); + netif_err(efx, tx_err, efx->net_dev, + "TX queue %d spurious TX completion id %x\n", + tx_queue->queue, read_ptr); efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); return; } @@ -431,7 +433,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) unsigned int txq_size; int i, rc; - EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); + netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n", + tx_queue->queue); /* Allocate software ring */ txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); @@ -456,7 +459,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) void efx_init_tx_queue(struct efx_tx_queue *tx_queue) { - EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "initialising TX queue %d\n", tx_queue->queue); tx_queue->insert_count = 0; tx_queue->write_count = 0; @@ -488,7 +492,8 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) { - EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); /* Flush TX queue, remove descriptor ring */ efx_nic_fini_tx(tx_queue); @@ -507,7 +512,8 @@ void efx_fini_tx_queue(struct 
efx_tx_queue *tx_queue) void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { - EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); kfree(tx_queue->buffer); @@ -639,8 +645,8 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); if (base_kva == NULL) { - EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" - " headers\n"); + netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, + "Unable to allocate page for TSO headers\n"); return -ENOMEM; } @@ -1124,7 +1130,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, return NETDEV_TX_OK; mem_err: - EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); + netif_err(efx, tx_err, efx->net_dev, + "Out of memory for TSO headers, or PCI mapping error\n"); dev_kfree_skb_any(skb); goto unwind; diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 518f7fc914732c12248bb5d3417e79fc7d3e74ce..782e45a613d606c9c8e615015f30fc3a06b1a4ec 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -54,7 +54,7 @@ /* Increase filter depth to avoid RX_RESET */ #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A /* Flushes may never complete */ -#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A +#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB /* Leak overlength packets rather than free */ #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 501a55ffce570d5e9920efb6933346705473e872..32f2deaa38bb5a2f7b8964286fca1595fd82117a 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -88,6 +88,55 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rpadir = 1, .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; +#elif defined(CONFIG_CPU_SUBTYPE_SH7757) +#define SH_ETH_RESET_DEFAULT 1 +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 ioaddr = ndev->base_addr; + + if (mdp->duplex) /* Full */ + ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); + else /* Half */ + ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 ioaddr = ndev->base_addr; + + switch (mdp->speed) { + case 10: /* 10BASE */ + ctrl_outl(0, ioaddr + RTRATE); + break; + case 100:/* 100BASE */ + ctrl_outl(1, ioaddr + RTRATE); + break; + default: + break; + } +} + +/* SH7757 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .rmcr_value = 0x00000001, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_ade = 1, +}; #elif defined(CONFIG_CPU_SUBTYPE_SH7763) #define SH_ETH_HAS_TSU 1 @@ -1023,7 +1072,9 @@ static int sh_eth_open(struct net_device *ndev) pm_runtime_get_sync(&mdp->pdev->dev); ret = request_irq(ndev->irq, sh_eth_interrupt, -#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764) +#if 
defined(CONFIG_CPU_SUBTYPE_SH7763) || \ + defined(CONFIG_CPU_SUBTYPE_SH7764) || \ + defined(CONFIG_CPU_SUBTYPE_SH7757) IRQF_SHARED, #else 0, @@ -1233,7 +1284,7 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, if (!phydev) return -ENODEV; - return phy_mii_ioctl(phydev, if_mii(rq), cmd); + return phy_mii_ioctl(phydev, rq, cmd); } #if defined(SH_ETH_HAS_TSU) diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7985165e84fc4ed6ca6ce5fceb8e60d54dca0b5d..194e5cf8c763d8055e988ea14d1c5c43a8819029 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -79,7 +79,7 @@ #define SKY2_EEPROM_MAGIC 0x9955aabb -#define RING_NEXT(x,s) (((x)+1) & ((s)-1)) +#define RING_NEXT(x, s) (((x)+1) & ((s)-1)) static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK @@ -172,7 +172,7 @@ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) udelay(10); } - dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name); + dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); return -ETIMEDOUT; io_error: @@ -1067,7 +1067,7 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) return le; } -static unsigned sky2_get_rx_threshold(struct sky2_port* sky2) +static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) { unsigned size; @@ -1078,7 +1078,7 @@ static unsigned sky2_get_rx_threshold(struct sky2_port* sky2) return (size - 8) / sizeof(u32); } -static unsigned sky2_get_rx_data_size(struct sky2_port* sky2) +static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) { struct rx_ring_info *re; unsigned size; @@ -1102,7 +1102,7 @@ static unsigned sky2_get_rx_data_size(struct sky2_port* sky2) } /* Build description to hardware for one receive segment */ -static void sky2_rx_add(struct sky2_port *sky2, u8 op, +static void sky2_rx_add(struct sky2_port *sky2, u8 op, dma_addr_t map, unsigned len) { struct sky2_rx_le *le; @@ -3014,7 +3014,7 @@ static int __devinit sky2_init(struct sky2_hw *hw) hw->chip_id = sky2_read8(hw, B2_CHIP_ID); hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; - switch(hw->chip_id) { + switch (hw->chip_id) { case CHIP_ID_YUKON_XL: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; if (hw->chip_rev < CHIP_REV_YU_XL_A2) @@ -3685,7 +3685,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p) return 0; } -static void inline sky2_add_filter(u8 filter[8], const u8 *addr) +static inline void sky2_add_filter(u8 filter[8], const u8 *addr) { u32 bit; @@ -3911,7 +3911,7 @@ static int sky2_set_coalesce(struct net_device *dev, return -EINVAL; if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) return -EINVAL; - if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING) + if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) return -EINVAL; if (ecmd->tx_coalesce_usecs == 0) @@ -4188,17 +4188,13 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom static int sky2_set_flags(struct net_device *dev, u32 data) { struct sky2_port *sky2 = netdev_priv(dev); + u32 supported = + (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 
0 : ETH_FLAG_RXHASH; + int rc; - if (data & ~ETH_FLAG_RXHASH) - return -EOPNOTSUPP; - - if (data & ETH_FLAG_RXHASH) { - if (sky2->hw->flags & SKY2_HW_RSS_BROKEN) - return -EINVAL; - - dev->features |= NETIF_F_RXHASH; - } else - dev->features &= ~NETIF_F_RXHASH; + rc = ethtool_op_set_flags(dev, data, supported); + if (rc) + return rc; rx_set_rss(dev); @@ -4376,7 +4372,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) seq_printf(seq, "%u:", idx); sop = 0; - switch(le->opcode & ~HW_OWNER) { + switch (le->opcode & ~HW_OWNER) { case OP_ADDR64: seq_printf(seq, " %#x:", a); break; @@ -4445,7 +4441,7 @@ static int sky2_device_event(struct notifier_block *unused, if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) return NOTIFY_DONE; - switch(event) { + switch (event) { case NETDEV_CHANGENAME: if (sky2->debugfs) { sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, @@ -4640,7 +4636,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) struct pci_dev *pdev = hw->pdev; int err; - init_waitqueue_head (&hw->msi_wait); + init_waitqueue_head(&hw->msi_wait); sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); @@ -4757,7 +4753,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, * this driver uses software swapping. */ reg &= ~PCI_REV_DESC; - err = pci_write_config_dword(pdev,PCI_DEV_REG2, reg); + err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg); if (err) { dev_err(&pdev->dev, "PCI write config failed\n"); goto err_out_free_regions; diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 084eff21b67a87b5300c733eb298c45dbfee1bdd..61891a6cacc2350f4e8fb7e32a0e1cc87ee2babf 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2161,21 +2161,21 @@ struct sky2_tx_le { __le16 length; /* also vlan tag or checksum start */ u8 ctrl; u8 opcode; -} __attribute((packed)); +} __packed; struct sky2_rx_le { __le32 addr; __le16 length; u8 ctrl; u8 opcode; -} __attribute((packed)); +} __packed; struct sky2_status_le { __le32 status; /* also checksum */ __le16 length; /* also vlan tag */ u8 css; u8 opcode; -} __attribute((packed)); +} __packed; struct tx_ring_info { struct sk_buff *skb; diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index cc559741b0facf24b076e8b96eabe1c90ad78170..0909ae934ad0fcee52c5eb67101240e77fb70dde 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -84,8 +84,7 @@ struct smsc911x_data { */ spinlock_t mac_lock; - /* spinlock to ensure 16-bit accesses are serialised. - * unused with a 32-bit bus */ + /* spinlock to ensure register accesses are serialised */ spinlock_t dev_lock; struct phy_device *phy_dev; @@ -118,37 +117,33 @@ struct smsc911x_data { unsigned int hashlo; }; -/* The 16-bit access functions are significantly slower, due to the locking - * necessary. If your bus hardware can be configured to do this for you - * (in response to a single 32-bit operation from software), you should use - * the 32-bit access functions instead. 
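[Note on the sky2_set_flags() rewrite above] Validation is now delegated to ethtool_op_set_flags() with a mask of the flags this hardware supports, so RXHASH is rejected automatically when SKY2_HW_RSS_BROKEN is set. A rough model of that pattern with stand-in types; the kernel helper's exact error code and feature mapping are simplified here:

#include <errno.h>
#include <stdio.h>

#define FLAG_RXHASH (1u << 0)   /* stand-in for ETH_FLAG_RXHASH */

struct fake_dev {
	unsigned features;
};

/* Rough model of ethtool_op_set_flags(dev, data, supported): refuse any
 * requested bit outside the supported mask, otherwise mirror the request
 * into the device features. */
static int op_set_flags(struct fake_dev *dev, unsigned data, unsigned supported)
{
	if (data & ~supported)
		return -EINVAL;
	dev->features = (dev->features & ~supported) | data;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .features = 0 };
	int rss_broken = 0;                      /* analogue of SKY2_HW_RSS_BROKEN */
	unsigned supported = rss_broken ? 0 : FLAG_RXHASH;

	if (op_set_flags(&dev, FLAG_RXHASH, supported) == 0)
		printf("RXHASH %s\n", (dev.features & FLAG_RXHASH) ? "on" : "off");
	else
		printf("RXHASH not supported on this chip\n");
	return 0;
}

The net effect is that unsupported requests now fail in one shared place instead of being open-coded per driver.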
*/ - -static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) { if (pdata->config.flags & SMSC911X_USE_32BIT) return readl(pdata->ioaddr + reg); - if (pdata->config.flags & SMSC911X_USE_16BIT) { - u32 data; - unsigned long flags; - - /* these two 16-bit reads must be performed consecutively, so - * must not be interrupted by our own ISR (which would start - * another read operation) */ - spin_lock_irqsave(&pdata->dev_lock, flags); - data = ((readw(pdata->ioaddr + reg) & 0xFFFF) | + if (pdata->config.flags & SMSC911X_USE_16BIT) + return ((readw(pdata->ioaddr + reg) & 0xFFFF) | ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); - spin_unlock_irqrestore(&pdata->dev_lock, flags); - - return data; - } BUG(); return 0; } -static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, - u32 val) +static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + u32 data; + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + data = __smsc911x_reg_read(pdata, reg); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + + return data; +} + +static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) { if (pdata->config.flags & SMSC911X_USE_32BIT) { writel(val, pdata->ioaddr + reg); @@ -156,44 +151,54 @@ static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, } if (pdata->config.flags & SMSC911X_USE_16BIT) { - unsigned long flags; - - /* these two 16-bit writes must be performed consecutively, so - * must not be interrupted by our own ISR (which would start - * another read operation) */ - spin_lock_irqsave(&pdata->dev_lock, flags); writew(val & 0xFFFF, pdata->ioaddr + reg); writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); - spin_unlock_irqrestore(&pdata->dev_lock, flags); return; } BUG(); } +static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + __smsc911x_reg_write(pdata, reg, val); + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + /* Writes a packet to the TX_DATA_FIFO */ static inline void smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, unsigned int wordcount) { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { while (wordcount--) - smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++)); - return; + __smsc911x_reg_write(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; } if (pdata->config.flags & SMSC911X_USE_32BIT) { writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); - return; + goto out; } if (pdata->config.flags & SMSC911X_USE_16BIT) { while (wordcount--) - smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); - return; + __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); + goto out; } BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); } /* Reads a packet out of the RX_DATA_FIFO */ @@ -201,24 +206,31 @@ static inline void smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, unsigned int wordcount) { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { while (wordcount--) - *buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO)); - return; + *buf++ = swab32(__smsc911x_reg_read(pdata, + RX_DATA_FIFO)); + goto out; } if (pdata->config.flags & SMSC911X_USE_32BIT) { readsl(pdata->ioaddr + RX_DATA_FIFO, buf, 
wordcount); - return; + goto out; } if (pdata->config.flags & SMSC911X_USE_16BIT) { while (wordcount--) - *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO); - return; + *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO); + goto out; } BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); } /* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read @@ -1538,7 +1550,7 @@ static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!netif_running(dev) || !pdata->phy_dev) return -EINVAL; - return phy_mii_ioctl(pdata->phy_dev, if_mii(ifr), cmd); + return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); } static int diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index 6cdee6a15f9f2897fa06baa40ad5c34c90c2e27d..b09ee1c319e8cb56f4813cad857765bf10fce87b 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c @@ -245,7 +245,7 @@ static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!netif_running(dev) || !pd->phy_dev) return -EINVAL; - return phy_mii_ioctl(pd->phy_dev, if_mii(ifr), cmd); + return phy_mii_ioctl(pd->phy_dev, ifr, cmd); } static int smsc9420_ethtool_get_settings(struct net_device *dev, diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 74b7ae76906e3b482d0c4ad7a47f5c4893adeb7e..a42b6873370b04924af204596baf17342319121b 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -562,7 +562,6 @@ struct netdev_private { unsigned int tx_done; struct napi_struct napi; struct net_device *dev; - struct net_device_stats stats; struct pci_dev *pci_dev; #ifdef VLAN_SUPPORT struct vlan_group *vlgrp; @@ -1174,7 +1173,7 @@ static void tx_timeout(struct net_device *dev) /* Trigger an immediate transmit demand. */ dev->trans_start = jiffies; /* prevent tx timeout */ - np->stats.tx_errors++; + dev->stats.tx_errors++; netif_wake_queue(dev); } @@ -1265,7 +1264,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) } if (skb->ip_summed == CHECKSUM_PARTIAL) { status |= TxCalTCP; - np->stats.tx_compressed++; + dev->stats.tx_compressed++; } status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16); @@ -1374,7 +1373,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n", dev->name, np->dirty_tx, np->tx_done, tx_status); if ((tx_status & 0xe0000000) == 0xa0000000) { - np->stats.tx_packets++; + dev->stats.tx_packets++; } else if ((tx_status & 0xe0000000) == 0x80000000) { u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc); struct sk_buff *skb = np->tx_info[entry].skb; @@ -1462,9 +1461,9 @@ static int __netdev_rx(struct net_device *dev, int *quota) /* There was an error. 
*/ if (debug > 2) printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status); - np->stats.rx_errors++; + dev->stats.rx_errors++; if (desc_status & RxFIFOErr) - np->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; goto next_rx; } @@ -1515,7 +1514,7 @@ static int __netdev_rx(struct net_device *dev, int *quota) #endif if (le16_to_cpu(desc->status2) & 0x0100) { skb->ip_summed = CHECKSUM_UNNECESSARY; - np->stats.rx_compressed++; + dev->stats.rx_compressed++; } /* * This feature doesn't seem to be working, at least @@ -1547,7 +1546,7 @@ static int __netdev_rx(struct net_device *dev, int *quota) } else #endif /* VLAN_SUPPORT */ netif_receive_skb(skb); - np->stats.rx_packets++; + dev->stats.rx_packets++; next_rx: np->cur_rx++; @@ -1717,12 +1716,12 @@ static void netdev_error(struct net_device *dev, int intr_status) printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name); } if (intr_status & IntrRxGFPDead) { - np->stats.rx_fifo_errors++; - np->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + dev->stats.rx_errors++; } if (intr_status & (IntrNoTxCsum | IntrDMAErr)) { - np->stats.tx_fifo_errors++; - np->stats.tx_errors++; + dev->stats.tx_fifo_errors++; + dev->stats.tx_errors++; } if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug) printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n", @@ -1736,24 +1735,24 @@ static struct net_device_stats *get_stats(struct net_device *dev) void __iomem *ioaddr = np->base; /* This adapter architecture needs no SMP locks. */ - np->stats.tx_bytes = readl(ioaddr + 0x57010); - np->stats.rx_bytes = readl(ioaddr + 0x57044); - np->stats.tx_packets = readl(ioaddr + 0x57000); - np->stats.tx_aborted_errors = + dev->stats.tx_bytes = readl(ioaddr + 0x57010); + dev->stats.rx_bytes = readl(ioaddr + 0x57044); + dev->stats.tx_packets = readl(ioaddr + 0x57000); + dev->stats.tx_aborted_errors = readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028); - np->stats.tx_window_errors = readl(ioaddr + 0x57018); - np->stats.collisions = + dev->stats.tx_window_errors = readl(ioaddr + 0x57018); + dev->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008); /* The chip only need report frame silently dropped. 
*/ - np->stats.rx_dropped += readw(ioaddr + RxDMAStatus); + dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus); writew(0, ioaddr + RxDMAStatus); - np->stats.rx_crc_errors = readl(ioaddr + 0x5703C); - np->stats.rx_frame_errors = readl(ioaddr + 0x57040); - np->stats.rx_length_errors = readl(ioaddr + 0x57058); - np->stats.rx_missed_errors = readl(ioaddr + 0x5707C); + dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C); + dev->stats.rx_frame_errors = readl(ioaddr + 0x57040); + dev->stats.rx_length_errors = readl(ioaddr + 0x57058); + dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C); - return &np->stats; + return &dev->stats; } diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h index 144f76fd3e39c71cd7870acb01a5db211f99a860..66b9da0260fe70b6177c6980604f24184dcdb431 100644 --- a/drivers/net/stmmac/common.h +++ b/drivers/net/stmmac/common.h @@ -108,6 +108,7 @@ enum rx_frame_status { /* IPC status */ good_frame = 0, discard_frame = 1, csum_none = 2, + llc_snap = 4, }; enum tx_dma_irq_status { diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h index d8d0f3553770333145ca016a0277cc8b526f99fa..8b20b19971cbe6e0156ce5bee9edc96f73f06785 100644 --- a/drivers/net/stmmac/dwmac1000.h +++ b/drivers/net/stmmac/dwmac1000.h @@ -93,7 +93,7 @@ enum inter_frame_gap { #define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ #define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ #define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ -#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */ +#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Stripping */ #define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ #define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c index 917b4e16923b800b550fee4502fae7239d67657b..2b2f5c8caf1c52ea010843789417bfb40768366b 100644 --- a/drivers/net/stmmac/dwmac1000_core.c +++ b/drivers/net/stmmac/dwmac1000_core.c @@ -220,6 +220,8 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr) ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + if (!mac) + return NULL; mac->mac = &dwmac1000_ops; mac->dma = &dwmac1000_dma_ops; diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c index 6f270a0e151afa8974356920ea72584293354e5a..2fb165fa2ba075b256533a7d1058ea00c36f2b33 100644 --- a/drivers/net/stmmac/dwmac100_core.c +++ b/drivers/net/stmmac/dwmac100_core.c @@ -179,6 +179,8 @@ struct mac_device_info *dwmac100_setup(unsigned long ioaddr) struct mac_device_info *mac; mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + if (!mac) + return NULL; pr_info("\tDWMAC100\n"); diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c index 3c18ebece043a303ee2eb3e3e7ec684aae1bac9b..f612f986a7e16b186ba5c5c1aafcc4ae98fa2313 100644 --- a/drivers/net/stmmac/enh_desc.c +++ b/drivers/net/stmmac/enh_desc.c @@ -123,7 +123,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) */ if (status == 0x0) { CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); - ret = good_frame; + ret = llc_snap; } else if (status == 0x4) { CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); ret = good_frame; diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 
a31d580f306d90c206bc6b90e56cf9bd82a597d9..bbb7951b9c4c34bb3992deed9c7a8e84ff52f68e 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -829,7 +829,6 @@ static int stmmac_open(struct net_device *dev) * In case of failure continue without timer. */ if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { pr_warning("stmmaceth: cannot attach the external timer.\n"); - tmrate = 0; priv->tm->freq = 0; priv->tm->timer_start = stmmac_no_timer_started; priv->tm->timer_stop = stmmac_no_timer_stopped; @@ -1217,9 +1216,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) priv->dev->stats.rx_errors++; else { struct sk_buff *skb; - /* Length should omit the CRC */ - int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4; + int frame_len; + frame_len = priv->hw->desc->get_rx_frame_len(p); + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 + * Type frames (LLC/LLC-SNAP) */ + if (unlikely(status != llc_snap)) + frame_len -= ETH_FCS_LEN; #ifdef STMMAC_RX_DEBUG if (frame_len > ETH_FRAME_LEN) pr_debug("\tRX frame size %d, COE status: %d\n", @@ -1437,24 +1440,18 @@ static void stmmac_poll_controller(struct net_device *dev) static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct stmmac_priv *priv = netdev_priv(dev); - int ret = -EOPNOTSUPP; + int ret; if (!netif_running(dev)) return -EINVAL; - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - if (!priv->phydev) - return -EINVAL; - - spin_lock(&priv->lock); - ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); - spin_unlock(&priv->lock); - default: - break; - } + if (!priv->phydev) + return -EINVAL; + + spin_lock(&priv->lock); + ret = phy_mii_ioctl(priv->phydev, rq, cmd); + spin_unlock(&priv->lock); + return ret; } @@ -1564,15 +1561,15 @@ static int stmmac_mac_device_setup(struct net_device *dev) else device = dwmac100_setup(ioaddr); + if (!device) + return -ENOMEM; + if (priv->enh_desc) { device->desc = &enh_desc_ops; pr_info("\tEnhanced descriptor structure\n"); } else device->desc = &ndesc_ops; - if (!device) - return -ENOMEM; - priv->hw = device; priv->wolenabled = priv->hw->pmt; /* PMT supported */ diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 151312342243d3c784613ed07e2dd2f7aec2ac1f..b6ae53bada7500a92a18a7ebe3ec5b07ebed2c1c 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -142,7 +142,6 @@ static void sun3_82586_rnr_int(struct net_device *dev); struct priv { - struct net_device_stats stats; unsigned long base; char *memtop; long int lock; @@ -788,10 +787,10 @@ static void sun3_82586_rcv_int(struct net_device *dev) skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); - p->stats.rx_packets++; + dev->stats.rx_packets++; } else - p->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { @@ -812,13 +811,13 @@ static void sun3_82586_rcv_int(struct net_device *dev) totlen += rstat & RBD_MASK; rbd->status = 0; printk("%s: received oversized frame! length: %d\n",dev->name,totlen); - p->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else /* frame !(ok), only with 'save-bad-frames' */ { printk("%s: oops! 
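The stmmac_rx() change above stops blindly subtracting 4 bytes from every received frame: with GMAC_CONTROL_ACS (automatic pad/FCS stripping) enabled, the core already removes pad and FCS for IEEE 802.3 Type (LLC/LLC-SNAP) frames, which the descriptor code now reports as llc_snap, so only the remaining frame types still need the FCS trimmed in software. A small sketch of that length fix-up, reusing the rx_frame_status values from common.h (the helper name is illustrative):

#include <linux/if_ether.h>	/* ETH_FCS_LEN */

enum rx_frame_status {	/* IPC status, as in stmmac common.h */
	good_frame = 0,
	discard_frame = 1,
	csum_none = 2,
	llc_snap = 4,
};

static int stmmac_rx_payload_len(int desc_len, enum rx_frame_status status)
{
	/*
	 * 802.3 Type frames arrive with pad/FCS already stripped by the
	 * GMAC core (ACS set); everything else still carries the 4-byte
	 * CRC that must be removed here.
	 */
	if (status != llc_snap)
		desc_len -= ETH_FCS_LEN;

	return desc_len;
}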
rfd-error-status: %04x\n",dev->name,status); - p->stats.rx_errors++; + dev->stats.rx_errors++; } p->rfd_top->stat_high = 0; p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ @@ -885,7 +884,7 @@ static void sun3_82586_rnr_int(struct net_device *dev) { struct priv *p = netdev_priv(dev); - p->stats.rx_errors++; + dev->stats.rx_errors++; WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ @@ -918,29 +917,29 @@ static void sun3_82586_xmt_int(struct net_device *dev) if(status & STAT_OK) { - p->stats.tx_packets++; - p->stats.collisions += (status & TCMD_MAXCOLLMASK); + dev->stats.tx_packets++; + dev->stats.collisions += (status & TCMD_MAXCOLLMASK); } else { - p->stats.tx_errors++; + dev->stats.tx_errors++; if(status & TCMD_LATECOLL) { printk("%s: late collision detected.\n",dev->name); - p->stats.collisions++; + dev->stats.collisions++; } else if(status & TCMD_NOCARRIER) { - p->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; printk("%s: no carrier detected.\n",dev->name); } else if(status & TCMD_LOSTCTS) printk("%s: loss of CTS detected.\n",dev->name); else if(status & TCMD_UNDERRUN) { - p->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk("%s: DMA underrun detected.\n",dev->name); } else if(status & TCMD_MAXCOLL) { printk("%s: Max. collisions exceeded.\n",dev->name); - p->stats.collisions += 16; + dev->stats.collisions += 16; } } @@ -1129,12 +1128,12 @@ static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev) ovrn = swab16(p->scb->ovrn_errs); p->scb->ovrn_errs = 0; - p->stats.rx_crc_errors += crc; - p->stats.rx_fifo_errors += ovrn; - p->stats.rx_frame_errors += aln; - p->stats.rx_dropped += rsc; + dev->stats.rx_crc_errors += crc; + dev->stats.rx_fifo_errors += ovrn; + dev->stats.rx_frame_errors += aln; + dev->stats.rx_dropped += rsc; - return &p->stats; + return &dev->stats; } /******************************************************** diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index be08b75dbc15a3d2d6adaf8d76c6d1de83c4d362..99e423a5b9f1d556211235ae52bfe3ce422a0254 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -2066,7 +2066,7 @@ static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; if (!lp->phy_dev) return -ENODEV; - return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd); + return phy_mii_ioctl(lp->phy_dev, rq, cmd); } static void tc35815_chip_reset(struct net_device *dev) diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h index cff98d07cba8c0a44e3751cbe5c87a449bf9423c..67e3b71bf7059de90cb861cc2f23e8d6ff252020 100644 --- a/drivers/net/tehuti.h +++ b/drivers/net/tehuti.h @@ -334,7 +334,7 @@ struct txd_desc { u32 va_lo; u32 va_hi; struct pbl pbl[0]; /* Fragments */ -} __attribute__ ((packed)); +} __packed; /* Register region size */ #define BDX_REGS_SIZE 0x1000 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 573054ae7b58687dbe52eee0be849f0436159e1c..bc3af78a869ff52881077b89cf6e5e544b6e8a91 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -67,8 +68,11 @@ #include "tg3.h" #define DRV_MODULE_NAME "tg3" -#define DRV_MODULE_VERSION "3.110" -#define DRV_MODULE_RELDATE "April 9, 2010" +#define TG3_MAJ_NUM 3 +#define TG3_MIN_NUM 113 +#define DRV_MODULE_VERSION \ + __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) +#define DRV_MODULE_RELDATE "August 2, 2010" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -145,8 +149,6 @@ #define TG3_RX_JMB_BUFF_RING_SIZE \ (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) -#define TG3_RSS_MIN_NUM_MSIX_VECS 2 - /* Due to a hardware bug, the 5701 can only DMA to memory addresses * that are at least dword aligned when used in PCIX mode. The driver * works around this bug by double copying the packet. This workaround @@ -219,12 +221,9 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, @@ -272,6 +271,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -585,18 +585,23 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) static void tg3_ape_lock_init(struct tg3 *tp) { int i; + u32 regbase; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + regbase = TG3_APE_LOCK_GRANT; + else + regbase = TG3_APE_PER_LOCK_GRANT; /* Make sure the driver hasn't any stale locks. */ for (i = 0; i < 8; i++) - tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i, - APE_LOCK_GRANT_DRIVER); + tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); } static int tg3_ape_lock(struct tg3 *tp, int locknum) { int i, off; int ret = 0; - u32 status; + u32 status, req, gnt; if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) return 0; @@ -609,13 +614,21 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) return -EINVAL; } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + req = TG3_APE_LOCK_REQ; + gnt = TG3_APE_LOCK_GRANT; + } else { + req = TG3_APE_PER_LOCK_REQ; + gnt = TG3_APE_PER_LOCK_GRANT; + } + off = 4 * locknum; - tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER); + tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER); /* Wait for up to 1 millisecond to acquire lock. */ for (i = 0; i < 100; i++) { - status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off); + status = tg3_ape_read32(tp, gnt + off); if (status == APE_LOCK_GRANT_DRIVER) break; udelay(10); @@ -623,7 +636,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) if (status != APE_LOCK_GRANT_DRIVER) { /* Revoke the lock request. 
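DRV_MODULE_VERSION is now assembled from the numeric TG3_MAJ_NUM/TG3_MIN_NUM macros via __stringify() from <linux/stringify.h>; stringification needs the usual two-step expansion so the macro's value, not its name, is turned into a string. A minimal sketch:

/* As defined in include/linux/stringify.h */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define TG3_MAJ_NUM	3
#define TG3_MIN_NUM	113
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
/* DRV_MODULE_VERSION expands to the string literal "3.113" */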
*/ - tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, + tg3_ape_write32(tp, gnt + off, APE_LOCK_GRANT_DRIVER); ret = -EBUSY; @@ -634,7 +647,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) static void tg3_ape_unlock(struct tg3 *tp, int locknum) { - int off; + u32 gnt; if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) return; @@ -647,8 +660,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) return; } - off = 4 * locknum; - tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + gnt = TG3_APE_LOCK_GRANT; + else + gnt = TG3_APE_PER_LOCK_GRANT; + + tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER); } static void tg3_disable_ints(struct tg3 *tp) @@ -862,7 +879,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) unsigned int loops; int ret; - if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) return 0; @@ -1069,14 +1086,11 @@ static int tg3_mdio_init(struct tg3 *tp) u32 reg; struct phy_device *phydev; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - u32 funcnum, is_serdes; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + u32 is_serdes; - funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC; - if (funcnum) - tp->phy_addr = 2; - else - tp->phy_addr = 1; + tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1; if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; @@ -1161,7 +1175,7 @@ static int tg3_mdio_init(struct tg3 *tp) case PHY_ID_BCMAC131: phydev->interface = PHY_INTERFACE_MODE_MII; phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; - tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + tp->phy_flags |= TG3_PHYFLG_IS_FET; break; } @@ -1254,7 +1268,7 @@ static void tg3_ump_link_report(struct tg3 *tp) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); val = 0; - if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { if (!tg3_readphy(tp, MII_CTRL1000, ®)) val = reg << 16; if (!tg3_readphy(tp, MII_STAT1000, ®)) @@ -1362,7 +1376,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) if (autoneg == AUTONEG_ENABLE && (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); else flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); @@ -1476,7 +1490,7 @@ static int tg3_phy_init(struct tg3 *tp) { struct phy_device *phydev; - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) return 0; /* Bring the PHY back to a known state. 
*/ @@ -1496,7 +1510,7 @@ static int tg3_phy_init(struct tg3 *tp) switch (phydev->interface) { case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { phydev->supported &= (PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause); @@ -1513,7 +1527,7 @@ static int tg3_phy_init(struct tg3 *tp) return -EINVAL; } - tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED; + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; phydev->advertising = phydev->supported; @@ -1524,13 +1538,13 @@ static void tg3_phy_start(struct tg3 *tp) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - if (tp->link_config.phy_is_low_power) { - tp->link_config.phy_is_low_power = 0; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; phydev->speed = tp->link_config.orig_speed; phydev->duplex = tp->link_config.orig_duplex; phydev->autoneg = tp->link_config.orig_autoneg; @@ -1544,7 +1558,7 @@ static void tg3_phy_start(struct tg3 *tp) static void tg3_phy_stop(struct tg3 *tp) { - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); @@ -1552,16 +1566,21 @@ static void tg3_phy_stop(struct tg3 *tp) static void tg3_phy_fini(struct tg3 *tp) { - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); - tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } -static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) { - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; } static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) @@ -1589,11 +1608,12 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) u32 reg; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && - (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) + ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) return; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_phy_fet_toggle_apd(tp, enable); return; } @@ -1624,10 +1644,10 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) u32 phy; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) return; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { u32 ephy; if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { @@ -1663,7 +1683,7 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp) { u32 val; - if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) return; if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && @@ -1722,7 +1742,7 @@ static int tg3_wait_macro_done(struct tg3 *tp) while (limit--) { u32 tmp32; - if (!tg3_readphy(tp, 0x16, &tmp32)) { + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { if ((tmp32 & 0x1000) 
== 0) break; } @@ -1748,13 +1768,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0002); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); for (i = 0; i < 6; i++) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, test_pat[chan][i]); - tg3_writephy(tp, 0x16, 0x0202); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); if (tg3_wait_macro_done(tp)) { *resetp = 1; return -EBUSY; @@ -1762,13 +1782,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0082); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); if (tg3_wait_macro_done(tp)) { *resetp = 1; return -EBUSY; } - tg3_writephy(tp, 0x16, 0x0802); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); if (tg3_wait_macro_done(tp)) { *resetp = 1; return -EBUSY; @@ -1808,10 +1828,10 @@ static int tg3_phy_reset_chanpat(struct tg3 *tp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 0x16, 0x0002); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); for (i = 0; i < 6; i++) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); - tg3_writephy(tp, 0x16, 0x0202); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); if (tg3_wait_macro_done(tp)) return -EBUSY; } @@ -1857,8 +1877,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); /* Block the PHY control access. */ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); + tg3_phydsp_write(tp, 0x8005, 0x0800); err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); if (!err) @@ -1869,11 +1888,10 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) if (err) return err; - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); + tg3_phydsp_write(tp, 0x8005, 0x0000); tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); - tg3_writephy(tp, 0x16, 0x0000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { @@ -1964,43 +1982,39 @@ static int tg3_phy_reset(struct tg3 *tp) } } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && - (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) return 0; tg3_phy_apply_otp(tp); - if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, true); else tg3_phy_toggle_apd(tp, false); out: - if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { + if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323); + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); } - if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { - tg3_writephy(tp, 0x1c, 0x8d68); - tg3_writephy(tp, 0x1c, 0x8d68); + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); } - if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { tg3_writephy(tp, 
MII_TG3_AUX_CTRL, 0x0c00); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506); - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); tg3_writephy(tp, MII_TG3_TEST1, MII_TG3_TEST1_TRIM_EN | 0x4); @@ -2049,6 +2063,7 @@ static void tg3_frob_aux_power(struct tg3 *tp) /* The GPIOs do something completely different on 57765. */ if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) return; @@ -2184,7 +2199,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); u32 serdes_cfg = tr32(MAC_SERDES_CFG); @@ -2203,7 +2218,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); udelay(40); return; - } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { u32 phytest; if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { u32 phy; @@ -2240,7 +2255,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && - (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) return; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || @@ -2543,14 +2558,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { do_low_power = false; - if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && - !tp->link_config.phy_is_low_power) { + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { struct phy_device *phydev; u32 phyid, advertising; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - tp->link_config.phy_is_low_power = 1; + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; tp->link_config.orig_speed = phydev->speed; tp->link_config.orig_duplex = phydev->duplex; @@ -2589,14 +2604,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) } else { do_low_power = true; - if (tp->link_config.phy_is_low_power == 0) { - tp->link_config.phy_is_low_power = 1; + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; tp->link_config.orig_speed = tp->link_config.speed; tp->link_config.orig_duplex = tp->link_config.duplex; tp->link_config.orig_autoneg = tp->link_config.autoneg; } - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { tp->link_config.speed = SPEED_10; tp->link_config.duplex = DUPLEX_HALF; tp->link_config.autoneg = 
AUTONEG_ENABLE; @@ -2629,13 +2644,13 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) if (device_should_wake) { u32 mac_mode; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { if (do_low_power) { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); udelay(40); } - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) mac_mode = MAC_MODE_PORT_MODE_GMII; else mac_mode = MAC_MODE_PORT_MODE_MII; @@ -2803,7 +2818,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 break; default: - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : SPEED_10; *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : @@ -2821,7 +2836,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp) u32 new_adv; int i; - if (tp->link_config.phy_is_low_power) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { /* Entering low power mode. Disable gigabit and * 100baseT advertisements. */ @@ -2834,7 +2849,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp) tg3_writephy(tp, MII_ADVERTISE, new_adv); } else if (tp->link_config.speed == SPEED_INVALID) { - if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) tp->link_config.advertising &= ~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); @@ -2860,7 +2875,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp) new_adv |= MII_TG3_CTRL_ADV_1000_HALF; if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) new_adv |= MII_TG3_CTRL_ADV_1000_FULL; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) && (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) new_adv |= (MII_TG3_CTRL_AS_MASTER | @@ -2962,20 +2977,11 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) /* Set Extended packet length bit */ err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); - - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); - - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132); - - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232); - - err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); - err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20); + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); udelay(40); @@ -3000,7 +3006,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) if ((adv_reg & all_mask) != all_mask) return 0; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { u32 tg3_ctrl; all_mask = 0; @@ -3128,18 +3134,18 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { /* 5701 {A0,B0} CRC bug workaround */ tg3_writephy(tp, 0x15, 0x0a75); - tg3_writephy(tp, 0x1c, 0x8c68); - tg3_writephy(tp, 0x1c, 0x8d68); - tg3_writephy(tp, 0x1c, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); } /* Clear 
pending interrupts... */ tg3_readphy(tp, MII_TG3_ISTAT, &dummy); tg3_readphy(tp, MII_TG3_ISTAT, &dummy); - if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) tg3_writephy(tp, MII_TG3_IMASK, ~0); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -3155,7 +3161,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; - if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { u32 val; tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); @@ -3231,7 +3237,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) } relink: - if (current_link_up == 0 || tp->link_config.phy_is_low_power) { + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { u32 tmp; tg3_phy_copper_begin(tp); @@ -3249,7 +3255,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) tp->mac_mode |= MAC_MODE_PORT_MODE_MII; else tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; @@ -3800,7 +3806,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; if (sg_dig_ctrl != expected_sg_dig_ctrl) { - if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && tp->serdes_counter && ((mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_RCVD_CFG)) == @@ -3817,7 +3823,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } else if (mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET)) { sg_dig_status = tr32(SG_DIG_STATUS); @@ -3840,7 +3846,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; tp->serdes_counter = 0; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { if (tp->serdes_counter) tp->serdes_counter--; @@ -3867,8 +3873,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) !(mac_status & MAC_STATUS_RCVD_CFG)) { tg3_setup_flow_control(tp, 0, 0); current_link_up = 1; - tp->tg3_flags2 |= - TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; tp->serdes_counter = SERDES_PARALLEL_DET_TIMEOUT; } else @@ -3877,7 +3883,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) } } else { tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } out: @@ -4094,7 +4100,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) err |= tg3_readphy(tp, MII_BMCR, &bmcr); if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { /* do nothing, just check for link up at the end */ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { u32 adv, 
new_adv; @@ -4119,7 +4125,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; return err; } @@ -4164,7 +4170,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) else bmsr &= ~BMSR_LSTATUS; } - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } } @@ -4191,6 +4197,8 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; + } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + /* Link is up via parallel detect */ } else { current_link_up = 0; } @@ -4217,7 +4225,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) netif_carrier_on(tp->dev); else { netif_carrier_off(tp->dev); - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } tg3_link_report(tp); } @@ -4241,13 +4249,14 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) u32 phy1, phy2; /* Select shadow register 0x1f */ - tg3_writephy(tp, 0x1c, 0x7c00); - tg3_readphy(tp, 0x1c, &phy1); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); /* Select expansion interrupt status register */ - tg3_writephy(tp, 0x17, 0x0f01); - tg3_readphy(tp, 0x15, &phy2); - tg3_readphy(tp, 0x15, &phy2); + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); if ((phy1 & 0x10) && !(phy2 & 0x20)) { /* We have signal detect and not receiving @@ -4258,17 +4267,18 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) bmcr &= ~BMCR_ANENABLE; bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; tg3_writephy(tp, MII_BMCR, bmcr); - tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; } } } else if (netif_carrier_ok(tp->dev) && (tp->link_config.autoneg == AUTONEG_ENABLE) && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { u32 phy2; /* Select expansion interrupt status register */ - tg3_writephy(tp, 0x17, 0x0f01); - tg3_readphy(tp, 0x15, &phy2); + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); if (phy2 & 0x20) { u32 bmcr; @@ -4276,7 +4286,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) tg3_readphy(tp, MII_BMCR, &bmcr); tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } } @@ -4286,9 +4296,9 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) { int err; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) err = tg3_setup_fiber_phy(tp, force_reset); - else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) err = tg3_setup_fiber_mii_phy(tp, force_reset); else err = tg3_setup_copper_phy(tp, force_reset); @@ -4367,7 +4377,8 @@ static void tg3_tx_recover(struct tg3 *tp) static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) { - smp_mb(); + /* Tell compiler to fetch tx indices from memory. 
*/ + barrier(); return tnapi->tx_pending - ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); } @@ -5552,8 +5563,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, entry = tnapi->tx_prod; base_flags = 0; - mss = 0; - if ((mss = skb_shinfo(skb)->gso_size) != 0) { + mss = skb_shinfo(skb)->gso_size; + if (mss) { int tcp_opt_len, ip_tcp_len; u32 hdrlen; @@ -5651,6 +5662,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, tnapi->tx_prod = entry; if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) netif_tx_wake_queue(txq); } @@ -5696,6 +5714,13 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) /* Estimate the number of fragments in the worst case */ if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { netif_stop_queue(tp->dev); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) return NETDEV_TX_BUSY; @@ -5759,9 +5784,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; - if ((mss = skb_shinfo(skb)->gso_size) != 0) { + mss = skb_shinfo(skb)->gso_size; + if (mss) { struct iphdr *iph; - u32 tcp_opt_len, ip_tcp_len, hdr_len; + u32 tcp_opt_len, hdr_len; if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { @@ -5769,10 +5795,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, goto out_unlock; } + iph = ip_hdr(skb); tcp_opt_len = tcp_optlen(skb); - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - hdr_len = ip_tcp_len + tcp_opt_len; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + hdr_len = skb_headlen(skb) - ETH_HLEN; + } else { + u32 ip_tcp_len; + + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); + hdr_len = ip_tcp_len + tcp_opt_len; + + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } + if (unlikely((ETH_HLEN + hdr_len) > 80) && (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) return tg3_tso_bug(tp, skb); @@ -5780,9 +5817,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); - iph = ip_hdr(skb); - iph->check = 0; - iph->tot_len = htons(mss + hdr_len); if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { tcp_hdr(skb)->check = 0; base_flags &= ~TXD_FLAG_TCPUDP_CSUM; @@ -5922,6 +5956,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, tnapi->tx_prod = entry; if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). 
+ */ + smp_mb(); if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) netif_tx_wake_queue(txq); } @@ -6212,6 +6253,8 @@ static void tg3_free_rings(struct tg3 *tp) for (j = 0; j < tp->irq_cnt; j++) { struct tg3_napi *tnapi = &tp->napi[j]; + tg3_rx_prodring_free(tp, &tp->prodring[j]); + if (!tnapi->tx_buffers) continue; @@ -6247,8 +6290,6 @@ static void tg3_free_rings(struct tg3 *tp) dev_kfree_skb_any(skb); } - - tg3_rx_prodring_free(tp, &tp->prodring[j]); } } @@ -6603,7 +6644,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, - APE_HOST_DRIVER_ID_MAGIC); + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, APE_HOST_BEHAV_NO_PHYLOCK); @@ -6782,7 +6823,8 @@ static void tg3_restore_pci_state(struct tg3 *tp) /* Allow reads and writes to the APE register and memory space. */ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) val |= PCISTATE_ALLOW_APE_CTLSPC_WR | - PCISTATE_ALLOW_APE_SHMEM_WR; + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); @@ -6897,9 +6939,13 @@ static int tg3_chip_reset(struct tg3 *tp) val = GRC_MISC_CFG_CORECLK_RESET; if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { - if (tr32(0x7e2c) == 0x60) { - tw32(0x7e2c, 0x20); - } + /* Force PCIe 1.0a mode */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && + tr32(TG3_PCIE_PHY_TSTCTL) == + (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) + tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { tw32(GRC_MISC_CFG, (1 << 29)); val |= (1 << 29); @@ -6912,8 +6958,11 @@ static int tg3_chip_reset(struct tg3 *tp) tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); } - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + /* Manage gphy power for all CPMU absent PCIe devices. */ + if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) val |= GRC_MISC_CFG_KEEP_GPHY_POWER; + tw32(GRC_MISC_CFG, val); /* restore 5701 hardware bug workaround write method */ @@ -6970,8 +7019,7 @@ static int tg3_chip_reset(struct tg3 *tp) * Older PCIe devices only support the 128 byte * MPS setting. Enforce the restriction. 
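All three hunks above add the same ordering rule: after netif_tx_stop_queue(), an smp_mb() must separate the stop from the re-read of tg3_tx_avail(), pairing with the completion path that publishes the new consumer index before testing netif_tx_queue_stopped() (which is also why tg3_tx_avail() itself now only needs a compiler barrier()). A generic, self-contained sketch of that producer/consumer protocol; the ring type, tx_space() and WAKE_THRESH below are illustrative, not tg3 symbols:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define WAKE_THRESH	32		/* illustrative wake-up threshold */

struct tx_ring {
	u32 prod;
	u32 cons;
	u32 size;			/* power of two */
};

static inline u32 tx_space(const struct tx_ring *r)
{
	return r->size - ((r->prod - r->cons) & (r->size - 1));
}

/* Transmit path (ndo_start_xmit) after posting a packet. */
static void tx_maybe_stop(struct netdev_queue *txq, struct tx_ring *r)
{
	if (unlikely(tx_space(r) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		/*
		 * Make the stopped state visible before re-reading the
		 * consumer index; pairs with the barrier in tx_complete().
		 */
		smp_mb();
		if (tx_space(r) > WAKE_THRESH)
			netif_tx_wake_queue(txq);
	}
}

/* Completion path (TX interrupt / NAPI poll). */
static void tx_complete(struct netdev_queue *txq, struct tx_ring *r, u32 cons)
{
	r->cons = cons;
	/* Publish the new consumer index before checking the queue state. */
	smp_mb();
	if (netif_tx_queue_stopped(txq) && tx_space(r) > WAKE_THRESH)
		netif_tx_wake_queue(txq);
}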
*/ - if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)) + if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; pci_write_config_word(tp->pdev, tp->pcie_cap + PCI_EXP_DEVCTL, @@ -7018,10 +7066,10 @@ static int tg3_chip_reset(struct tg3 *tp) tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); } - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_TBI; tw32_f(MAC_MODE, tp->mac_mode); - } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { tp->mac_mode = MAC_MODE_PORT_MODE_GMII; tw32_f(MAC_MODE, tp->mac_mode); } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { @@ -7041,35 +7089,10 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_mdio_start(tp); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { - u8 phy_addr; - - phy_addr = tp->phy_addr; - tp->phy_addr = TG3_PHY_PCIE_ADDR; - - tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, - TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT); - val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL | - TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL | - TG3_PCIEPHY_TX0CTRL1_NB_EN; - tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val); - udelay(10); - - tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, - TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT); - val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN | - TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN; - tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val); - udelay(10); - - tp->phy_addr = phy_addr; - } - if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { + !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); @@ -7427,7 +7450,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) tw32(HOSTCC_TXCOAL_MAXF_INT, 0); } - if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); @@ -7504,7 +7527,8 @@ static void tg3_rings_reset(struct tg3 *tp) /* Disable all receive return rings but the first. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; @@ -7720,7 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) */ val = tr32(TG3PCI_PCISTATE); val |= PCISTATE_ALLOW_APE_CTLSPC_WR | - PCISTATE_ALLOW_APE_SHMEM_WR; + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; tw32(TG3PCI_PCISTATE, val); } @@ -7740,8 +7765,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { val = tr32(TG3PCI_DMA_RW_CTRL) & ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) @@ -7869,7 +7893,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) ((u64) tpr->rx_std_mapping >> 32)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tpr->rx_std_mapping & 0xffffffff)); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC); @@ -7894,7 +7919,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | BDINFO_FLAGS_USE_EXT_RECV); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { @@ -7902,8 +7928,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) BDINFO_FLAGS_DISABLED); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | (TG3_RX_STD_DMA_SZ << 2); else @@ -7920,8 +7945,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->rx_jumbo_pending : 0; tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { tw32(STD_REPLENISH_LWM, 32); tw32(JMB_REPLENISH_LWM, 16); } @@ -7956,7 +7980,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | RDMAC_MODE_LNGREAD_ENAB); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || @@ -8048,8 +8073,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { - tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; /* reset to prevent losing 1st rx packet intermittently */ tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); @@ 
-8062,7 +8087,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) tp->mac_mode |= MAC_MODE_LINK_POLARITY; tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); @@ -8195,6 +8220,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } tp->tx_mode = TX_MODE_ENABLE; + if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; tw32_f(MAC_TX_MODE, tp->tx_mode); udelay(100); @@ -8244,16 +8272,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(MAC_LED_CTRL, tp->led_ctrl); tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); } tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && - !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { /* Set drive transmission level to 1.2V */ /* only if the signal pre-emphasis bit is not set */ val = tr32(MAC_SERDES_CFG); @@ -8275,12 +8303,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && - (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { /* Use hardware link auto-negotiation */ tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; } - if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { u32 tmp; @@ -8292,8 +8320,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { - if (tp->link_config.phy_is_low_power) { - tp->link_config.phy_is_low_power = 0; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; tp->link_config.speed = tp->link_config.orig_speed; tp->link_config.duplex = tp->link_config.orig_duplex; tp->link_config.autoneg = tp->link_config.orig_autoneg; @@ -8303,15 +8331,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && - !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { u32 tmp; /* Clear CRC stats. */ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { tg3_writephy(tp, MII_TG3_TEST1, tmp | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, 0x14, &tmp); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); } } } @@ -8479,7 +8507,7 @@ static void tg3_timer(unsigned long __opaque) mac_stat = tr32(MAC_STATUS); phy_event = 0; - if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { if (mac_stat & MAC_STATUS_MI_INTERRUPT) phy_event = 1; } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) @@ -8495,7 +8523,7 @@ static void tg3_timer(unsigned long __opaque) (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { need_setup = 1; } - if (! 
netif_carrier_ok(tp->dev) && + if (!netif_carrier_ok(tp->dev) && (mac_stat & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET))) { need_setup = 1; @@ -8511,8 +8539,10 @@ static void tg3_timer(unsigned long __opaque) } tg3_setup_phy(tp, 0); } - } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { tg3_serdes_parallel_detect(tp); + } tp->timer_counter = tp->timer_multiplier; } @@ -8605,8 +8635,7 @@ static int tg3_test_interrupt(struct tg3 *tp) * Turn off MSI one shot mode. Otherwise this test has no * observable way to know whether the interrupt was delivered. */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); @@ -8649,8 +8678,7 @@ static int tg3_test_interrupt(struct tg3 *tp) if (intr_ok) { /* Reenable MSI one shot mode. */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); @@ -8775,9 +8803,9 @@ static bool tg3_enable_msix(struct tg3 *tp) } rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); - if (rc != 0) { - if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) - return false; + if (rc < 0) { + return false; + } else if (rc != 0) { if (pci_enable_msix(tp->pdev, msix_ent, rc)) return false; netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", @@ -8785,16 +8813,19 @@ static bool tg3_enable_msix(struct tg3 *tp) tp->irq_cnt = rc; } - tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; - for (i = 0; i < tp->irq_max; i++) tp->napi[i].irq_vec = msix_ent[i].vector; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; - tp->dev->real_num_tx_queues = tp->irq_cnt - 1; - } else - tp->dev->real_num_tx_queues = 1; + tp->dev->real_num_tx_queues = 1; + if (tp->irq_cnt > 1) { + tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; + tp->dev->real_num_tx_queues = tp->irq_cnt - 1; + } + } return true; } @@ -8838,7 +8869,7 @@ static void tg3_ints_fini(struct tg3 *tp) else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) pci_disable_msi(tp->pdev); tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; - tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS; + tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS); } static int tg3_open(struct net_device *dev) @@ -8942,10 +8973,8 @@ static int tg3_open(struct net_device *dev) goto err_out2; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && - (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && - (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { + if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && + (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { u32 val = tr32(PCIE_TRANSACTION_CFG); tw32(PCIE_TRANSACTION_CFG, @@ -8982,7 +9011,8 @@ static int tg3_open(struct net_device *dev) return err; } -static struct net_device_stats *tg3_get_stats(struct net_device *); +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 
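Aside: the reworked tg3_enable_msix() above distinguishes a negative pci_enable_msix() return (MSI-X unusable) from a positive one (only that many vectors available, so the request is retried with the smaller count). A hedged sketch of the fallback pattern against the pci_enable_msix() interface of this kernel generation follows; my_setup_msix() is hypothetical and the msix_entry array is assumed to have its .entry fields filled in by the caller.

/* Sketch only; my_setup_msix() is not a driver function. */
#include <linux/pci.h>
#include <linux/errno.h>

static int my_setup_msix(struct pci_dev *pdev, struct msix_entry *ent,
                         int nvec)
{
        int rc = pci_enable_msix(pdev, ent, nvec);

        if (rc < 0)
                return rc;              /* MSI-X not usable at all */
        if (rc > 0) {
                /* Only rc vectors are free; ask again for just those. */
                if (pci_enable_msix(pdev, ent, rc))
                        return -ENOSPC;
                nvec = rc;
        }
        return nvec;                    /* vectors actually granted */
}

The driver then trims its queue count (real_num_tx_queues and the RSS/TSS flags above) to whatever was granted.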
*); static int tg3_close(struct net_device *dev) @@ -9016,8 +9046,8 @@ static int tg3_close(struct net_device *dev) tg3_ints_fini(tp); - memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), - sizeof(tp->net_stats_prev)); + tg3_get_stats64(tp->dev, &tp->net_stats_prev); + memcpy(&tp->estats_prev, tg3_get_estats(tp), sizeof(tp->estats_prev)); @@ -9030,28 +9060,16 @@ static int tg3_close(struct net_device *dev) return 0; } -static inline unsigned long get_stat64(tg3_stat64_t *val) -{ - unsigned long ret; - -#if (BITS_PER_LONG == 32) - ret = val->low; -#else - ret = ((u64)val->high << 32) | ((u64)val->low); -#endif - return ret; -} - -static inline u64 get_estat64(tg3_stat64_t *val) +static inline u64 get_stat64(tg3_stat64_t *val) { return ((u64)val->high << 32) | ((u64)val->low); } -static unsigned long calc_crc_errors(struct tg3 *tp) +static u64 calc_crc_errors(struct tg3 *tp) { struct tg3_hw_stats *hw_stats = tp->hw_stats; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { u32 val; @@ -9060,7 +9078,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp) if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { tg3_writephy(tp, MII_TG3_TEST1, val | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, 0x14, &val); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); } else val = 0; spin_unlock_bh(&tp->lock); @@ -9075,7 +9093,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp) #define ESTAT_ADD(member) \ estats->member = old_estats->member + \ - get_estat64(&hw_stats->member) + get_stat64(&hw_stats->member) static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) { @@ -9165,11 +9183,11 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) return estats; } -static struct net_device_stats *tg3_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct tg3 *tp = netdev_priv(dev); - struct net_device_stats *stats = &tp->net_stats; - struct net_device_stats *old_stats = &tp->net_stats_prev; + struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; if (!hw_stats) @@ -9350,13 +9368,13 @@ static void tg3_get_regs(struct net_device *dev, memset(p, 0, TG3_REGDUMP_LEN); - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return; tg3_full_lock(tp, 0); #define __GET_REG32(reg) (*(p)++ = tr32(reg)) -#define GET_REG32_LOOP(base,len) \ +#define GET_REG32_LOOP(base, len) \ do { p = (u32 *)(orig_p + (base)); \ for (i = 0; i < len; i += 4) \ __GET_REG32((base) + i); \ @@ -9429,7 +9447,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) return -EINVAL; - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; offset = eeprom->offset; @@ -9449,7 +9467,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); if (ret) return ret; - memcpy(data, ((char*)&val) + b_offset, b_count); + memcpy(data, ((char *)&val) + b_offset, b_count); len -= b_count; offset += b_count; eeprom->len += b_count; @@ -9491,7 +9509,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *buf; __be32 start, end; - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & 
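Aside: the stats hunks above replace net_device_stats with rtnl_link_stats64 and make get_stat64() return a full u64 unconditionally, so counters no longer wrap at 32 bits on 32-bit hosts. A minimal sketch of the resulting ndo_get_stats64-style handler; my_priv, my_hw_stat and my_get_stats64 are invented for illustration.

/* Sketch only; rtnl_link_stats64 and netdev_priv() are the only real API. */
#include <linux/netdevice.h>
#include <linux/if_link.h>

struct my_hw_stat {
        u32 high, low;                  /* split 64-bit hardware counter */
};

static inline u64 my_get_stat64(const struct my_hw_stat *s)
{
        return ((u64)s->high << 32) | s->low;
}

struct my_priv {
        struct rtnl_link_stats64 net_stats_prev; /* snapshot from last close */
        struct my_hw_stat hw_rx_packets;
};

static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
                                                struct rtnl_link_stats64 *st)
{
        struct my_priv *p = netdev_priv(dev);

        st->rx_packets = p->net_stats_prev.rx_packets +
                         my_get_stat64(&p->hw_rx_packets);
        return st;                      /* caller-provided storage */
}

tg3_close() above feeds the same helper to snapshot into net_stats_prev before the hardware counters are lost.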
TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || @@ -9548,7 +9566,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; return phy_ethtool_gset(phydev, cmd); @@ -9556,11 +9574,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->supported = (SUPPORTED_Autoneg); - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) cmd->supported |= (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { cmd->supported |= (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Half | @@ -9591,7 +9609,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; return phy_ethtool_sset(phydev, cmd); @@ -9611,11 +9629,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) mask |= ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) mask |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half | @@ -9636,7 +9654,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->advertising &= mask; } else { - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { if (cmd->speed != SPEED_1000) return -EINVAL; @@ -9772,11 +9790,11 @@ static int tg3_nway_reset(struct net_device *dev) if (!netif_running(dev)) return -EAGAIN; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) return -EINVAL; if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); } else { @@ -9787,7 +9805,7 @@ static int tg3_nway_reset(struct net_device *dev) tg3_readphy(tp, MII_BMCR, &bmcr); if (!tg3_readphy(tp, MII_BMCR, &bmcr) && ((bmcr & BMCR_ANENABLE) || - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); r = 0; @@ -9922,7 +9940,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam else tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { u32 oldadv = phydev->advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause); if (oldadv != newadv) { @@ -10251,7 +10269,7 @@ static int tg3_test_link(struct tg3 *tp) if (!netif_running(tp->dev)) return -ENODEV; - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) max = TG3_SERDES_TIMEOUT_SEC; else max = TG3_COPPER_TIMEOUT_SEC; @@ -10554,7 +10572,8 @@ static int 
tg3_test_memory(struct tg3 *tp) int err = 0; int i; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) mem_tbl = mem_tbl_5717; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) mem_tbl = mem_tbl_57765; @@ -10568,8 +10587,8 @@ static int tg3_test_memory(struct tg3 *tp) mem_tbl = mem_tbl_570x; for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { - if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, - mem_tbl[i].len)) != 0) + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) break; } @@ -10612,7 +10631,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) MAC_MODE_PORT_INT_LPBACK; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) mac_mode |= MAC_MODE_LINK_POLARITY; - if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) mac_mode |= MAC_MODE_PORT_MODE_MII; else mac_mode |= MAC_MODE_PORT_MODE_GMII; @@ -10620,7 +10639,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) } else if (loopback_mode == TG3_PHY_LOOPBACK) { u32 val; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_phy_fet_toggle_apd(tp, false); val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; } else @@ -10632,7 +10651,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) udelay(40); mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; - if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_writephy(tp, MII_TG3_FET_PTEST, MII_TG3_FET_PTEST_FRC_TX_LINK | MII_TG3_FET_PTEST_FRC_TX_LOCK); @@ -10644,7 +10663,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) mac_mode |= MAC_MODE_PORT_MODE_GMII; /* reset to prevent losing 1st rx packet intermittently */ - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); tw32_f(MAC_RX_MODE, tp->rx_mode); @@ -10775,7 +10794,7 @@ static int tg3_test_loopback(struct tg3 *tp) return TG3_LOOPBACK_FAILED; /* Turn off gphy autopowerdown. */ - if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, false); if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { @@ -10812,14 +10831,14 @@ static int tg3_test_loopback(struct tg3 *tp) tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); } - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) err |= TG3_PHY_LOOPBACK_FAILED; } /* Re-enable gphy autopowerdown. 
*/ - if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, true); return err; @@ -10830,7 +10849,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, { struct tg3 *tp = netdev_priv(dev); - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) tg3_set_power_state(tp, PCI_D0); memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@ -10862,7 +10881,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, if (!err) tg3_nvram_unlock(tp); - if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) tg3_phy_reset(tp); if (tg3_test_registers(tp) != 0) { @@ -10898,7 +10917,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, if (irq_sync && !err2) tg3_phy_start(tp); } - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) tg3_set_power_state(tp, PCI_D3hot); } @@ -10911,10 +10930,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { struct phy_device *phydev; - if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - return phy_mii_ioctl(phydev, data, cmd); + return phy_mii_ioctl(phydev, ifr, cmd); } switch (cmd) { @@ -10925,10 +10944,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: { u32 mii_regval; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -10941,10 +10960,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } case SIOCSMIIREG: - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->link_config.phy_is_low_power) + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11634,7 +11653,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) tg3_get_57780_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) tg3_get_5717_nvram_info(tp); else tg3_get_nvram_info(tp); @@ -12070,11 +12090,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->phy_id = eeprom_phy_id; if (eeprom_phy_serdes) { - if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) - tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; else - tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; } if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) @@ -12158,7 +12177,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; @@ -12167,19 +12186,21 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 
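Aside: the SIOCGMIIREG/SIOCSMIIREG handling above (and the phy_mii_ioctl() call that now takes the ifreq directly) is what user-space tools hit when they poke PHY registers. For context, a small user-space sketch of the read side; the interface name "eth0" is a placeholder and error handling is minimal.

/* Sketch only; exercises the standard MII ioctls, not driver internals. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (fd >= 0 && ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
                mii->reg_num = MII_BMSR;        /* basic status register */
                if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                        printf("BMSR = 0x%04x\n", mii->val_out);
        }
        if (fd >= 0)
                close(fd);
        return 0;
}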
*tp) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; if (cfg2 & (1 << 17)) - tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; /* serdes signal pre-emphasis in register 0x590 set by */ /* bootcode if bit 18 is set */ if (cfg2 & (1 << 18)) - tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; + tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) - tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD; + tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { u32 cfg3; tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); @@ -12284,9 +12305,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { tp->phy_id = hw_phy_id; if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) - tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; else - tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; + tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; } else { if (tp->phy_id != TG3_PHY_ID_INVALID) { /* Do nothing, phy ID already set up in @@ -12305,11 +12326,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tp->phy_id = p->phy_id; if (!tp->phy_id || tp->phy_id == TG3_PHY_ID_BCM8002) - tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; } } - if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { u32 bmsr, adv_reg, tg3_ctrl, mask; @@ -12327,7 +12348,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); tg3_ctrl = 0; - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | MII_TG3_CTRL_ADV_1000_FULL); if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || @@ -12342,7 +12363,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) if (!tg3_copper_is_advertising_all(tp, mask)) { tg3_writephy(tp, MII_ADVERTISE, adv_reg); - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); tg3_writephy(tp, MII_BMCR, @@ -12351,7 +12372,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tg3_phy_set_wirespeed(tp); tg3_writephy(tp, MII_ADVERTISE, adv_reg); - if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); } @@ -12364,13 +12385,13 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) err = tg3_init_5401phy_dsp(tp); } - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) tp->link_config.advertising = (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); - if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) tp->link_config.advertising &= ~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); @@ -12699,6 +12720,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp) { int vlen; u32 apedata; + char *fwtype; if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) @@ -12714,9 +12736,15 @@ static void __devinit 
tg3_read_dash_ver(struct tg3 *tp) apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) + fwtype = "NCSI"; + else + fwtype = "DASH"; + vlen = strlen(tp->fw_ver); - snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d", + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", + fwtype, (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, @@ -12760,6 +12788,13 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); +static void inline vlan_features_add(struct net_device *dev, unsigned long flags) +{ +#if TG3_VLAN_TAG_USED + dev->vlan_features |= flags; +#endif +} + static int __devinit tg3_get_invariants(struct tg3 *tp) { static struct pci_device_id write_reorder_chipsets[] = { @@ -12804,7 +12839,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719) pci_read_config_dword(tp->pdev, TG3PCI_GEN2_PRODID_ASICREV, &prod_id_asic_rev); @@ -12962,6 +12998,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) tp->pdev_peer = tg3_find_peer(tp); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tp->tg3_flags3 |= TG3_FLG3_5717_PLUS; + /* Intentionally exclude ASIC_REV_5906 */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || @@ -12969,8 +13010,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || @@ -12990,16 +13030,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; else { + unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO; + tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; - tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) - tp->dev->features |= NETIF_F_IPV6_CSUM; - tp->dev->features |= NETIF_F_GRO; + features |= NETIF_F_IPV6_CSUM; + tp->dev->features |= features; + vlan_features_add(tp->dev, features); } /* Determine TSO capabilities */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) @@ -13035,14 +13076,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 
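Aside: the new vlan_features_add() helper above exists because a VLAN device only inherits the offloads its real device advertises in dev->vlan_features; bits set solely in dev->features are lost for traffic carried over the VLAN interface. A minimal sketch of keeping the two in step; my_vlan_features_add() and my_setup_features() are illustrative stand-ins.

/* Sketch only; mirrors the pattern, not the driver's exact code paths. */
#include <linux/netdevice.h>

static inline void my_vlan_features_add(struct net_device *dev,
                                        unsigned long flags)
{
        dev->vlan_features |= flags;
}

static void my_setup_features(struct net_device *dev)
{
        unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;

        dev->features |= features;
        my_vlan_features_add(dev, features);    /* keep VLAN devices in sync */
}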
+ if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; tp->irq_max = TG3_IRQ_MAX_VECS; } } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { @@ -13050,8 +13091,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || @@ -13242,7 +13282,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * APE register and memory space. */ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | - PCISTATE_ALLOW_APE_SHMEM_WR; + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); } @@ -13251,8 +13292,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). @@ -13310,40 +13350,39 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; + tp->phy_flags |= TG3_PHYFLG_IS_FET; /* A few boards don't want Ethernet@WireSpeed phy feature */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || - (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || - (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) - tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) - tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) - tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { + !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) - tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) - tp->tg3_flags2 |= 
TG3_FLG2_PHY_ADJUST_TRIM; + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; } else - tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; + tp->phy_flags |= TG3_PHYFLG_BER_BUG; } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && @@ -13372,8 +13411,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) return err; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && - (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 || - (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) return -ENOTSUPP; /* Initialize data/descriptor byte/word swapping. */ @@ -13457,8 +13495,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || - (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) - tp->tg3_flags |= TG3_FLAG_10_100_ONLY; + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; err = tg3_phy_probe(tp); if (err) { @@ -13470,13 +13508,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_read_vpd(tp); tg3_read_fw_ver(tp); - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { - tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } else { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) - tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; else - tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; } /* 5700 {AX,BX} chips have a broken status block link @@ -13494,13 +13532,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && - !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { - tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | - TG3_FLAG_USE_LINKCHG_REG); + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; } /* For all SERDES we poll the MAC status register. */ - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) tp->tg3_flags |= TG3_FLAG_POLL_SERDES; else tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; @@ -13580,9 +13618,12 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); else tg3_nvram_unlock(tp); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC) + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { + if (PCI_FUNC(tp->pdev->devfn) & 1) mac_offset = 0xcc; + if (PCI_FUNC(tp->pdev->devfn) > 1) + mac_offset += 0x18c; } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) mac_offset = 0x10; @@ -13667,8 +13708,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) #endif #endif - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { val = goal ? 
0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; goto out; } @@ -13879,8 +13919,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) goto out; if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { @@ -14070,7 +14109,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) tp->link_config.autoneg = AUTONEG_ENABLE; tp->link_config.active_speed = SPEED_INVALID; tp->link_config.active_duplex = DUPLEX_INVALID; - tp->link_config.phy_is_low_power = 0; tp->link_config.orig_speed = SPEED_INVALID; tp->link_config.orig_duplex = DUPLEX_INVALID; tp->link_config.orig_autoneg = AUTONEG_INVALID; @@ -14078,8 +14116,7 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { tp->bufmgr_config.mbuf_read_dma_low_water = DEFAULT_MB_RDMA_LOW_WATER_5705; tp->bufmgr_config.mbuf_mac_rx_low_water = @@ -14156,6 +14193,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case TG3_PHY_ID_BCM5718C: return "5718C"; case TG3_PHY_ID_BCM5718S: return "5718S"; case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; case TG3_PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -14261,7 +14299,7 @@ static const struct net_device_ops tg3_netdev_ops = { .ndo_open = tg3_open, .ndo_stop = tg3_close, .ndo_start_xmit = tg3_start_xmit, - .ndo_get_stats = tg3_get_stats, + .ndo_get_stats64 = tg3_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_set_multicast_list = tg3_set_rx_mode, .ndo_set_mac_address = tg3_set_mac_addr, @@ -14280,7 +14318,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = { .ndo_open = tg3_open, .ndo_stop = tg3_close, .ndo_start_xmit = tg3_start_xmit_dma_bug, - .ndo_get_stats = tg3_get_stats, + .ndo_get_stats64 = tg3_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_set_multicast_list = tg3_set_rx_mode, .ndo_set_mac_address = tg3_set_mac_addr, @@ -14404,7 +14442,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, } if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && - tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) dev->netdev_ops = &tg3_netdev_ops; else dev->netdev_ops = &tg3_netdev_ops_dma_bug; @@ -14468,20 +14507,25 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, * is off by default, but can be enabled using ethtool. 
*/ if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && - (dev->features & NETIF_F_IP_CSUM)) + (dev->features & NETIF_F_IP_CSUM)) { dev->features |= NETIF_F_TSO; - + vlan_features_add(dev, NETIF_F_TSO); + } if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { - if (dev->features & NETIF_F_IPV6_CSUM) + if (dev->features & NETIF_F_IPV6_CSUM) { dev->features |= NETIF_F_TSO6; + vlan_features_add(dev, NETIF_F_TSO6); + } if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { dev->features |= NETIF_F_TSO_ECN; + vlan_features_add(dev, NETIF_F_TSO_ECN); + } } if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && @@ -14597,24 +14641,31 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tg3_bus_string(tp, str), dev->dev_addr); - if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { struct phy_device *phydev; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); - } else + } else { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + netdev_info(dev, "attached PHY is %s (%s Ethernet) " - "(WireSpeed[%d])\n", tg3_phy_string(tp), - ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : - ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? 
"1000Base-SX" : - "10/100/1000Base-T")), - (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); + "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0); + } netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, - (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index ce9c4918c3181cbaf2de835b3e42411d65d71f18..4937bd19096413bae1115b82cc63ce5123207536 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -53,6 +53,7 @@ #define TG3PCI_DEVICE_TIGON3_57765 0x16b4 #define TG3PCI_DEVICE_TIGON3_57791 0x16b2 #define TG3PCI_DEVICE_TIGON3_57795 0x16b6 +#define TG3PCI_DEVICE_TIGON3_5719 0x1657 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 @@ -160,6 +161,7 @@ #define ASIC_REV_57780 0x57780 #define ASIC_REV_5717 0x5717 #define ASIC_REV_57765 0x57785 +#define ASIC_REV_5719 0x5719 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -231,6 +233,7 @@ #define PCISTATE_RETRY_SAME_DMA 0x00002000 #define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 #define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 +#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 #define TG3PCI_CLOCK_CTRL 0x00000074 #define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 #define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 @@ -468,6 +471,7 @@ #define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 #define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 #define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 +#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 #define MAC_TX_STATUS 0x00000460 #define TX_STATUS_XOFFED 0x00000001 #define TX_STATUS_SENT_XOFF 0x00000002 @@ -1071,10 +1075,8 @@ #define TG3_CPMU_HST_ACC 0x0000361c #define CPMU_HST_ACC_MACCLK_MASK 0x001f0000 #define CPMU_HST_ACC_MACCLK_6_25 0x00130000 -/* 0x3620 --> 0x362c unused */ +/* 0x3620 --> 0x3630 unused */ -#define TG3_CPMU_STATUS 0x0000362c -#define TG3_CPMU_STATUS_PCIE_FUNC 0x20000000 #define TG3_CPMU_CLCK_STAT 0x00003630 #define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 #define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 @@ -1842,6 +1844,10 @@ #define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080 /* 0x7d58 --> 0x7e70 unused */ +#define TG3_PCIE_PHY_TSTCTL 0x00007e2c +#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040 +#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020 + #define TG3_PCIE_EIDLE_DELAY 0x00007e70 #define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f #define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c @@ -2030,31 +2036,9 @@ /* Currently this is fixed. */ -#define TG3_PHY_PCIE_ADDR 0x00 #define TG3_PHY_MII_ADDR 0x01 -/*** Tigon3 specific PHY PCIE registers. 
***/ - -#define TG3_PCIEPHY_BLOCK_ADDR 0x1f -#define TG3_PCIEPHY_XGXS_BLK1 0x0801 -#define TG3_PCIEPHY_TXB_BLK 0x0861 -#define TG3_PCIEPHY_BLOCK_SHIFT 4 - -/* TG3_PCIEPHY_TXB_BLK */ -#define TG3_PCIEPHY_TX0CTRL1 0x15 -#define TG3_PCIEPHY_TX0CTRL1_TXOCM 0x0003 -#define TG3_PCIEPHY_TX0CTRL1_RDCTL 0x0008 -#define TG3_PCIEPHY_TX0CTRL1_TXCMV 0x0030 -#define TG3_PCIEPHY_TX0CTRL1_TKSEL 0x0040 -#define TG3_PCIEPHY_TX0CTRL1_NB_EN 0x0400 - -/* TG3_PCIEPHY_XGXS_BLK1 */ -#define TG3_PCIEPHY_PWRMGMT4 0x1a -#define TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN 0x0038 -#define TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN 0x4000 - - /*** Tigon3 specific PHY MII registers. ***/ #define TG3_BMCR_SPEED1000 0x0040 @@ -2073,8 +2057,9 @@ #define MII_TG3_EXT_STAT 0x11 /* Extended status register */ #define MII_TG3_EXT_STAT_LPASS 0x0100 +#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ #define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */ - +#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */ #define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */ #define MII_TG3_DSP_TAP1 0x0001 @@ -2082,6 +2067,7 @@ #define MII_TG3_DSP_AADJ1CH0 0x001f #define MII_TG3_DSP_AADJ1CH3 0x601f #define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 #define MII_TG3_DSP_EXP8 0x0f08 #define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 #define MII_TG3_DSP_EXP8_AEDW 0x0200 @@ -2177,6 +2163,8 @@ /* APE shared memory. Accessible through BAR1 */ #define TG3_APE_FW_STATUS 0x400c #define APE_FW_STATUS_READY 0x00000100 +#define TG3_APE_FW_FEATURES 0x4010 +#define TG3_APE_FW_FEATURE_NCSI 0x00000002 #define TG3_APE_FW_VERSION 0x4018 #define APE_FW_VERSION_MAJMSK 0xff000000 #define APE_FW_VERSION_MAJSFT 24 @@ -2191,7 +2179,9 @@ #define APE_HOST_SEG_LEN_MAGIC 0x0000001c #define TG3_APE_HOST_INIT_COUNT 0x4208 #define TG3_APE_HOST_DRIVER_ID 0x420c -#define APE_HOST_DRIVER_ID_MAGIC 0xf0035100 +#define APE_HOST_DRIVER_ID_LINUX 0xf0000000 +#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \ + (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8) #define TG3_APE_HOST_BEHAVIOR 0x4210 #define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 #define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 @@ -2209,6 +2199,11 @@ #define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 #define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 +#define TG3_APE_PER_LOCK_REQ 0x8400 +#define APE_LOCK_PER_REQ_DRIVER 0x00001000 +#define TG3_APE_PER_LOCK_GRANT 0x8420 +#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 + /* APE convenience enumerations. */ #define TG3_APE_LOCK_GRC 1 #define TG3_APE_LOCK_MEM 4 @@ -2539,7 +2534,6 @@ struct tg3_link_config { /* When we go in and out of low power mode we need * to swap with this state. 
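Aside: the new APE_HOST_DRIVER_ID_MAGIC(maj, min) macro above packs the Linux marker into the top byte, the major version into bits 23:16 and the minor version into bits 15:8 of the ID word written to the APE. A small stand-alone check of that encoding; the 3.110 version numbers are only an example.

/* Sketch only; macro copied from the header hunk above for illustration. */
#include <stdio.h>

#define APE_HOST_DRIVER_ID_LINUX        0xf0000000
#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \
        (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8)

int main(void)
{
        unsigned int id = APE_HOST_DRIVER_ID_MAGIC(3, 110);

        printf("id word 0x%08x -> major %u, minor %u\n",
               id, (id >> 16) & 0xff, (id >> 8) & 0xff);
        return 0;
}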
*/ - int phy_is_low_power; u16 orig_speed; u8 orig_duplex; u8 orig_autoneg; @@ -2765,8 +2759,8 @@ struct tg3 { /* begin "everything else" cacheline(s) section */ - struct net_device_stats net_stats; - struct net_device_stats net_stats_prev; + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_prev; struct tg3_ethtool_stats estats; struct tg3_ethtool_stats estats_prev; @@ -2780,7 +2774,6 @@ struct tg3 { #define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 #define TG3_FLAG_RX_CHECKSUMS 0x00000004 #define TG3_FLAG_USE_LINKCHG_REG 0x00000008 -#define TG3_FLAG_USE_MI_INTERRUPT 0x00000010 #define TG3_FLAG_ENABLE_ASF 0x00000020 #define TG3_FLAG_ASPM_WORKAROUND 0x00000040 #define TG3_FLAG_POLL_SERDES 0x00000080 @@ -2802,7 +2795,6 @@ struct tg3 { #define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000 #define TG3_FLAG_WOL_CAP 0x00400000 #define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 -#define TG3_FLAG_10_100_ONLY 0x01000000 #define TG3_FLAG_PAUSE_AUTONEG 0x02000000 #define TG3_FLAG_CPMU_PRESENT 0x04000000 #define TG3_FLAG_40BIT_DMA_BUG 0x08000000 @@ -2813,22 +2805,15 @@ struct tg3 { u32 tg3_flags2; #define TG3_FLG2_RESTART_TIMER 0x00000001 #define TG3_FLG2_TSO_BUG 0x00000002 -#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004 #define TG3_FLG2_IS_5788 0x00000008 #define TG3_FLG2_MAX_RXPEND_64 0x00000010 #define TG3_FLG2_TSO_CAPABLE 0x00000020 -#define TG3_FLG2_PHY_ADC_BUG 0x00000040 -#define TG3_FLG2_PHY_5704_A0_BUG 0x00000080 -#define TG3_FLG2_PHY_BER_BUG 0x00000100 #define TG3_FLG2_PCI_EXPRESS 0x00000200 #define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400 #define TG3_FLG2_HW_AUTONEG 0x00000800 #define TG3_FLG2_IS_NIC 0x00001000 -#define TG3_FLG2_PHY_SERDES 0x00002000 -#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000 #define TG3_FLG2_FLASH 0x00008000 #define TG3_FLG2_HW_TSO_1 0x00010000 -#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 #define TG3_FLG2_5705_PLUS 0x00040000 #define TG3_FLG2_5750_PLUS 0x00080000 #define TG3_FLG2_HW_TSO_3 0x00100000 @@ -2836,10 +2821,6 @@ struct tg3 { #define TG3_FLG2_USING_MSIX 0x00400000 #define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ TG3_FLG2_USING_MSIX) -#define TG3_FLG2_MII_SERDES 0x00800000 -#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \ - TG3_FLG2_MII_SERDES) -#define TG3_FLG2_PARALLEL_DETECT 0x01000000 #define TG3_FLG2_ICH_WORKAROUND 0x02000000 #define TG3_FLG2_5780_CLASS 0x04000000 #define TG3_FLG2_HW_TSO_2 0x08000000 @@ -2847,9 +2828,7 @@ struct tg3 { TG3_FLG2_HW_TSO_2 | \ TG3_FLG2_HW_TSO_3) #define TG3_FLG2_1SHOT_MSI 0x10000000 -#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 -#define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000 u32 tg3_flags3; #define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 #define TG3_FLG3_ENABLE_APE 0x00000002 @@ -2857,15 +2836,12 @@ struct tg3 { #define TG3_FLG3_5701_DMA_BUG 0x00000008 #define TG3_FLG3_USE_PHYLIB 0x00000010 #define TG3_FLG3_MDIOBUS_INITED 0x00000020 -#define TG3_FLG3_PHY_CONNECTED 0x00000080 #define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100 #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 #define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400 #define TG3_FLG3_CLKREQ_BUG 0x00000800 -#define TG3_FLG3_PHY_ENABLE_APD 0x00001000 #define TG3_FLG3_5755_PLUS 0x00002000 #define TG3_FLG3_NO_NVRAM 0x00004000 -#define TG3_FLG3_PHY_IS_FET 0x00010000 #define TG3_FLG3_ENABLE_RSS 0x00020000 #define TG3_FLG3_ENABLE_TSS 0x00040000 #define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000 @@ -2873,6 +2849,7 @@ struct tg3 { #define TG3_FLG3_SHORT_DMA_BUG 0x00200000 #define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 #define 
TG3_FLG3_L1PLLPD_EN 0x00800000 +#define TG3_FLG3_5717_PLUS 0x01000000 struct timer_list timer; u16 timer_counter; @@ -2942,6 +2919,7 @@ struct tg3 { #define TG3_PHY_ID_BCM5718C 0x5c0d8a00 #define TG3_PHY_ID_BCM5718S 0xbc050ff0 #define TG3_PHY_ID_BCM57765 0x5c0d8a40 +#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 #define TG3_PHY_ID_BCM5906 0xdc00ac40 #define TG3_PHY_ID_BCM8002 0x60010140 #define TG3_PHY_ID_INVALID 0xffffffff @@ -2965,7 +2943,29 @@ struct tg3 { (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ - (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002) + (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ + (X) == TG3_PHY_ID_BCM8002) + + u32 phy_flags; +#define TG3_PHYFLG_IS_LOW_POWER 0x00000001 +#define TG3_PHYFLG_IS_CONNECTED 0x00000002 +#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004 +#define TG3_PHYFLG_PHY_SERDES 0x00000010 +#define TG3_PHYFLG_MII_SERDES 0x00000020 +#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ + TG3_PHYFLG_MII_SERDES) +#define TG3_PHYFLG_IS_FET 0x00000040 +#define TG3_PHYFLG_10_100_ONLY 0x00000080 +#define TG3_PHYFLG_ENABLE_APD 0x00000100 +#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200 +#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400 +#define TG3_PHYFLG_JITTER_BUG 0x00000800 +#define TG3_PHYFLG_ADJUST_TRIM 0x00001000 +#define TG3_PHYFLG_ADC_BUG 0x00002000 +#define TG3_PHYFLG_5704_A0_BUG 0x00004000 +#define TG3_PHYFLG_BER_BUG 0x00008000 +#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 +#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 u32 led_ctrl; u32 phy_otp; diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 06b552fca63d7724b2c382fa4d2280dbaba8b0fe..5efa57757a2c8507f5bcf0d0d7f3ac5d1b5a26c3 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -262,13 +262,13 @@ struct de_srom_media_block { u16 csr13; u16 csr14; u16 csr15; -} __attribute__((packed)); +} __packed; struct de_srom_info_leaf { u16 default_media; u8 n_blocks; u8 unused; -} __attribute__((packed)); +} __packed; struct de_desc { __le32 opts1; diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c index 6002e651b9ea5f4f9510f89fa008b439f25fa664..3031ed9c4a1a83e384d23875bf55798dff73d65f 100644 --- a/drivers/net/tulip/eeprom.c +++ b/drivers/net/tulip/eeprom.c @@ -120,8 +120,8 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp) 0x00, 0x06 /* ttm bit map */ }; - tp->mtable = (struct mediatable *) - kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL); + tp->mtable = kmalloc(sizeof(struct mediatable) + + sizeof(struct medialeaf), GFP_KERNEL); if (tp->mtable == NULL) return; /* Horrible, impossible failure. */ @@ -227,9 +227,9 @@ void __devinit tulip_parse_eeprom(struct net_device *dev) return; } - mtable = (struct mediatable *) - kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf), - GFP_KERNEL); + mtable = kmalloc(sizeof(struct mediatable) + + count * sizeof(struct medialeaf), + GFP_KERNEL); if (mtable == NULL) return; /* Horrible, impossible failure. 
*/ last_mediatable = tp->mtable = mtable; diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 0afa2d4f9472163c6fd8797725bfdb3957681ffa..e525875ed67d4b0aaadcea64d4d0a8c87620776f 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -51,22 +52,23 @@ struct tulip_chip_table { enum tbl_flag { - HAS_MII = 0x0001, - HAS_MEDIA_TABLE = 0x0002, - CSR12_IN_SROM = 0x0004, - ALWAYS_CHECK_MII = 0x0008, - HAS_ACPI = 0x0010, - MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */ - HAS_PNICNWAY = 0x0080, - HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */ - HAS_INTR_MITIGATION = 0x0100, - IS_ASIX = 0x0200, - HAS_8023X = 0x0400, - COMET_MAC_ADDR = 0x0800, - HAS_PCI_MWI = 0x1000, - HAS_PHY_IRQ = 0x2000, - HAS_SWAPPED_SEEPROM = 0x4000, - NEEDS_FAKE_MEDIA_TABLE = 0x8000, + HAS_MII = 0x00001, + HAS_MEDIA_TABLE = 0x00002, + CSR12_IN_SROM = 0x00004, + ALWAYS_CHECK_MII = 0x00008, + HAS_ACPI = 0x00010, + MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */ + HAS_PNICNWAY = 0x00080, + HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */ + HAS_INTR_MITIGATION = 0x00100, + IS_ASIX = 0x00200, + HAS_8023X = 0x00400, + COMET_MAC_ADDR = 0x00800, + HAS_PCI_MWI = 0x01000, + HAS_PHY_IRQ = 0x02000, + HAS_SWAPPED_SEEPROM = 0x04000, + NEEDS_FAKE_MEDIA_TABLE = 0x08000, + COMET_PM = 0x10000, }; @@ -120,6 +122,11 @@ enum tulip_offsets { CSR13 = 0x68, CSR14 = 0x70, CSR15 = 0x78, + CSR18 = 0x88, + CSR19 = 0x8c, + CSR20 = 0x90, + CSR27 = 0xAC, + CSR28 = 0xB0, }; /* register offset and bits for CFDD PCI config reg */ @@ -289,6 +296,30 @@ enum t21143_csr6_bits { csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), }; +enum tulip_comet_csr13_bits { +/* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they + * determine which link status transition wakes up if LSCE is + * enabled */ + comet_csr13_linkoffe = (1 << 17), + comet_csr13_linkone = (1 << 16), + comet_csr13_wfre = (1 << 10), + comet_csr13_mpre = (1 << 9), + comet_csr13_lsce = (1 << 8), + comet_csr13_wfr = (1 << 2), + comet_csr13_mpr = (1 << 1), + comet_csr13_lsc = (1 << 0), +}; + +enum tulip_comet_csr18_bits { + comet_csr18_pmes_sticky = (1 << 24), + comet_csr18_pm_mode = (1 << 19), + comet_csr18_apm_mode = (1 << 18), + comet_csr18_d3a = (1 << 7) +}; + +enum tulip_comet_csr20_bits { + comet_csr20_pmes = (1 << 15), +}; /* Keep the ring sizes a power of two for efficiency. Making the Tx ring too large decreases the effectiveness of channel @@ -411,6 +442,7 @@ struct tulip_private { unsigned int csr6; /* Current CSR6 control settings. */ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ void (*link_change) (struct net_device * dev, int csr5); + struct ethtool_wolinfo wolinfo; /* WOL settings */ u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ u16 lpar; /* 21143 Link partner ability. 
*/ u16 advertising[4]; diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 254643ed945e60e068336dc7d8218fada93798b3..3a8d7efa2acf554c6015de56a66b3390fc7503a1 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -272,6 +271,7 @@ static void tulip_down(struct net_device *dev); static struct net_device_stats *tulip_get_stats(struct net_device *dev); static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void set_rx_mode(struct net_device *dev); +static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_tulip(struct net_device *dev); #endif @@ -309,6 +309,11 @@ static void tulip_up(struct net_device *dev) /* Wake the chip from sleep/snooze mode. */ tulip_set_power_state (tp, 0, 0); + /* Disable all WOL events */ + pci_enable_wake(tp->pdev, PCI_D3hot, 0); + pci_enable_wake(tp->pdev, PCI_D3cold, 0); + tulip_set_wolopts(tp->pdev, 0); + /* On some chip revs we must set the MII/SYM port before the reset!? */ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) iowrite32(0x00040000, ioaddr + CSR6); @@ -345,8 +350,8 @@ static void tulip_up(struct net_device *dev) } else if (tp->flags & COMET_MAC_ADDR) { iowrite32(addr_low, ioaddr + 0xA4); iowrite32(addr_high, ioaddr + 0xA8); - iowrite32(0, ioaddr + 0xAC); - iowrite32(0, ioaddr + 0xB0); + iowrite32(0, ioaddr + CSR27); + iowrite32(0, ioaddr + CSR28); } } else { /* This is set_rx_mode(), but without starting the transmitter. */ @@ -591,10 +596,10 @@ static void tulip_tx_timeout(struct net_device *dev) pr_cont(" %02x", buf[j]); pr_cont(" j=%d\n", j); } - printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring); + printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status); - printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring); + printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status); pr_cont("\n"); @@ -876,8 +881,35 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in strcpy(info->bus_info, pci_name(np->pdev)); } + +static int tulip_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct tulip_private *tp = netdev_priv(dev); + + if (wolinfo->wolopts & (~tp->wolinfo.supported)) + return -EOPNOTSUPP; + + tp->wolinfo.wolopts = wolinfo->wolopts; + device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts); + return 0; +} + +static void tulip_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct tulip_private *tp = netdev_priv(dev); + + wolinfo->supported = tp->wolinfo.supported; + wolinfo->wolopts = tp->wolinfo.wolopts; + return; +} + + static const struct ethtool_ops ops = { - .get_drvinfo = tulip_get_drvinfo + .get_drvinfo = tulip_get_drvinfo, + .set_wol = tulip_ethtool_set_wol, + .get_wol = tulip_ethtool_get_wol, }; /* Provide ioctl() calls to examine the MII xcvr state. 
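Aside: the tulip ethtool hooks above follow the usual WOL contract: get_wol reports the supported and currently armed events, set_wol rejects anything outside the supported mask, records the rest, and tells the PM core via device_set_wakeup_enable() whether the device should be treated as wakeup capable. A generic sketch of that shape; struct my_priv and the my_* names are hypothetical.

/* Sketch only; the ethtool structures and device_set_wakeup_enable() are
 * real API, everything named my_* is invented. */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct my_priv {
        struct pci_dev *pdev;
        u32 wol_supported;              /* e.g. WAKE_PHY | WAKE_MAGIC */
        u32 wolopts;                    /* currently armed events */
};

static void my_get_wol(struct net_device *ndev, struct ethtool_wolinfo *w)
{
        struct my_priv *p = netdev_priv(ndev);

        w->supported = p->wol_supported;
        w->wolopts = p->wolopts;
}

static int my_set_wol(struct net_device *ndev, struct ethtool_wolinfo *w)
{
        struct my_priv *p = netdev_priv(ndev);

        if (w->wolopts & ~p->wol_supported)
                return -EOPNOTSUPP;

        p->wolopts = w->wolopts;
        device_set_wakeup_enable(&p->pdev->dev, p->wolopts != 0);
        return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
        .get_wol = my_get_wol,
        .set_wol = my_set_wol,
};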
*/ @@ -1093,8 +1125,8 @@ static void set_rx_mode(struct net_device *dev) iowrite32(3, ioaddr + CSR13); iowrite32(mc_filter[1], ioaddr + CSR14); } else if (tp->flags & COMET_MAC_ADDR) { - iowrite32(mc_filter[0], ioaddr + 0xAC); - iowrite32(mc_filter[1], ioaddr + 0xB0); + iowrite32(mc_filter[0], ioaddr + CSR27); + iowrite32(mc_filter[1], ioaddr + CSR28); } tp->mc_filter[0] = mc_filter[0]; tp->mc_filter[1] = mc_filter[1]; @@ -1309,6 +1341,12 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { pr_err(PFX "skipping LMC card\n"); return -ENODEV; + } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE && + (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 || + pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 || + pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) { + pr_err(PFX "skipping SBE T3E3 port\n"); + return -ENODEV; } /* @@ -1381,6 +1419,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, return i; } + /* The chip will fail to enter a low-power state later unless + * first explicitly commanded into D0 */ + if (pci_set_power_state(pdev, PCI_D0)) { + printk (KERN_NOTICE PFX + "Failed to set power state to D0\n"); + } + irq = pdev->irq; /* alloc_etherdev ensures aligned and zeroed private structures */ @@ -1427,6 +1472,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, tp->chip_id = chip_idx; tp->flags = tulip_tbl[chip_idx].flags; + + tp->wolinfo.supported = 0; + tp->wolinfo.wolopts = 0; + /* COMET: Enable power management only for AN983B */ + if (chip_idx == COMET ) { + u32 sig; + pci_read_config_dword (pdev, 0x80, &sig); + if (sig == 0x09811317) { + tp->flags |= COMET_PM; + tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC; + printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n"); + } + } tp->pdev = pdev; tp->base_addr = ioaddr; tp->revision = pdev->revision; @@ -1759,11 +1817,43 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, } +/* set the registers according to the given wolopts */ +static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + + if (tp->flags & COMET_PM) { + + unsigned int tmp; + + tmp = ioread32(ioaddr + CSR18); + tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a); + tmp |= comet_csr18_pm_mode; + iowrite32(tmp, ioaddr + CSR18); + + /* Set the Wake-up Control/Status Register to the given WOL options*/ + tmp = ioread32(ioaddr + CSR13); + tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre); + if (wolopts & WAKE_MAGIC) + tmp |= comet_csr13_mpre; + if (wolopts & WAKE_PHY) + tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce; + /* Clear the event flags */ + tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc; + iowrite32(tmp, ioaddr + CSR13); + } +} + #ifdef CONFIG_PM + static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) { + pci_power_t pstate; struct net_device *dev = pci_get_drvdata(pdev); + struct tulip_private *tp = netdev_priv(dev); if (!dev) return -EINVAL; @@ -1779,7 +1869,16 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) save_state: pci_save_state(pdev); pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + pstate = pci_choose_state(pdev, state); + if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) { + int rc; + + 
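Aside: the suspend path being extended here follows the common PCI wake-up sequence: program the controller's own wake events (tulip_set_wolopts() above), then ask the PCI core to arm PME for the target sleep state before entering it. A hedged sketch of that ordering; my_suspend() and my_program_wol() are placeholders, not tulip functions.

/* Sketch only; the pci_* calls are real, the my_* functions are not. */
#include <linux/pci.h>
#include <linux/ethtool.h>

static void my_program_wol(struct pci_dev *pdev, u32 wolopts)
{
        /* device-specific wake-event register writes would go here */
}

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_power_t pstate = pci_choose_state(pdev, state);
        u32 wolopts = WAKE_MAGIC;       /* whatever ethtool configured */

        pci_save_state(pdev);
        pci_disable_device(pdev);

        if (pstate != PCI_D0) {
                my_program_wol(pdev, wolopts);
                pci_enable_wake(pdev, pstate, wolopts != 0);
        }
        return pci_set_power_state(pdev, pstate);
}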
tulip_set_wolopts(pdev, tp->wolinfo.wolopts); + rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts); + if (rc) + printk("tulip: pci_enable_wake failed (%d)\n", rc); + } + pci_set_power_state(pdev, pstate); return 0; } @@ -1788,7 +1887,10 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) static int tulip_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; int retval; + unsigned int tmp; if (!dev) return -EINVAL; @@ -1809,6 +1911,18 @@ static int tulip_resume(struct pci_dev *pdev) return retval; } + if (tp->flags & COMET_PM) { + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* Clear the PMES flag */ + tmp = ioread32(ioaddr + CSR20); + tmp |= comet_csr20_pmes; + iowrite32(tmp, ioaddr + CSR20); + + /* Disable all wake-up events */ + tulip_set_wolopts(pdev, 0); + } netif_device_attach(dev); if (netif_running(dev)) diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 608b279b921b20a033d906b28929f4711d77495a..66d41cf8da29fb5e233f3be923278142ee61b7a4 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -1514,12 +1514,12 @@ static int netdev_close(struct net_device *dev) if (debug > 2) { int i; - printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring); + printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", i, np->tx_ring[i].length, np->tx_ring[i].status, np->tx_ring[i].buffer1); - printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring); + printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " #%d desc. 
%04x %04x %08x\n", i, np->rx_ring[i].length, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 63042596f0cf34962afe9c25133e27b0aa746701..55f3a3e667a9af0291da7c7d4e46c12e84d7ce6a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -149,6 +149,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file) tfile->tun = tun; tun->tfile = tfile; tun->socket.file = file; + netif_carrier_on(tun->dev); dev_hold(tun->dev); sock_hold(tun->socket.sk); atomic_inc(&tfile->count); @@ -162,6 +163,7 @@ static void __tun_detach(struct tun_struct *tun) { /* Detach from net device */ netif_tx_lock_bh(tun->dev); + netif_carrier_off(tun->dev); tun->tfile = NULL; tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); @@ -1574,12 +1576,6 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) #endif } -static u32 tun_get_link(struct net_device *dev) -{ - struct tun_struct *tun = netdev_priv(dev); - return !!tun->tfile; -} - static u32 tun_get_rx_csum(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -1601,7 +1597,7 @@ static const struct ethtool_ops tun_ethtool_ops = { .get_drvinfo = tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, - .get_link = tun_get_link, + .get_link = ethtool_op_get_link, .get_rx_csum = tun_get_rx_csum, .set_rx_csum = tun_set_rx_csum }; diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 22bde49262c075de51aedc7ae75a4a9cefb79387..2e50077ff450b249a9042259375b5288f41965c6 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -255,7 +255,7 @@ struct typhoon_shared { struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; u32 zeroWord; struct tx_desc txHi[TXHI_ENTRIES]; -} __attribute__ ((packed)); +} __packed; struct rxbuff_ent { struct sk_buff *skb; diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h index 673fd512591402c9729fe5e52dc88187357d31c6..88187fc84aa31904e3d82d4608968813bb09cb71 100644 --- a/drivers/net/typhoon.h +++ b/drivers/net/typhoon.h @@ -77,7 +77,7 @@ struct typhoon_indexes { volatile __le32 cmdCleared; volatile __le32 respReady; volatile __le32 rxHiReady; -} __attribute__ ((packed)); +} __packed; /* The host<->Typhoon interface * Our means of communicating where things are @@ -125,7 +125,7 @@ struct typhoon_interface { __le32 rxHiAddr; __le32 rxHiAddrHi; __le32 rxHiSize; -} __attribute__ ((packed)); +} __packed; /* The Typhoon transmit/fragment descriptor * @@ -187,7 +187,7 @@ struct tx_desc { #define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) #define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) #define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 -} __attribute__ ((packed)); +} __packed; /* The TCP Segmentation offload option descriptor * @@ -208,7 +208,7 @@ struct tcpopt_desc { __le32 respAddrLo; __le32 bytesTx; __le32 status; -} __attribute__ ((packed)); +} __packed; /* The IPSEC Offload descriptor * @@ -227,7 +227,7 @@ struct ipsec_desc { __le32 sa1; __le32 sa2; __le32 reserved; -} __attribute__ ((packed)); +} __packed; /* The Typhoon receive descriptor (Updated by NIC) * @@ -284,7 +284,7 @@ struct rx_desc { #define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) #define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) __be32 vlanTag; -} __attribute__ ((packed)); +} __packed; /* The Typhoon free buffer descriptor, used to give a buffer to the NIC * @@ -301,7 +301,7 @@ struct rx_free { __le32 physAddrHi; u32 virtAddr; u32 virtAddrHi; -} __attribute__ ((packed)); +} __packed; /* The Typhoon command descriptor, used for commands and responses * @@ -347,7 +347,7 @@ struct 
cmd_desc { __le16 parm1; __le32 parm2; __le32 parm3; -} __attribute__ ((packed)); +} __packed; /* The Typhoon response descriptor, see command descriptor for details */ @@ -359,7 +359,7 @@ struct resp_desc { __le16 parm1; __le32 parm2; __le32 parm3; -} __attribute__ ((packed)); +} __packed; #define INIT_COMMAND_NO_RESPONSE(x, command) \ do { struct cmd_desc *_ptr = (x); \ @@ -427,7 +427,7 @@ struct stats_resp { #define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) __le32 unused2; __le32 unused3; -} __attribute__ ((packed)); +} __packed; /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) */ @@ -488,7 +488,7 @@ struct sa_descriptor { u32 index; u32 unused; u32 unused2; -} __attribute__ ((packed)); +} __packed; /* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) * This is all for IPv4. @@ -518,14 +518,14 @@ struct typhoon_file_header { __le32 numSections; __le32 startAddr; __le32 hmacDigest[5]; -} __attribute__ ((packed)); +} __packed; struct typhoon_section_header { __le32 len; u16 checksum; u16 reserved; __le32 startAddr; -} __attribute__ ((packed)); +} __packed; /* The Typhoon Register offsets */ diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 807470e156affad7d05529ed7ffb0d29d0973c48..8d532f9b50d073069bbcb109b10767f7c3c09395 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -594,7 +594,7 @@ static void dump_regs(struct ucc_geth_private *ugeth) { int i; - ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); + ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1); ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", @@ -3704,6 +3704,19 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type) return PHY_INTERFACE_MODE_MII; } +static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!ugeth->phydev) + return -ENODEV; + + return phy_mii_ioctl(ugeth->phydev, rq, cmd); +} + static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_open = ucc_geth_open, .ndo_stop = ucc_geth_close, @@ -3713,6 +3726,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_set_multicast_list = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, + .ndo_do_ioctl = ucc_geth_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ucc_netpoll, #endif diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index ef1fbeb11c6e88ac09ef0ba55e6e9e33364100be..05a95586f3c52f76ead1b26bb6545b3a18b1a076 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -106,7 +106,7 @@ struct ucc_geth { u32 scar; /* Statistics carry register */ u32 scam; /* Statistics caryy mask register */ u8 res5[0x200 - 0x1c4]; -} __attribute__ ((packed)); +} __packed; /* UCC GETH TEMODR Register */ #define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics @@ -420,11 +420,11 @@ struct ucc_geth { struct ucc_geth_thread_data_tx { u8 res0[104]; -} __attribute__ ((packed)); +} __packed; struct ucc_geth_thread_data_rx { u8 res0[40]; -} __attribute__ ((packed)); +} __packed; /* Send Queue Queue-Descriptor */ struct ucc_geth_send_queue_qd { @@ -432,19 +432,19 @@ struct ucc_geth_send_queue_qd { u8 res0[0x8]; u32 last_bd_completed_address;/* initialize to last entry in BD ring */ u8 res1[0x30]; -} __attribute__ ((packed)); +} __packed; struct ucc_geth_send_queue_mem_region { 
struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; -} __attribute__ ((packed)); +} __packed; struct ucc_geth_thread_tx_pram { u8 res0[64]; -} __attribute__ ((packed)); +} __packed; struct ucc_geth_thread_rx_pram { u8 res0[128]; -} __attribute__ ((packed)); +} __packed; #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 @@ -484,7 +484,7 @@ struct ucc_geth_scheduler { /**< weight factor for queues */ u32 minw; /* temporary variable handled by QE */ u8 res1[0x70 - 0x64]; -} __attribute__ ((packed)); +} __packed; struct ucc_geth_tx_firmware_statistics_pram { u32 sicoltx; /* single collision */ @@ -506,7 +506,7 @@ struct ucc_geth_tx_firmware_statistics_pram { and 1518 octets */ u32 txpktsjumbo; /* total packets (including bad) between 1024 and MAXLength octets */ -} __attribute__ ((packed)); +} __packed; struct ucc_geth_rx_firmware_statistics_pram { u32 frrxfcser; /* frames with crc error */ @@ -540,7 +540,7 @@ struct ucc_geth_rx_firmware_statistics_pram { replaced */ u32 insertvlan; /* total frames that had their VLAN tag inserted */ -} __attribute__ ((packed)); +} __packed; struct ucc_geth_rx_interrupt_coalescing_entry { u32 interruptcoalescingmaxvalue; /* interrupt coalescing max @@ -548,23 +548,23 @@ struct ucc_geth_rx_interrupt_coalescing_entry { u32 interruptcoalescingcounter; /* interrupt coalescing counter, initialize to interruptcoalescingmaxvalue */ -} __attribute__ ((packed)); +} __packed; struct ucc_geth_rx_interrupt_coalescing_table { struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; /**< interrupt coalescing entry */ -} __attribute__ ((packed)); +} __packed; struct ucc_geth_rx_prefetched_bds { struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ -} __attribute__ ((packed)); +} __packed; struct ucc_geth_rx_bd_queues_entry { u32 bdbaseptr; /* BD base pointer */ u32 bdptr; /* BD pointer */ u32 externalbdbaseptr; /* external BD base pointer */ u32 externalbdptr; /* external BD pointer */ -} __attribute__ ((packed)); +} __packed; struct ucc_geth_tx_global_pram { u16 temoder; @@ -580,13 +580,13 @@ struct ucc_geth_tx_global_pram { u32 tqptr; /* a base pointer to the Tx Queues Memory Region */ u8 res2[0x80 - 0x74]; -} __attribute__ ((packed)); +} __packed; /* structure representing Extended Filtering Global Parameters in PRAM */ struct ucc_geth_exf_global_pram { u32 l2pcdptr; /* individual address filter, high */ u8 res0[0x10 - 0x04]; -} __attribute__ ((packed)); +} __packed; struct ucc_geth_rx_global_pram { u32 remoder; /* ethernet mode reg. 
*/ @@ -620,7 +620,7 @@ struct ucc_geth_rx_global_pram { u32 exfGlobalParam; /* base address for extended filtering global parameters */ u8 res6[0x100 - 0xC4]; /* Initialize to zero */ -} __attribute__ ((packed)); +} __packed; #define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 @@ -639,7 +639,7 @@ struct ucc_geth_init_pram { u32 txglobal; /* tx global */ u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ u8 res3[0x1]; -} __attribute__ ((packed)); +} __packed; #define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) #define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) @@ -661,7 +661,7 @@ struct ucc_geth_82xx_enet_address { u16 h; /* address (MSB) */ u16 m; /* address */ u16 l; /* address (LSB) */ -} __attribute__ ((packed)); +} __packed; /* structure representing 82xx Address Filtering PRAM */ struct ucc_geth_82xx_address_filtering_pram { @@ -672,7 +672,7 @@ struct ucc_geth_82xx_address_filtering_pram { struct ucc_geth_82xx_enet_address __iomem taddr; struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; u8 res0[0x40 - 0x38]; -} __attribute__ ((packed)); +} __packed; /* GETH Tx firmware statistics structure, used when calling UCC_GETH_GetStatistics. */ @@ -696,7 +696,7 @@ struct ucc_geth_tx_firmware_statistics { and 1518 octets */ u32 txpktsjumbo; /* total packets (including bad) between 1024 and MAXLength octets */ -} __attribute__ ((packed)); +} __packed; /* GETH Rx firmware statistics structure, used when calling UCC_GETH_GetStatistics. */ @@ -732,7 +732,7 @@ struct ucc_geth_rx_firmware_statistics { replaced */ u32 insertvlan; /* total frames that had their VLAN tag inserted */ -} __attribute__ ((packed)); +} __packed; /* GETH hardware statistics structure, used when calling UCC_GETH_GetStatistics. */ @@ -781,7 +781,7 @@ struct ucc_geth_hardware_statistics { u32 rbca; /* Total number of frames received successfully that had destination address equal to the broadcast address */ -} __attribute__ ((packed)); +} __packed; /* UCC GETH Tx errors returned via TxConf callback */ #define TX_ERRORS_DEF 0x0200 diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 9516f382a6baf76dfdafe63afdd3b22380a2595f..aea4645be7f68956472e6f6fbefeadb2e049f060 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -179,7 +179,7 @@ struct ax88172_int_data { __le16 res2; u8 status; __le16 res3; -} __attribute__ ((packed)); +} __packed; static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index dc9444525b4901c2035d50e5c84b7f39b510bc6d..109751bad3bb39e39799bb9638e7a947fa11677d 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -97,8 +97,9 @@ static void tx_complete(struct urb *req) struct sk_buff *skb = req->context; struct net_device *dev = skb->dev; struct usbpn_dev *pnd = netdev_priv(dev); + int status = req->status; - switch (req->status) { + switch (status) { case 0: dev->stats.tx_bytes += skb->len; break; @@ -109,7 +110,7 @@ static void tx_complete(struct urb *req) dev->stats.tx_aborted_errors++; default: dev->stats.tx_errors++; - dev_dbg(&dev->dev, "TX error (%d)\n", req->status); + dev_dbg(&dev->dev, "TX error (%d)\n", status); } dev->stats.tx_packets++; @@ -150,8 +151,9 @@ static void rx_complete(struct urb *req) struct page *page = virt_to_page(req->transfer_buffer); struct sk_buff *skb; unsigned long flags; + int status = req->status; - switch (req->status) { + switch (status) { case 0: spin_lock_irqsave(&pnd->rx_lock, flags); skb = pnd->rx_skb; diff 
--git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 4dd23513c5af1d661e23498740fd9f20841426e0..6efca66b87663a84ec7f36529c190aa5a107d401 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -73,7 +73,6 @@ #include -#define DRIVER_VERSION "1.2" #define MOD_AUTHOR "Option Wireless" #define MOD_DESCRIPTION "USB High Speed Option driver" #define MOD_LICENSE "GPL" @@ -211,7 +210,7 @@ struct hso_serial_state_notification { u16 wIndex; u16 wLength; u16 UART_state_bitmap; -} __attribute__((packed)); +} __packed; struct hso_tiocmget { struct mutex mutex; @@ -401,7 +400,7 @@ static int disable_net; /* driver info */ static const char driver_name[] = "hso"; static const char tty_filename[] = "ttyHS"; -static const char *version = __FILE__ ": " DRIVER_VERSION " " MOD_AUTHOR; +static const char *version = __FILE__ ": " MOD_AUTHOR; /* the usb driver itself (registered in hso_init) */ static struct usb_driver hso_driver; /* serial structures */ @@ -478,6 +477,7 @@ static const struct usb_device_id hso_ids[] = { {USB_DEVICE(0x0af0, 0x8600)}, {USB_DEVICE(0x0af0, 0x8800)}, {USB_DEVICE(0x0af0, 0x8900)}, + {USB_DEVICE(0x0af0, 0x9000)}, {USB_DEVICE(0x0af0, 0xd035)}, {USB_DEVICE(0x0af0, 0xd055)}, {USB_DEVICE(0x0af0, 0xd155)}, @@ -848,7 +848,6 @@ static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info struct hso_net *odev = netdev_priv(net); strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info); } @@ -3388,7 +3387,6 @@ module_exit(hso_exit); MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESCRIPTION); MODULE_LICENSE(MOD_LICENSE); -MODULE_INFO(Version, DRIVER_VERSION); /* change the debug level (eg: insmod hso.ko debug=0x04) */ MODULE_PARM_DESC(debug, "Level of debug [0x01 | 0x02 | 0x04 | 0x08 | 0x10]"); diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 197c352c47fb0233f7502fc4e12a7e7a16a4fcb9..08e7b6abacdd29f3fc78678834ed54feeabe6c6d 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -193,7 +193,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb) case 0: break; default: - err("%s: urb status: %d", __func__, urb->status); + err("%s: urb status: %d", __func__, status); return; } @@ -222,16 +222,17 @@ static void ipheth_rcvbulk_callback(struct urb *urb) static void ipheth_sndbulk_callback(struct urb *urb) { struct ipheth_device *dev; + int status = urb->status; dev = urb->context; if (dev == NULL) return; - if (urb->status != 0 && - urb->status != -ENOENT && - urb->status != -ECONNRESET && - urb->status != -ESHUTDOWN) - err("%s: urb status: %d", __func__, urb->status); + if (status != 0 && + status != -ENOENT && + status != -ECONNRESET && + status != -ESHUTDOWN) + err("%s: urb status: %d", __func__, status); dev_kfree_skb_irq(dev->tx_skb); netif_wake_queue(dev->net); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index d6078b8c4273eaa13e4a21f479bb82f427149bec..2b7b39cad1ce954f7eec5511fefb7f86611c45a1 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -207,7 +207,7 @@ struct kaweth_ethernet_configuration __le16 segment_size; __u16 max_multicast_filters; __u8 reserved3; -} __attribute__ ((packed)); +} __packed; /**************************************************************** * kaweth_device diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 961a8ed38d8f40622a49f9929184a0c7c4f6042c..ba72a7281cb0d4387e6549c3c31e6862704c3c87 
100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c @@ -64,13 +64,13 @@ struct nc_header { // packed: // all else is optional, and must start with: // __le16 vendorId; // from usb-if // __le16 productId; -} __attribute__((__packed__)); +} __packed; #define PAD_BYTE ((unsigned char)0xAC) struct nc_trailer { __le16 packet_id; -} __attribute__((__packed__)); +} __packed; // packets may use FLAG_FRAMING_NC and optional pad #define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \ diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 974d17f0263e2229e5f64be97b2234ba684a7aea..6710f09346d6ba665d9003d98700b5f3ca2d5ac9 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -21,11 +21,11 @@ * behaves. Pegasus II support added since this version. * TODO: suppressing HCD warnings spewage on disconnect. * v0.4.13 Ethernet address is now set at probe(), not at open() - * time as this seems to break dhcpd. + * time as this seems to break dhcpd. * v0.5.0 branch to 2.5.x kernels * v0.5.1 ethtool support added * v0.5.5 rx socket buffers are in a pool and the their allocation - * is out of the interrupt routine. + * is out of the interrupt routine. */ #include @@ -55,9 +55,9 @@ static const char driver_name[] = "pegasus"; #define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ BMSR_100FULL | BMSR_ANEGCAPABLE) -static int loopback = 0; -static int mii_mode = 0; -static char *devid=NULL; +static int loopback; +static int mii_mode; +static char *devid; static struct usb_eth_dev usb_dev_id[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ @@ -102,8 +102,8 @@ MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'"); /* use ethtool to change the level for any given device */ static int msg_level = -1; -module_param (msg_level, int, 0); -MODULE_PARM_DESC (msg_level, "Override default message level"); +module_param(msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Override default message level"); MODULE_DEVICE_TABLE(usb, pegasus_ids); static const struct net_device_ops pegasus_netdev_ops; @@ -141,7 +141,7 @@ static void ctrl_callback(struct urb *urb) wake_up(&pegasus->ctrl_wait); } -static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size, +static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { int ret; @@ -196,7 +196,7 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size, return ret; } -static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size, +static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { int ret; @@ -248,7 +248,7 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size, return ret; } -static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data) +static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { int ret; char *tmp; @@ -299,7 +299,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data) return ret; } -static int update_eth_regs_async(pegasus_t * pegasus) +static int update_eth_regs_async(pegasus_t *pegasus) { int ret; @@ -326,7 +326,7 @@ static int update_eth_regs_async(pegasus_t * pegasus) } /* Returns 0 on success, error on failure */ -static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd) +static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd) { int i; __u8 data[4] = { phy, 0, 0, indx }; @@ -334,7 +334,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd) int ret; set_register(pegasus, 
PhyCtrl, 0); - set_registers(pegasus, PhyAddr, sizeof (data), data); + set_registers(pegasus, PhyAddr, sizeof(data), data); set_register(pegasus, PhyCtrl, (indx | PHY_READ)); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, PhyCtrl, 1, data); @@ -366,7 +366,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc) return (int)res; } -static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd) +static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd) { int i; __u8 data[4] = { phy, 0, 0, indx }; @@ -402,7 +402,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) write_mii_word(pegasus, phy_id, loc, val); } -static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata) +static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) { int i; __u8 tmp; @@ -433,7 +433,7 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata) } #ifdef PEGASUS_WRITE_EEPROM -static inline void enable_eprom_write(pegasus_t * pegasus) +static inline void enable_eprom_write(pegasus_t *pegasus) { __u8 tmp; int ret; @@ -442,7 +442,7 @@ static inline void enable_eprom_write(pegasus_t * pegasus) set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE); } -static inline void disable_eprom_write(pegasus_t * pegasus) +static inline void disable_eprom_write(pegasus_t *pegasus) { __u8 tmp; int ret; @@ -452,7 +452,7 @@ static inline void disable_eprom_write(pegasus_t * pegasus) set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE); } -static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data) +static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data) { int i; __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; @@ -484,7 +484,7 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data) } #endif /* PEGASUS_WRITE_EEPROM */ -static inline void get_node_id(pegasus_t * pegasus, __u8 * id) +static inline void get_node_id(pegasus_t *pegasus, __u8 *id) { int i; __u16 w16; @@ -495,7 +495,7 @@ static inline void get_node_id(pegasus_t * pegasus, __u8 * id) } } -static void set_ethernet_addr(pegasus_t * pegasus) +static void set_ethernet_addr(pegasus_t *pegasus) { __u8 node_id[6]; @@ -503,12 +503,12 @@ static void set_ethernet_addr(pegasus_t * pegasus) get_registers(pegasus, 0x10, sizeof(node_id), node_id); } else { get_node_id(pegasus, node_id); - set_registers(pegasus, EthID, sizeof (node_id), node_id); + set_registers(pegasus, EthID, sizeof(node_id), node_id); } - memcpy(pegasus->net->dev_addr, node_id, sizeof (node_id)); + memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id)); } -static inline int reset_mac(pegasus_t * pegasus) +static inline int reset_mac(pegasus_t *pegasus) { __u8 data = 0x8; int i; @@ -563,7 +563,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) data[1] = 0; data[2] = (loopback & 1) ? 
0x09 : 0x01; - memcpy(pegasus->eth_regs, data, sizeof (data)); + memcpy(pegasus->eth_regs, data, sizeof(data)); ret = set_registers(pegasus, EthCtrl0, 3, data); if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS || @@ -577,7 +577,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) return ret; } -static void fill_skb_pool(pegasus_t * pegasus) +static void fill_skb_pool(pegasus_t *pegasus) { int i; @@ -595,7 +595,7 @@ static void fill_skb_pool(pegasus_t * pegasus) } } -static void free_skb_pool(pegasus_t * pegasus) +static void free_skb_pool(pegasus_t *pegasus) { int i; @@ -667,11 +667,11 @@ static void read_bulk_callback(struct urb *urb) netif_dbg(pegasus, rx_err, net, "RX packet error %x\n", rx_status); pegasus->stats.rx_errors++; - if (rx_status & 0x06) // long or runt + if (rx_status & 0x06) /* long or runt */ pegasus->stats.rx_length_errors++; if (rx_status & 0x08) pegasus->stats.rx_crc_errors++; - if (rx_status & 0x10) // extra bits + if (rx_status & 0x10) /* extra bits */ pegasus->stats.rx_frame_errors++; goto goon; } @@ -748,9 +748,8 @@ static void rx_fixup(unsigned long data) if (pegasus->flags & PEGASUS_RX_URB_FAIL) if (pegasus->rx_skb) goto try_again; - if (pegasus->rx_skb == NULL) { + if (pegasus->rx_skb == NULL) pegasus->rx_skb = pull_skb(pegasus); - } if (pegasus->rx_skb == NULL) { netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n"); tasklet_schedule(&pegasus->rx_tl); @@ -835,7 +834,7 @@ static void intr_callback(struct urb *urb) } if (urb->actual_length >= 6) { - u8 * d = urb->transfer_buffer; + u8 *d = urb->transfer_buffer; /* byte 0 == tx_status1, reg 2B */ if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL @@ -918,14 +917,14 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev) return &((pegasus_t *) netdev_priv(dev))->stats; } -static inline void disable_net_traffic(pegasus_t * pegasus) +static inline void disable_net_traffic(pegasus_t *pegasus) { __le16 tmp = cpu_to_le16(0); set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); } -static inline void get_interrupt_interval(pegasus_t * pegasus) +static inline void get_interrupt_interval(pegasus_t *pegasus) { u16 data; u8 interval; @@ -961,7 +960,7 @@ static void set_carrier(struct net_device *net) netif_carrier_off(net); } -static void free_all_urbs(pegasus_t * pegasus) +static void free_all_urbs(pegasus_t *pegasus) { usb_free_urb(pegasus->intr_urb); usb_free_urb(pegasus->tx_urb); @@ -969,7 +968,7 @@ static void free_all_urbs(pegasus_t * pegasus) usb_free_urb(pegasus->ctrl_urb); } -static void unlink_all_urbs(pegasus_t * pegasus) +static void unlink_all_urbs(pegasus_t *pegasus) { usb_kill_urb(pegasus->intr_urb); usb_kill_urb(pegasus->tx_urb); @@ -977,12 +976,11 @@ static void unlink_all_urbs(pegasus_t * pegasus) usb_kill_urb(pegasus->ctrl_urb); } -static int alloc_urbs(pegasus_t * pegasus) +static int alloc_urbs(pegasus_t *pegasus) { pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!pegasus->ctrl_urb) { + if (!pegasus->ctrl_urb) return 0; - } pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->rx_urb) { usb_free_urb(pegasus->ctrl_urb); @@ -1019,7 +1017,7 @@ static int pegasus_open(struct net_device *net) return -ENOMEM; res = set_registers(pegasus, EthID, 6, net->dev_addr); - + usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), pegasus->rx_skb->data, PEGASUS_MTU + 8, @@ -1033,7 +1031,7 @@ static int pegasus_open(struct net_device *net) usb_fill_int_urb(pegasus->intr_urb, pegasus->usb, usb_rcvintpipe(pegasus->usb, 
3), - pegasus->intr_buff, sizeof (pegasus->intr_buff), + pegasus->intr_buff, sizeof(pegasus->intr_buff), intr_callback, pegasus, pegasus->intr_interval); if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) { if (res == -ENODEV) @@ -1076,9 +1074,9 @@ static void pegasus_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { pegasus_t *pegasus = netdev_priv(dev); - strncpy(info->driver, driver_name, sizeof (info->driver) - 1); - strncpy(info->version, DRIVER_VERSION, sizeof (info->version) - 1); - usb_make_path(pegasus->usb, info->bus_info, sizeof (info->bus_info)); + strncpy(info->driver, driver_name, sizeof(info->driver) - 1); + strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1); + usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); } /* also handles three patterns of some kind in hardware */ @@ -1098,7 +1096,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { pegasus_t *pegasus = netdev_priv(dev); u8 reg78 = 0x04; - + if (wol->wolopts & ~WOL_SUPPORTED) return -EINVAL; @@ -1118,7 +1116,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static inline void pegasus_reset_wol(struct net_device *dev) { struct ethtool_wolinfo wol; - + memset(&wol, 0, sizeof wol); (void) pegasus_set_wol(dev, &wol); } @@ -1178,7 +1176,7 @@ static const struct ethtool_ops ops = { static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) { - __u16 *data = (__u16 *) & rq->ifr_ifru; + __u16 *data = (__u16 *) &rq->ifr_ifru; pegasus_t *pegasus = netdev_priv(net); int res; @@ -1223,7 +1221,7 @@ static void pegasus_set_multicast(struct net_device *net) ctrl_callback(pegasus->ctrl_urb); } -static __u8 mii_phy_probe(pegasus_t * pegasus) +static __u8 mii_phy_probe(pegasus_t *pegasus) { int i; __u16 tmp; @@ -1239,10 +1237,10 @@ static __u8 mii_phy_probe(pegasus_t * pegasus) return 0xff; } -static inline void setup_pegasus_II(pegasus_t * pegasus) +static inline void setup_pegasus_II(pegasus_t *pegasus) { __u8 data = 0xa5; - + set_register(pegasus, Reg1d, 0); set_register(pegasus, Reg7b, 1); mdelay(100); @@ -1254,16 +1252,15 @@ static inline void setup_pegasus_II(pegasus_t * pegasus) set_register(pegasus, 0x83, data); get_registers(pegasus, 0x83, 1, &data); - if (data == 0xa5) { + if (data == 0xa5) pegasus->chip = 0x8513; - } else { + else pegasus->chip = 0; - } set_register(pegasus, 0x80, 0xc0); set_register(pegasus, 0x83, 0xff); set_register(pegasus, 0x84, 0x01); - + if (pegasus->features & HAS_HOME_PNA && mii_mode) set_register(pegasus, Reg81, 6); else @@ -1272,7 +1269,7 @@ static inline void setup_pegasus_II(pegasus_t * pegasus) static int pegasus_count; -static struct workqueue_struct *pegasus_workqueue = NULL; +static struct workqueue_struct *pegasus_workqueue; #define CARRIER_CHECK_DELAY (2 * HZ) static void check_carrier(struct work_struct *work) @@ -1367,7 +1364,7 @@ static int pegasus_probe(struct usb_interface *intf, pegasus->mii.phy_id_mask = 0x1f; pegasus->mii.reg_num_mask = 0x1f; spin_lock_init(&pegasus->rx_pool_lock); - pegasus->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV + pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); pegasus->features = usb_dev_id[dev_index].private; @@ -1442,11 +1439,11 @@ static void pegasus_disconnect(struct usb_interface *intf) pegasus_dec_workqueue(); } -static int pegasus_suspend (struct usb_interface *intf, pm_message_t message) +static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) { 
struct pegasus *pegasus = usb_get_intfdata(intf); - - netif_device_detach (pegasus->net); + + netif_device_detach(pegasus->net); cancel_delayed_work(&pegasus->carrier_check); if (netif_running(pegasus->net)) { usb_kill_urb(pegasus->rx_urb); @@ -1455,11 +1452,11 @@ static int pegasus_suspend (struct usb_interface *intf, pm_message_t message) return 0; } -static int pegasus_resume (struct usb_interface *intf) +static int pegasus_resume(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); - netif_device_attach (pegasus->net); + netif_device_attach(pegasus->net); if (netif_running(pegasus->net)) { pegasus->rx_urb->status = 0; pegasus->rx_urb->actual_length = 0; @@ -1498,8 +1495,8 @@ static struct usb_driver pegasus_driver = { static void __init parse_id(char *id) { - unsigned int vendor_id=0, device_id=0, flags=0, i=0; - char *token, *name=NULL; + unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0; + char *token, *name = NULL; if ((token = strsep(&id, ":")) != NULL) name = token; @@ -1510,14 +1507,14 @@ static void __init parse_id(char *id) device_id = simple_strtoul(token, NULL, 16); flags = simple_strtoul(id, NULL, 16); pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n", - driver_name, name, vendor_id, device_id, flags); + driver_name, name, vendor_id, device_id, flags); if (vendor_id > 0x10000 || vendor_id == 0) return; if (device_id > 0x10000 || device_id == 0) return; - for (i=0; usb_dev_id[i].name; i++); + for (i = 0; usb_dev_id[i].name; i++); usb_dev_id[i].name = name; usb_dev_id[i].vendor = vendor_id; usb_dev_id[i].device = device_id; diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index 29f5211e645b0abcba70af272263a77b8ba12023..65b78b35b73c26d28087d27f0c13b4ea11b3738d 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h @@ -68,7 +68,7 @@ enum pegasus_registers { EpromData = 0x21, /* 0x21 low, 0x22 high byte */ EpromCtrl = 0x23, PhyAddr = 0x25, - PhyData = 0x26, /* 0x26 low, 0x27 high byte */ + PhyData = 0x26, /* 0x26 low, 0x27 high byte */ PhyCtrl = 0x28, UsbStst = 0x2a, EthTxStat0 = 0x2b, @@ -154,162 +154,162 @@ struct usb_eth_dev { #else /* PEGASUS_DEV */ -PEGASUS_DEV( "3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c, - DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA ) -PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046, - 
DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "ADMtek ADM8511 \"Pegasus II\" USB Ethernet", +PEGASUS_DEV("3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c, + DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA) +PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104, + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004, + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007, + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("ADMtek ADM8511 \"Pegasus II\" USB Ethernet", VENDOR_ADMTEK, 0x8511, - DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA ) -PEGASUS_DEV( "ADMtek ADM8513 \"Pegasus II\" USB Ethernet", + DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA) +PEGASUS_DEV("ADMtek ADM8513 \"Pegasus II\" USB Ethernet", VENDOR_ADMTEK, 0x8513, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet", + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet", VENDOR_ADMTEK, 0x8515, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)", + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)", VENDOR_ADMTEK, 0x0986, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "AN986A USB MAC", VENDOR_ADMTEK, 1986, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, - DEFAULT_GPIO_RESET | PEGASUS_II ) + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("AN986A USB MAC", VENDOR_ADMTEK, 1986, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, + DEFAULT_GPIO_RESET | PEGASUS_II) /* * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors * with the same product IDs by checking the device class too. 
*/ -PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4002, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4102, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x400b, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x200c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "D-Link DSB-650", VENDOR_DLINK, 0xabc1, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002, - DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA ) -PEGASUS_DEV( "ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "EasiDock Ethernet", VENDOR_MOBILITY, 0x0304, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "HP hn210c Ethernet USB", VENDOR_HP, 0x811c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, +PEGASUS_DEV_CLASS("Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Billionton USB-100", VENDOR_BILLIONTON, 0x0986, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987, + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Billionton USBE-100", VENDOR_BILLIONTON, 0x8511, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Corega FEther USB-TX", VENDOR_COREGA, 0x0004, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Corega FEther USB-TXS", VENDOR_COREGA, 0x000d, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4001, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4002, + 
DEFAULT_GPIO_RESET) +PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4102, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x400b, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x200c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003, + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("D-Link DSB-650", VENDOR_DLINK, 0xabc1, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002, + DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA) +PEGASUS_DEV("ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("EasiDock Ethernet", VENDOR_MOBILITY, 0x0304, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000, DEFAULT_GPIO_RESET) -PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005, - DEFAULT_GPIO_RESET | PEGASUS_II) -PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x2202, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2203, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2204, - DEFAULT_GPIO_RESET | HAS_HOME_PNA ) -PEGASUS_DEV( "Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206, - DEFAULT_GPIO_RESET | PEGASUS_II) -PEGASUS_DEV( "Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x200c, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Microsoft MN-110", VENDOR_MICROSOFT, 0x007a, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "NETGEAR FA101", VENDOR_NETGEAR, 0x1020, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "OCT Inc.", VENDOR_OCT, 0x0109, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "OCT USB TO Ethernet", VENDOR_OCT, 0x0901, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "SMC 202 USB Ethernet", VENDOR_SMC, 0x0200, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201, - DEFAULT_GPIO_RESET | PEGASUS_II) -PEGASUS_DEV( "SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100, - DEFAULT_GPIO_RESET ) -PEGASUS_DEV( "SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110, - DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001, - DEFAULT_GPIO_RESET | PEGASUS_II ) +PEGASUS_DEV("GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("HP hn210c Ethernet USB", VENDOR_HP, 0x811c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("IO DATA USB ET/TX", VENDOR_IODATA, 0x0904, + 
DEFAULT_GPIO_RESET) +PEGASUS_DEV("IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x2202, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2203, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2204, + DEFAULT_GPIO_RESET | HAS_HOME_PNA) +PEGASUS_DEV("Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x200c, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("Microsoft MN-110", VENDOR_MICROSOFT, 0x007a, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("NETGEAR FA101", VENDOR_NETGEAR, 0x1020, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("OCT Inc.", VENDOR_OCT, 0x0109, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("OCT USB TO Ethernet", VENDOR_OCT, 0x0901, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("SMC 202 USB Ethernet", VENDOR_SMC, 0x0200, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100, + DEFAULT_GPIO_RESET) +PEGASUS_DEV("SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110, + DEFAULT_GPIO_RESET | PEGASUS_II) +PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001, + DEFAULT_GPIO_RESET | PEGASUS_II) #endif /* PEGASUS_DEV */ diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index f1942d69a0d5659f1eb085ab2727af3ba7e060e9..ee85c8b9a8582c6e5debff8635ef9ad444794415 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -165,7 +165,7 @@ struct lsi_umts { u8 gw_addr_len; /* NW-supplied GW address len */ u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ u8 reserved[8]; -} __attribute__ ((packed)); +} __packed; #define SIERRA_NET_LSI_COMMON_LEN 4 #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 81c76ada8e56c25259f9429203ae3f2f6a81ba50..3b03794ac3f56a66a446c098f6003253cd8f09b2 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -44,6 +44,7 @@ #include #include #include +#include #define DRIVER_VERSION "22-Aug-2005" @@ -158,16 +159,6 @@ int usbnet_get_endpoints(struct usbnet *dev, struct 
usb_interface *intf) } EXPORT_SYMBOL_GPL(usbnet_get_endpoints); -static u8 nibble(unsigned char c) -{ - if (likely(isdigit(c))) - return c - '0'; - c = toupper(c); - if (likely(isxdigit(c))) - return 10 + c - 'A'; - return 0; -} - int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) { int tmp, i; @@ -183,7 +174,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) } for (i = tmp = 0; i < 6; i++, tmp += 2) dev->net->dev_addr [i] = - (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]); + (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]); return 0; } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); @@ -624,7 +615,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev) while (!skb_queue_empty(&dev->rxq) && !skb_queue_empty(&dev->txq) && !skb_queue_empty(&dev->done)) { - schedule_timeout(UNLINK_TIMEOUT_MS); + schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); set_current_state(TASK_UNINTERRUPTIBLE); netif_dbg(dev, ifdown, dev->net, "waited for %d urb completions\n", temp); @@ -643,7 +634,7 @@ int usbnet_stop (struct net_device *net) netif_stop_queue (net); netif_info(dev, ifdown, dev->net, - "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", + "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index c38191179fae3fa0f3f1d0ab38580b7ec0bd2728..f7b33ae7a70301e7c61e49c46e37e6180c316f32 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -193,7 +193,7 @@ struct rx_desc { __le32 pa_low; /* Low 32 bit PCI address */ __le16 pa_high; /* Next 16 bit PCI address (48 total) */ __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ -} __attribute__ ((__packed__)); +} __packed; /* * Transmit descriptor @@ -208,7 +208,7 @@ struct tdesc1 { __le16 vlan; u8 TCR; u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ -} __attribute__ ((__packed__)); +} __packed; enum { TD_QUEUE = cpu_to_le16(0x8000) @@ -218,7 +218,7 @@ struct td_buf { __le32 pa_low; __le16 pa_high; __le16 size; /* bits 0--13 - size, bit 15 - queue */ -} __attribute__ ((__packed__)); +} __packed; struct tx_desc { struct tdesc0 tdesc0; @@ -1096,7 +1096,7 @@ struct mac_regs { volatile __le16 PatternCRC[8]; /* 0xB0 */ volatile __le32 ByteMask[4][4]; /* 0xC0 */ -} __attribute__ ((__packed__)); +} __packed; enum hw_mib { @@ -1216,7 +1216,7 @@ struct arp_packet { u8 ar_sip[4]; u8 ar_tha[ETH_ALEN]; u8 ar_tip[4]; -} __attribute__ ((__packed__)); +} __packed; struct _magic_packet { u8 dest_mac[6]; @@ -1224,7 +1224,7 @@ struct _magic_packet { __be16 type; u8 MAC[16][6]; u8 password[6]; -} __attribute__ ((__packed__)); +} __packed; /* * Store for chip context when saving and restoring status. 
Not diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h index b4889e6c4a577d4ca323e68e743d321aee7979ab..ca7727b940adfd62c58bb01c32737a9c88dedcc0 100644 --- a/drivers/net/vmxnet3/vmxnet3_defs.h +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -464,6 +464,9 @@ enum vmxnet3_intr_type { /* addition 1 for events */ #define VMXNET3_MAX_INTRS 25 +/* value of intrCtrl */ +#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */ + struct Vmxnet3_IntrConf { bool autoMask; @@ -471,7 +474,8 @@ struct Vmxnet3_IntrConf { u8 eventIntrIdx; u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for * each intr */ - __le32 reserved[3]; + __le32 intrCtrl; + __le32 reserved[2]; }; /* one bit per VLAN ID, the size is in the units of u32 */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 989b742551ac4e44a2c6775c7c65cb1a796d3ba5..abe0ff53daf353c1be44876b19cf37f4cefe4403 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -72,6 +72,8 @@ vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) for (i = 0; i < adapter->intr.num_intrs; i++) vmxnet3_enable_intr(adapter, i); + adapter->shared->devRead.intrConf.intrCtrl &= + cpu_to_le32(~VMXNET3_IC_DISABLE_ALL); } @@ -80,6 +82,8 @@ vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) { int i; + adapter->shared->devRead.intrConf.intrCtrl |= + cpu_to_le32(VMXNET3_IC_DISABLE_ALL); for (i = 0; i < adapter->intr.num_intrs; i++) vmxnet3_disable_intr(adapter, i); } @@ -128,7 +132,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) * Check the link state. This may start or stop the tx queue. */ static void -vmxnet3_check_link(struct vmxnet3_adapter *adapter) +vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) { u32 ret; @@ -141,14 +145,16 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter) if (!netif_carrier_ok(adapter->netdev)) netif_carrier_on(adapter->netdev); - vmxnet3_tq_start(&adapter->tx_queue, adapter); + if (affectTxQueue) + vmxnet3_tq_start(&adapter->tx_queue, adapter); } else { printk(KERN_INFO "%s: NIC Link is Down\n", adapter->netdev->name); if (netif_carrier_ok(adapter->netdev)) netif_carrier_off(adapter->netdev); - vmxnet3_tq_stop(&adapter->tx_queue, adapter); + if (affectTxQueue) + vmxnet3_tq_stop(&adapter->tx_queue, adapter); } } @@ -163,7 +169,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if link state has changed */ if (events & VMXNET3_ECR_LINK) - vmxnet3_check_link(adapter); + vmxnet3_check_link(adapter, true); /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { @@ -658,8 +664,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, while (len) { u32 buf_size; - buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ? 
- VMXNET3_MAX_TX_BUF_SIZE : len; + if (len < VMXNET3_MAX_TX_BUF_SIZE) { + buf_size = len; + dw2 |= len; + } else { + buf_size = VMXNET3_MAX_TX_BUF_SIZE; + /* spec says that for TxDesc.len, 0 == 2^14 */ + } tbi = tq->buf_info + tq->tx_ring.next2fill; tbi->map_type = VMXNET3_MAP_SINGLE; @@ -667,13 +678,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, skb->data + buf_offset, buf_size, PCI_DMA_TODEVICE); - tbi->len = buf_size; /* this automatically convert 2^14 to 0 */ + tbi->len = buf_size; gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); - gdesc->dword[2] = cpu_to_le32(dw2 | buf_size); + gdesc->dword[2] = cpu_to_le32(dw2); gdesc->dword[3] = 0; dev_dbg(&adapter->netdev->dev, @@ -1825,6 +1836,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; + devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); /* rx filter settings */ devRead->rxFilterConf.rxMode = 0; @@ -1889,7 +1901,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) * Check link state when first activating device. It will start the * tx queue if the link is up. */ - vmxnet3_check_link(adapter); + vmxnet3_check_link(adapter, true); napi_enable(&adapter->napi); vmxnet3_enable_all_intrs(adapter); @@ -2295,9 +2307,13 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) adapter->intr.mask_mode = (cfg >> 2) & 0x3; if (adapter->intr.type == VMXNET3_IT_AUTO) { - int err; + adapter->intr.type = VMXNET3_IT_MSIX; + } #ifdef CONFIG_PCI_MSI + if (adapter->intr.type == VMXNET3_IT_MSIX) { + int err; + adapter->intr.msix_entries[0].entry = 0; err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, VMXNET3_LINUX_MAX_MSIX_VECT); @@ -2306,15 +2322,18 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) adapter->intr.type = VMXNET3_IT_MSIX; return; } -#endif + adapter->intr.type = VMXNET3_IT_MSI; + } + if (adapter->intr.type == VMXNET3_IT_MSI) { + int err; err = pci_enable_msi(adapter->pdev); if (!err) { adapter->intr.num_intrs = 1; - adapter->intr.type = VMXNET3_IT_MSI; return; } } +#endif /* CONFIG_PCI_MSI */ adapter->intr.type = VMXNET3_IT_INTX; @@ -2358,6 +2377,7 @@ vmxnet3_reset_work(struct work_struct *data) return; /* if the device is closed, we must leave it alone */ + rtnl_lock(); if (netif_running(adapter->netdev)) { printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); vmxnet3_quiesce_dev(adapter); @@ -2366,6 +2386,7 @@ vmxnet3_reset_work(struct work_struct *data) } else { printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); } + rtnl_unlock(); clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); } @@ -2491,6 +2512,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, } set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); + vmxnet3_check_link(adapter, false); atomic_inc(&devices_found); return 0; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 3935c4493fb708bbaaba992c6f1f7e72e2e4ad3a..7e4b5a89165a5804dcb68dd78d225163ea71efd7 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -275,27 +275,27 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) } } -static u32 -vmxnet3_get_flags(struct net_device *netdev) { - return netdev->features; -} - static int -vmxnet3_set_flags(struct net_device *netdev, u32 data) { 
+vmxnet3_set_flags(struct net_device *netdev, u32 data) +{ struct vmxnet3_adapter *adapter = netdev_priv(netdev); u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; + if (data & ~ETH_FLAG_LRO) + return -EOPNOTSUPP; + if (lro_requested ^ lro_present) { /* toggle the LRO feature*/ netdev->features ^= NETIF_F_LRO; /* update harware LRO capability accordingly */ if (lro_requested) - adapter->shared->devRead.misc.uptFeatures &= UPT1_F_LRO; + adapter->shared->devRead.misc.uptFeatures |= + cpu_to_le64(UPT1_F_LRO); else adapter->shared->devRead.misc.uptFeatures &= - ~UPT1_F_LRO; + cpu_to_le64(~UPT1_F_LRO); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); } @@ -554,7 +554,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = { .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_strings = vmxnet3_get_strings, - .get_flags = vmxnet3_get_flags, + .get_flags = ethtool_op_get_flags, .set_flags = vmxnet3_set_flags, .get_sset_count = vmxnet3_get_sset_count, .get_ethtool_stats = vmxnet3_get_ethtool_stats, diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 34f392f46fb1e026e3d766d8fc9dd61dcd678328..2121c735cabd6ab3fddf335d302f7803cef87a76 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -68,10 +68,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01000500 +#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00 /* diff --git a/drivers/net/vxge/Makefile b/drivers/net/vxge/Makefile index 8992ca26b2777e3c088d5d2ee6de36f547045827..b625e2c503f5876c7484ad2fc24dfebcba33e4ad 100644 --- a/drivers/net/vxge/Makefile +++ b/drivers/net/vxge/Makefile @@ -1,5 +1,5 @@ # -# Makefile for Neterion Inc's X3100 Series 10 GbE PCIe # I/O +# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O # Virtualized Server Adapter linux driver obj-$(CONFIG_VXGE) += vxge.o diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 297f0d2020735e4dffe20c689d2290c9d8716075..0e6db5935609cf47efb6c1f2788e6bfeac496db8 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #include #include diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index 4ae2625d4d8f26001c81bc4ccf133169c5140693..1a94343023cb20fdb7f0f3a5b4d4b83ab1b9e1b3 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. 
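[Editor's aside, not part of the patch] The VMXNET3_DRIVER_VERSION_NUM bump above follows the "each byte encodes a version component" comment, so "1.0.14.0-k" becomes 0x01000E00 (14 decimal is 0x0E in the third byte). A minimal sketch of that encoding, using a hypothetical helper macro name:

	#define VMXNET3_MAKE_VERSION(a, b, c, d) \
		(((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

	/* VMXNET3_MAKE_VERSION(1, 0, 14, 0) == 0x01000E00, matching "1.0.14.0-k" */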
******************************************************************************/ #ifndef VXGE_CONFIG_H #define VXGE_CONFIG_H diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index cadef8549c0632820fc014fa84b0730044f88538..05679e306fdd5711bc15831fda2d95c0fe54c81a 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-ethtool.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #include #include diff --git a/drivers/net/vxge/vxge-ethtool.h b/drivers/net/vxge/vxge-ethtool.h index 1c3df0a34acc3aefc6464a910b05f91a9b57a22d..6cf3044d7f438283d6f6455aba047322a151544d 100644 --- a/drivers/net/vxge/vxge-ethtool.h +++ b/drivers/net/vxge/vxge-ethtool.h @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-ethtool.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #ifndef _VXGE_ETHTOOL_H #define _VXGE_ETHTOOL_H diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index fc8b2d7a0919d1cd9709f8c586f315bc5ec96e82..c7c5605b3728384069e0e3aa187a30b0e85bd512 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * -* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O +* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. -* Copyright(c) 2002-2009 Neterion Inc. +* Copyright(c) 2002-2010 Exar Corp. 
* * The module loadable parameters that are supported by the driver and a brief * explanation of all the variables: @@ -41,6 +41,8 @@ * ******************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -87,7 +89,6 @@ static inline int is_vxge_card_up(struct vxgedev *vdev) static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) { - unsigned long flags = 0; struct sk_buff **skb_ptr = NULL; struct sk_buff **temp; #define NR_SKB_COMPLETED 128 @@ -98,15 +99,16 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) more = 0; skb_ptr = completed; - if (spin_trylock_irqsave(&fifo->tx_lock, flags)) { + if (__netif_tx_trylock(fifo->txq)) { vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr, NR_SKB_COMPLETED, &more); - spin_unlock_irqrestore(&fifo->tx_lock, flags); + __netif_tx_unlock(fifo->txq); } + /* free SKBs */ for (temp = completed; temp != skb_ptr; temp++) dev_kfree_skb_irq(*temp); - } while (more) ; + } while (more); } static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev) @@ -130,80 +132,6 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) } } -/* - * MultiQ manipulation helper functions - */ -void vxge_stop_all_tx_queue(struct vxgedev *vdev) -{ - int i; - struct net_device *dev = vdev->ndev; - - if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) { - for (i = 0; i < vdev->no_of_vpath; i++) - vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP; - } - netif_tx_stop_all_queues(dev); -} - -void vxge_stop_tx_queue(struct vxge_fifo *fifo) -{ - struct net_device *dev = fifo->ndev; - - struct netdev_queue *txq = NULL; - if (fifo->tx_steering_type == TX_MULTIQ_STEERING) - txq = netdev_get_tx_queue(dev, fifo->driver_id); - else { - txq = netdev_get_tx_queue(dev, 0); - fifo->queue_state = VPATH_QUEUE_STOP; - } - - netif_tx_stop_queue(txq); -} - -void vxge_start_all_tx_queue(struct vxgedev *vdev) -{ - int i; - struct net_device *dev = vdev->ndev; - - if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) { - for (i = 0; i < vdev->no_of_vpath; i++) - vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START; - } - netif_tx_start_all_queues(dev); -} - -static void vxge_wake_all_tx_queue(struct vxgedev *vdev) -{ - int i; - struct net_device *dev = vdev->ndev; - - if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) { - for (i = 0; i < vdev->no_of_vpath; i++) - vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START; - } - netif_tx_wake_all_queues(dev); -} - -void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb) -{ - struct net_device *dev = fifo->ndev; - - int vpath_no = fifo->driver_id; - struct netdev_queue *txq = NULL; - if (fifo->tx_steering_type == TX_MULTIQ_STEERING) { - txq = netdev_get_tx_queue(dev, vpath_no); - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - } else { - txq = netdev_get_tx_queue(dev, 0); - if (fifo->queue_state == VPATH_QUEUE_STOP) - if (netif_tx_queue_stopped(txq)) { - fifo->queue_state = VPATH_QUEUE_START; - netif_tx_wake_queue(txq); - } - } -} - /* * vxge_callback_link_up * @@ -218,11 +146,11 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev) vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); - printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name); + netdev_notice(vdev->ndev, "Link Up\n"); vdev->stats.link_up++; netif_carrier_on(vdev->ndev); - vxge_wake_all_tx_queue(vdev); + netif_tx_wake_all_queues(vdev->ndev); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 
vdev->ndev->name, __func__, __LINE__); @@ -242,11 +170,11 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev) vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); - printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name); + netdev_notice(vdev->ndev, "Link Down\n"); vdev->stats.link_down++; netif_carrier_off(vdev->ndev); - vxge_stop_all_tx_queue(vdev); + netif_tx_stop_all_queues(vdev->ndev); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); @@ -677,7 +605,8 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, &dtr, &t_code) == VXGE_HW_OK); *skb_ptr = done_skb; - vxge_wake_tx_queue(fifo, skb); + if (netif_tx_queue_stopped(fifo->txq)) + netif_tx_wake_queue(fifo->txq); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", @@ -686,8 +615,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, } /* select a vpath to transmit the packet */ -static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, - int *do_lock) +static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb) { u16 queue_len, counter = 0; if (skb->protocol == htons(ETH_P_IP)) { @@ -706,12 +634,6 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, vdev->vpath_selector[queue_len - 1]; if (counter >= queue_len) counter = queue_len - 1; - - if (ip->protocol == IPPROTO_UDP) { -#ifdef NETIF_F_LLTX - *do_lock = 0; -#endif - } } } return counter; @@ -808,8 +730,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) * * This function is the Tx entry point of the driver. Neterion NIC supports * certain protocol assist features on Tx side, namely CSO, S/G, LSO. - * NOTE: when device cant queue the pkt, just the trans_start variable will - * not be upadted. 
*/ static netdev_tx_t vxge_xmit(struct sk_buff *skb, struct net_device *dev) @@ -826,9 +746,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) struct vxge_tx_priv *txdl_priv = NULL; struct __vxge_hw_fifo *fifo_hw; int offload_type; - unsigned long flags = 0; int vpath_no = 0; - int do_spin_tx_lock = 1; vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", dev->name, __func__, __LINE__); @@ -864,7 +782,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) vpath_no = skb_get_queue_mapping(skb); else if (vdev->config.tx_steering_type == TX_PORT_STEERING) - vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock); + vpath_no = vxge_get_vpath_no(vdev, skb); vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no); @@ -874,46 +792,29 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) fifo = &vdev->vpaths[vpath_no].fifo; fifo_hw = fifo->handle; - if (do_spin_tx_lock) - spin_lock_irqsave(&fifo->tx_lock, flags); - else { - if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) - return NETDEV_TX_LOCKED; - } + if (netif_tx_queue_stopped(fifo->txq)) + return NETDEV_TX_BUSY; - if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) { - if (netif_subqueue_stopped(dev, skb)) { - spin_unlock_irqrestore(&fifo->tx_lock, flags); - return NETDEV_TX_BUSY; - } - } else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) { - if (netif_queue_stopped(dev)) { - spin_unlock_irqrestore(&fifo->tx_lock, flags); - return NETDEV_TX_BUSY; - } - } avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw); if (avail == 0) { vxge_debug_tx(VXGE_ERR, "%s: No free TXDs available", dev->name); fifo->stats.txd_not_free++; - vxge_stop_tx_queue(fifo); - goto _exit2; + goto _exit0; } /* Last TXD? Stop tx queue to avoid dropping packets. TX * completion will resume the queue. 
*/ if (avail == 1) - vxge_stop_tx_queue(fifo); + netif_tx_stop_queue(fifo->txq); status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv); if (unlikely(status != VXGE_HW_OK)) { vxge_debug_tx(VXGE_ERR, "%s: Out of descriptors .", dev->name); fifo->stats.txd_out_of_desc++; - vxge_stop_tx_queue(fifo); - goto _exit2; + goto _exit0; } vxge_debug_tx(VXGE_TRACE, @@ -933,9 +834,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { vxge_hw_fifo_txdl_free(fifo_hw, dtr); - vxge_stop_tx_queue(fifo); fifo->stats.pci_map_fail++; - goto _exit2; + goto _exit0; } txdl_priv = vxge_hw_fifo_txdl_private_get(dtr); @@ -958,13 +858,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) if (!frag->size) continue; - dma_pointer = - (u64)pci_map_page(fifo->pdev, frag->page, + dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) - goto _exit0; + goto _exit2; vxge_debug_tx(VXGE_TRACE, "%s: %s:%d frag = %d dma_pointer = 0x%llx", dev->name, __func__, __LINE__, i, @@ -979,11 +878,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) offload_type = vxge_offload_type(skb); if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { - int mss = vxge_tcp_mss(skb); if (mss) { - vxge_debug_tx(VXGE_TRACE, - "%s: %s:%d mss = %d", + vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d", dev->name, __func__, __LINE__, mss); vxge_hw_fifo_txdl_mss_set(dtr, mss); } else { @@ -1001,19 +898,13 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); vxge_hw_fifo_txdl_post(fifo_hw, dtr); -#ifdef NETIF_F_LLTX - dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ -#endif - spin_unlock_irqrestore(&fifo->tx_lock, flags); - VXGE_COMPLETE_VPATH_TX(fifo); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); return NETDEV_TX_OK; -_exit0: +_exit2: vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name); - _exit1: j = 0; frag = &skb_shinfo(skb)->frags[0]; @@ -1028,10 +919,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) } vxge_hw_fifo_txdl_free(fifo_hw, dtr); -_exit2: +_exit0: + netif_tx_stop_queue(fifo->txq); dev_kfree_skb(skb); - spin_unlock_irqrestore(&fifo->tx_lock, flags); - VXGE_COMPLETE_VPATH_TX(fifo); return NETDEV_TX_OK; } @@ -1121,7 +1011,8 @@ static void vxge_set_multicast(struct net_device *dev) struct netdev_hw_addr *ha; struct vxgedev *vdev; int i, mcast_cnt = 0; - struct __vxge_hw_device *hldev; + struct __vxge_hw_device *hldev; + struct vxge_vpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info; int vpath_idx = 0; @@ -1141,46 +1032,48 @@ static void vxge_set_multicast(struct net_device *dev) if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) { for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_assert(vdev->vpaths[i].is_open); - status = vxge_hw_vpath_mcast_enable( - vdev->vpaths[i].handle); + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, "failed to enable " + "multicast, status %d", status); vdev->all_multi_flg = 1; } - } else if ((dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) { + } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) { for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_assert(vdev->vpaths[i].is_open); - status = vxge_hw_vpath_mcast_disable( - vdev->vpaths[i].handle); - 
vdev->all_multi_flg = 1; + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + status = vxge_hw_vpath_mcast_disable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, "failed to disable " + "multicast, status %d", status); + vdev->all_multi_flg = 0; } } - if (status != VXGE_HW_OK) - vxge_debug_init(VXGE_ERR, - "failed to %s multicast, status %d", - dev->flags & IFF_ALLMULTI ? - "enable" : "disable", status); if (!vdev->config.addr_learn_en) { - if (dev->flags & IFF_PROMISC) { - for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_assert(vdev->vpaths[i].is_open); + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + + if (dev->flags & IFF_PROMISC) status = vxge_hw_vpath_promisc_enable( - vdev->vpaths[i].handle); - } - } else { - for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_assert(vdev->vpaths[i].is_open); + vpath->handle); + else status = vxge_hw_vpath_promisc_disable( - vdev->vpaths[i].handle); - } + vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, "failed to %s promisc" + ", status %d", dev->flags&IFF_PROMISC ? + "enable" : "disable", status); } } memset(&mac_info, 0, sizeof(struct macInfo)); /* Update individual M_CAST address list */ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) { - mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; list_head = &vdev->vpaths[0].mac_addr_list; if ((netdev_mc_count(dev) + @@ -1190,14 +1083,7 @@ static void vxge_set_multicast(struct net_device *dev) /* Delete previous MC's */ for (i = 0; i < mcast_cnt; i++) { - if (!list_empty(list_head)) - mac_entry = (struct vxge_mac_addrs *) - list_first_entry(list_head, - struct vxge_mac_addrs, - item); - list_for_each_safe(entry, next, list_head) { - mac_entry = (struct vxge_mac_addrs *) entry; /* Copy the mac address to delete */ mac_address = (u8 *)&mac_entry->macaddr; @@ -1240,9 +1126,7 @@ static void vxge_set_multicast(struct net_device *dev) mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; /* Delete previous MC's */ for (i = 0; i < mcast_cnt; i++) { - list_for_each_safe(entry, next, list_head) { - mac_entry = (struct vxge_mac_addrs *) entry; /* Copy the mac address to delete */ mac_address = (u8 *)&mac_entry->macaddr; @@ -1262,9 +1146,10 @@ static void vxge_set_multicast(struct net_device *dev) /* Enable all multicast */ for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_assert(vdev->vpaths[i].is_open); - status = vxge_hw_vpath_mcast_enable( - vdev->vpaths[i].handle); + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + + status = vxge_hw_vpath_mcast_enable(vpath->handle); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s:%d Enabling all multicasts failed", @@ -1425,6 +1310,7 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) { enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; int ret = 0; /* check if device is down already */ @@ -1435,12 +1321,10 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) return 0; - if (vdev->vpaths[vp_id].handle) { - if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle) - == VXGE_HW_OK) { + if (vpath->handle) { + if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { if (is_vxge_card_up(vdev) && - vxge_hw_vpath_recover_from_reset( - vdev->vpaths[vp_id].handle) + vxge_hw_vpath_recover_from_reset(vpath->handle) != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_recover_from_reset" @@ -1456,11 
+1340,20 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) } else return VXGE_HW_FAIL; - vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); - vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); + vxge_restore_vpath_mac_addr(vpath); + vxge_restore_vpath_vid_table(vpath); /* Enable all broadcast */ - vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle); + vxge_hw_vpath_bcast_enable(vpath->handle); + + /* Enable all multicast */ + if (vdev->all_multi_flg) { + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, + "%s:%d Enabling multicast failed", + __func__, __LINE__); + } /* Enable the interrupts */ vxge_vpath_intr_enable(vdev, vp_id); @@ -1468,17 +1361,18 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) smp_wmb(); /* Enable the flow of traffic through the vpath */ - vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle); + vxge_hw_vpath_enable(vpath->handle); smp_wmb(); - vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle); - vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK; + vxge_hw_vpath_rx_doorbell_init(vpath->handle); + vpath->ring.last_status = VXGE_HW_OK; /* Vpath reset done */ clear_bit(vp_id, &vdev->vp_reset); /* Start the vpath queue */ - vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL); + if (netif_tx_queue_stopped(vpath->fifo.txq)) + netif_tx_wake_queue(vpath->fifo.txq); return ret; } @@ -1512,9 +1406,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) vxge_debug_init(VXGE_ERR, "%s: execution mode is debug, returning..", vdev->ndev->name); - clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); - vxge_stop_all_tx_queue(vdev); - return 0; + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + netif_tx_stop_all_queues(vdev->ndev); + return 0; } } @@ -1523,7 +1417,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) switch (vdev->cric_err_event) { case VXGE_HW_EVENT_UNKNOWN: - vxge_stop_all_tx_queue(vdev); + netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "unknown error", @@ -1544,7 +1438,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) case VXGE_HW_EVENT_VPATH_ERR: break; case VXGE_HW_EVENT_CRITICAL_ERR: - vxge_stop_all_tx_queue(vdev); + netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "serious error", @@ -1554,7 +1448,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) ret = -EPERM; goto out; case VXGE_HW_EVENT_SERR: - vxge_stop_all_tx_queue(vdev); + netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "serious error", @@ -1566,7 +1460,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) ret = -EPERM; goto out; case VXGE_HW_EVENT_SLOT_FREEZE: - vxge_stop_all_tx_queue(vdev); + netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "slot freeze", @@ -1580,7 +1474,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) } if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) - vxge_stop_all_tx_queue(vdev); + netif_tx_stop_all_queues(vdev->ndev); if (event == VXGE_LL_FULL_RESET) { status = vxge_reset_all_vpaths(vdev); @@ -1640,7 +1534,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); } - vxge_wake_all_tx_queue(vdev); + netif_tx_wake_all_queues(vdev->ndev); } out: @@ -1661,8 +1555,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) */ int vxge_reset(struct 
vxgedev *vdev) { - do_vxge_reset(vdev, VXGE_LL_FULL_RESET); - return 0; + return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); } /** @@ -2025,17 +1918,17 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) /* reset vpaths */ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) { - int i; enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + int i; - for (i = 0; i < vdev->no_of_vpath; i++) - if (vdev->vpaths[i].handle) { - if (vxge_hw_vpath_reset(vdev->vpaths[i].handle) - == VXGE_HW_OK) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + if (vpath->handle) { + if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { if (is_vxge_card_up(vdev) && vxge_hw_vpath_recover_from_reset( - vdev->vpaths[i].handle) - != VXGE_HW_OK) { + vpath->handle) != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_recover_" "from_reset failed for vpath: " @@ -2049,83 +1942,93 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) return status; } } + } + return status; } /* close vpaths */ void vxge_close_vpaths(struct vxgedev *vdev, int index) { + struct vxge_vpath *vpath; int i; + for (i = index; i < vdev->no_of_vpath; i++) { - if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) { - vxge_hw_vpath_close(vdev->vpaths[i].handle); + vpath = &vdev->vpaths[i]; + + if (vpath->handle && vpath->is_open) { + vxge_hw_vpath_close(vpath->handle); vdev->stats.vpaths_open--; } - vdev->vpaths[i].is_open = 0; - vdev->vpaths[i].handle = NULL; + vpath->is_open = 0; + vpath->handle = NULL; } } /* open vpaths */ int vxge_open_vpaths(struct vxgedev *vdev) { + struct vxge_hw_vpath_attr attr; enum vxge_hw_status status; - int i; + struct vxge_vpath *vpath; u32 vp_id = 0; - struct vxge_hw_vpath_attr attr; + int i; for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_assert(vdev->vpaths[i].is_configured); - attr.vp_id = vdev->vpaths[i].device_id; + vpath = &vdev->vpaths[i]; + + vxge_assert(vpath->is_configured); + attr.vp_id = vpath->device_id; attr.fifo_attr.callback = vxge_xmit_compl; attr.fifo_attr.txdl_term = vxge_tx_term; attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); - attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo; + attr.fifo_attr.userdata = &vpath->fifo; attr.ring_attr.callback = vxge_rx_1b_compl; attr.ring_attr.rxd_init = vxge_rx_initial_replenish; attr.ring_attr.rxd_term = vxge_rx_term; attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); - attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring; + attr.ring_attr.userdata = &vpath->ring; - vdev->vpaths[i].ring.ndev = vdev->ndev; - vdev->vpaths[i].ring.pdev = vdev->pdev; - status = vxge_hw_vpath_open(vdev->devh, &attr, - &(vdev->vpaths[i].handle)); + vpath->ring.ndev = vdev->ndev; + vpath->ring.pdev = vdev->pdev; + status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); if (status == VXGE_HW_OK) { - vdev->vpaths[i].fifo.handle = + vpath->fifo.handle = (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; - vdev->vpaths[i].ring.handle = + vpath->ring.handle = (struct __vxge_hw_ring *)attr.ring_attr.userdata; - vdev->vpaths[i].fifo.tx_steering_type = + vpath->fifo.tx_steering_type = vdev->config.tx_steering_type; - vdev->vpaths[i].fifo.ndev = vdev->ndev; - vdev->vpaths[i].fifo.pdev = vdev->pdev; - vdev->vpaths[i].fifo.indicate_max_pkts = + vpath->fifo.ndev = vdev->ndev; + vpath->fifo.pdev = vdev->pdev; + if (vdev->config.tx_steering_type) + vpath->fifo.txq = + netdev_get_tx_queue(vdev->ndev, i); + else + vpath->fifo.txq = + netdev_get_tx_queue(vdev->ndev, 0); + 
vpath->fifo.indicate_max_pkts = vdev->config.fifo_indicate_max_pkts; - vdev->vpaths[i].ring.rx_vector_no = 0; - vdev->vpaths[i].ring.rx_csum = vdev->rx_csum; - vdev->vpaths[i].is_open = 1; - vdev->vp_handles[i] = vdev->vpaths[i].handle; - vdev->vpaths[i].ring.gro_enable = - vdev->config.gro_enable; - vdev->vpaths[i].ring.vlan_tag_strip = - vdev->vlan_tag_strip; + vpath->ring.rx_vector_no = 0; + vpath->ring.rx_csum = vdev->rx_csum; + vpath->is_open = 1; + vdev->vp_handles[i] = vpath->handle; + vpath->ring.gro_enable = vdev->config.gro_enable; + vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; vdev->stats.vpaths_open++; } else { vdev->stats.vpath_open_fail++; vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to open " "with status: %d", - vdev->ndev->name, vdev->vpaths[i].device_id, + vdev->ndev->name, vpath->device_id, status); vxge_close_vpaths(vdev, 0); return -EPERM; } - vp_id = - ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)-> - vpath->vp_id; + vp_id = vpath->handle->vpath->vp_id; vdev->vpaths_deployed |= vxge_mBIT(vp_id); } return VXGE_HW_OK; @@ -2299,7 +2202,6 @@ static int vxge_alloc_msix(struct vxgedev *vdev) vdev->vxge_entries[j].in_use = 0; ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); - if (ret > 0) { vxge_debug_init(VXGE_ERR, "%s: MSI-X enable failed for %d vectors, ret: %d", @@ -2345,17 +2247,16 @@ static int vxge_enable_msix(struct vxgedev *vdev) ret = vxge_alloc_msix(vdev); if (!ret) { for (i = 0; i < vdev->no_of_vpath; i++) { + struct vxge_vpath *vpath = &vdev->vpaths[i]; - /* If fifo or ring are not enabled - the MSIX vector for that should be set to 0 - Hence initializeing this array to all 0s. - */ - vdev->vpaths[i].ring.rx_vector_no = - (vdev->vpaths[i].device_id * - VXGE_HW_VPATH_MSIX_ACTIVE) + 1; + /* If fifo or ring are not enabled, the MSIX vector for + * it should be set to 0. + */ + vpath->ring.rx_vector_no = (vpath->device_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + 1; - vxge_hw_vpath_msix_set(vdev->vpaths[i].handle, - tim_msix_id, VXGE_ALARM_MSIX_ID); + vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, + VXGE_ALARM_MSIX_ID); } } @@ -2570,9 +2471,10 @@ static void vxge_poll_vp_reset(unsigned long data) static void vxge_poll_vp_lockup(unsigned long data) { struct vxgedev *vdev = (struct vxgedev *)data; - int i; - struct vxge_ring *ring; enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + struct vxge_ring *ring; + int i; for (i = 0; i < vdev->no_of_vpath; i++) { ring = &vdev->vpaths[i].ring; @@ -2586,13 +2488,13 @@ static void vxge_poll_vp_lockup(unsigned long data) /* schedule vpath reset */ if (!test_and_set_bit(i, &vdev->vp_reset)) { + vpath = &vdev->vpaths[i]; /* disable interrupts for this vpath */ vxge_vpath_intr_disable(vdev, i); /* stop the queue for this vpath */ - vxge_stop_tx_queue(&vdev->vpaths[i]. 
- fifo); + netif_tx_stop_queue(vpath->fifo.txq); continue; } } @@ -2621,6 +2523,7 @@ vxge_open(struct net_device *dev) enum vxge_hw_status status; struct vxgedev *vdev; struct __vxge_hw_device *hldev; + struct vxge_vpath *vpath; int ret = 0; int i; u64 val64, function_mode; @@ -2654,20 +2557,21 @@ vxge_open(struct net_device *dev) goto out1; } - if (vdev->config.intr_type != MSI_X) { netif_napi_add(dev, &vdev->napi, vxge_poll_inta, vdev->config.napi_weight); napi_enable(&vdev->napi); - for (i = 0; i < vdev->no_of_vpath; i++) - vdev->vpaths[i].ring.napi_p = &vdev->napi; + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vpath->ring.napi_p = &vdev->napi; + } } else { for (i = 0; i < vdev->no_of_vpath; i++) { - netif_napi_add(dev, &vdev->vpaths[i].ring.napi, + vpath = &vdev->vpaths[i]; + netif_napi_add(dev, &vpath->ring.napi, vxge_poll_msix, vdev->config.napi_weight); - napi_enable(&vdev->vpaths[i].ring.napi); - vdev->vpaths[i].ring.napi_p = - &vdev->vpaths[i].ring.napi; + napi_enable(&vpath->ring.napi); + vpath->ring.napi_p = &vpath->ring.napi; } } @@ -2684,9 +2588,10 @@ vxge_open(struct net_device *dev) } for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + /* set initial mtu before enabling the device */ - status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle, - vdev->mtu); + status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: fatal: can not set new MTU", dev->name); @@ -2700,10 +2605,21 @@ vxge_open(struct net_device *dev) "%s: MTU is %d", vdev->ndev->name, vdev->mtu); VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev); - /* Reprogram the DA table with populated mac addresses */ - for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_restore_vpath_mac_addr(&vdev->vpaths[i]); - vxge_restore_vpath_vid_table(&vdev->vpaths[i]); + /* Restore the DA, VID table and also multicast and promiscuous mode + * states + */ + if (vdev->all_multi_flg) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_restore_vpath_mac_addr(vpath); + vxge_restore_vpath_vid_table(vpath); + + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, + "%s:%d Enabling multicast failed", + __func__, __LINE__); + } } /* Enable vpath to sniff all unicast/multicast traffic that not @@ -2732,14 +2648,14 @@ vxge_open(struct net_device *dev) /* Enabling Bcast and mcast for all vpath */ for (i = 0; i < vdev->no_of_vpath; i++) { - status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle); + vpath = &vdev->vpaths[i]; + status = vxge_hw_vpath_bcast_enable(vpath->handle); if (status != VXGE_HW_OK) vxge_debug_init(VXGE_ERR, "%s : Can not enable bcast for vpath " "id %d", dev->name, i); if (vdev->config.addr_learn_en) { - status = - vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle); + status = vxge_hw_vpath_mcast_enable(vpath->handle); if (status != VXGE_HW_OK) vxge_debug_init(VXGE_ERR, "%s : Can not enable mcast for vpath " @@ -2765,7 +2681,7 @@ vxge_open(struct net_device *dev) if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) { netif_carrier_on(vdev->ndev); - printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name); + netdev_notice(vdev->ndev, "Link Up\n"); vdev->stats.link_up++; } @@ -2774,12 +2690,14 @@ vxge_open(struct net_device *dev) smp_wmb(); for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_hw_vpath_enable(vdev->vpaths[i].handle); + vpath = &vdev->vpaths[i]; + + vxge_hw_vpath_enable(vpath->handle); smp_wmb(); - 
vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); + vxge_hw_vpath_rx_doorbell_init(vpath->handle); } - vxge_start_all_tx_queue(vdev); + netif_tx_start_all_queues(vdev->ndev); goto out0; out2: @@ -2901,8 +2819,8 @@ int do_vxge_close(struct net_device *dev, int do_io) } netif_carrier_off(vdev->ndev); - printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name); - vxge_stop_all_tx_queue(vdev); + netdev_notice(vdev->ndev, "Link Down\n"); + netif_tx_stop_all_queues(vdev->ndev); /* Note that at this point xmit() is stopped by upper layer */ if (do_io) @@ -3211,11 +3129,11 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, struct net_device *ndev; enum vxge_hw_status status = VXGE_HW_OK; struct vxgedev *vdev; - int i, ret = 0, no_of_queue = 1; + int ret = 0, no_of_queue = 1; u64 stat; *vdev_out = NULL; - if (config->tx_steering_type == TX_MULTIQ_STEERING) + if (config->tx_steering_type) no_of_queue = no_of_vpath; ndev = alloc_etherdev_mq(sizeof(struct vxgedev), @@ -3284,16 +3202,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, if (vdev->config.gro_enable) ndev->features |= NETIF_F_GRO; - if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) - ndev->real_num_tx_queues = no_of_vpath; - -#ifdef NETIF_F_LLTX - ndev->features |= NETIF_F_LLTX; -#endif - - for (i = 0; i < no_of_vpath; i++) - spin_lock_init(&vdev->vpaths[i].fifo.tx_lock); - if (register_netdev(ndev)) { vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s: %s : device registration failed!", @@ -3393,6 +3301,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, { struct net_device *dev = hldev->ndev; struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct vxge_vpath *vpath = NULL; int vpath_idx; vxge_debug_entryexit(vdev->level_trace, @@ -3403,9 +3312,11 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, */ vdev->cric_err_event = type; - for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) - if (vdev->vpaths[vpath_idx].device_id == vp_id) + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + vpath = &vdev->vpaths[vpath_idx]; + if (vpath->device_id == vp_id) break; + } if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { if (type == VXGE_HW_EVENT_SLOT_FREEZE) { @@ -3442,8 +3353,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, vxge_vpath_intr_disable(vdev, vpath_idx); /* stop the queue for this vpath */ - vxge_stop_tx_queue(&vdev->vpaths[vpath_idx]. 
- fifo); + netif_tx_stop_queue(vpath->fifo.txq); } } } @@ -3936,9 +3846,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) struct vxgedev *vdev = netdev_priv(netdev); if (pci_enable_device(pdev)) { - printk(KERN_ERR "%s: " - "Cannot re-enable device after reset\n", - VXGE_DRIVER_NAME); + netdev_err(netdev, "Cannot re-enable device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } @@ -3963,9 +3871,8 @@ static void vxge_io_resume(struct pci_dev *pdev) if (netif_running(netdev)) { if (vxge_open(netdev)) { - printk(KERN_ERR "%s: " - "Can't bring device back up after reset\n", - VXGE_DRIVER_NAME); + netdev_err(netdev, + "Can't bring device back up after reset\n"); return; } } @@ -4023,7 +3930,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) int high_dma = 0; u64 vpath_mask = 0; struct vxgedev *vdev; - struct vxge_config ll_config; + struct vxge_config *ll_config = NULL; struct vxge_hw_device_config *device_config = NULL; struct vxge_hw_device_attr attr; int i, j, no_of_vpath = 0, max_vpath_supported = 0; @@ -4082,17 +3989,24 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit0; } - memset(&ll_config, 0, sizeof(struct vxge_config)); - ll_config.tx_steering_type = TX_MULTIQ_STEERING; - ll_config.intr_type = MSI_X; - ll_config.napi_weight = NEW_NAPI_WEIGHT; - ll_config.rth_steering = RTH_STEERING; + ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); + if (!ll_config) { + ret = -ENOMEM; + vxge_debug_init(VXGE_ERR, + "ll_config : malloc failed %s %d", + __FILE__, __LINE__); + goto _exit0; + } + ll_config->tx_steering_type = TX_MULTIQ_STEERING; + ll_config->intr_type = MSI_X; + ll_config->napi_weight = NEW_NAPI_WEIGHT; + ll_config->rth_steering = RTH_STEERING; /* get the default configuration parameters */ vxge_hw_device_config_default_get(device_config); /* initialize configuration parameters */ - vxge_device_config_init(device_config, &ll_config.intr_type); + vxge_device_config_init(device_config, &ll_config->intr_type); ret = pci_enable_device(pdev); if (ret) { @@ -4145,7 +4059,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) (unsigned long long)pci_resource_start(pdev, 0)); status = vxge_hw_device_hw_info_get(attr.bar0, - &ll_config.device_hw_info); + &ll_config->device_hw_info); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: Reading of hardware info failed." @@ -4154,7 +4068,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit3; } - if (ll_config.device_hw_info.fw_version.major != + if (ll_config->device_hw_info.fw_version.major != VXGE_DRIVER_FW_VERSION_MAJOR) { vxge_debug_init(VXGE_ERR, "%s: Incorrect firmware version." 
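[Editor's aside, not part of the patch] The vxge_probe() hunks above move struct vxge_config from the probe stack to a kzalloc()'d buffer that is freed on every exit path, avoiding an oversized kernel-stack frame. A minimal sketch of that pattern, with hypothetical demo_* names:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_config {
		u64 settings[512];	/* large enough to matter on the kernel stack */
	};

	static int demo_probe(void)
	{
		struct demo_config *cfg;
		int ret = 0;

		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);	/* zeroed heap copy */
		if (!cfg)
			return -ENOMEM;

		/* ... use cfg while bringing the device up ... */

		kfree(cfg);		/* freed on success and error paths alike */
		return ret;
	}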
@@ -4164,7 +4078,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit3; } - vpath_mask = ll_config.device_hw_info.vpath_mask; + vpath_mask = ll_config->device_hw_info.vpath_mask; if (vpath_mask == 0) { vxge_debug_ll_config(VXGE_TRACE, "%s: No vpaths available in device", VXGE_DRIVER_NAME); @@ -4176,10 +4090,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) "%s:%d Vpath mask = %llx", __func__, __LINE__, (unsigned long long)vpath_mask); - function_mode = ll_config.device_hw_info.function_mode; - host_type = ll_config.device_hw_info.host_type; + function_mode = ll_config->device_hw_info.function_mode; + host_type = ll_config->device_hw_info.host_type; is_privileged = __vxge_hw_device_is_privilaged(host_type, - ll_config.device_hw_info.func_id); + ll_config->device_hw_info.func_id); /* Check how many vpaths are available */ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { @@ -4193,7 +4107,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ if (is_sriov(function_mode) && (max_config_dev > 1) && - (ll_config.intr_type != INTA) && + (ll_config->intr_type != INTA) && (is_privileged == VXGE_HW_OK)) { ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) ? (max_config_dev - 1) : num_vfs); @@ -4206,7 +4120,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) * Configure vpaths and get driver configured number of vpaths * which is less than or equal to the maximum vpaths per function. */ - no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config); + no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config); if (!no_of_vpath) { vxge_debug_ll_config(VXGE_ERR, "%s: No more vpaths to configure", VXGE_DRIVER_NAME); @@ -4241,21 +4155,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) /* set private device info */ pci_set_drvdata(pdev, hldev); - ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; - ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; - ll_config.addr_learn_en = addr_learn_en; - ll_config.rth_algorithm = RTH_ALG_JENKINS; - ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; - ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; - ll_config.rth_bkt_sz = RTH_BUCKET_SIZE; - ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; - ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; - - if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath, + ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; + ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; + ll_config->addr_learn_en = addr_learn_en; + ll_config->rth_algorithm = RTH_ALG_JENKINS; + ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; + ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; + ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; + ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; + + if 
(vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, &vdev)) { ret = -EINVAL; goto _exit4; @@ -4281,12 +4195,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) vdev->vpaths[j].is_configured = 1; vdev->vpaths[j].device_id = i; - vdev->vpaths[j].fifo.driver_id = j; vdev->vpaths[j].ring.driver_id = j; vdev->vpaths[j].vdev = vdev; vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; memcpy((u8 *)vdev->vpaths[j].macaddr, - (u8 *)ll_config.device_hw_info.mac_addrs[i], + ll_config->device_hw_info.mac_addrs[i], ETH_ALEN); /* Initialize the mac address list header */ @@ -4307,18 +4220,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) macaddr = (u8 *)vdev->vpaths[0].macaddr; - ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; - ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; - ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; + ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; + ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; + ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", - vdev->ndev->name, ll_config.device_hw_info.serial_number); + vdev->ndev->name, ll_config->device_hw_info.serial_number); vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", - vdev->ndev->name, ll_config.device_hw_info.part_number); + vdev->ndev->name, ll_config->device_hw_info.part_number); vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", - vdev->ndev->name, ll_config.device_hw_info.product_desc); + vdev->ndev->name, ll_config->device_hw_info.product_desc); vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", vdev->ndev->name, macaddr); @@ -4328,11 +4241,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) vxge_debug_init(VXGE_TRACE, "%s: Firmware version : %s Date : %s", vdev->ndev->name, - ll_config.device_hw_info.fw_version.version, - ll_config.device_hw_info.fw_date.date); + ll_config->device_hw_info.fw_version.version, + ll_config->device_hw_info.fw_date.date); if (new_device) { - switch (ll_config.device_hw_info.function_mode) { + switch (ll_config->device_hw_info.function_mode) { case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: vxge_debug_init(VXGE_TRACE, "%s: Single Function Mode Enabled", vdev->ndev->name); @@ -4355,7 +4268,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) vxge_print_parm(vdev, vpath_mask); /* Store the fw version for ethttool option */ - strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version); + strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); @@ -4394,7 +4307,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) * present to prevent such a failure. 
*/ - if (ll_config.device_hw_info.function_mode == + if (ll_config->device_hw_info.function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) if (vdev->config.intr_type == INTA) vxge_hw_device_unmask_all(hldev); @@ -4406,6 +4319,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), vxge_hw_device_trace_level_get(hldev)); + kfree(ll_config); return 0; _exit5: @@ -4423,6 +4337,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) _exit1: pci_disable_device(pdev); _exit0: + kfree(ll_config); kfree(device_config); driver_config->config_dev_cnt--; pci_set_drvdata(pdev, NULL); @@ -4514,13 +4429,9 @@ static int __init vxge_starter(void) { int ret = 0; - char version[32]; - snprintf(version, 32, "%s", DRV_VERSION); - printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n", - VXGE_DRIVER_NAME); - printk(KERN_INFO "%s: Driver version: %s\n", - VXGE_DRIVER_NAME, version); + pr_info("Copyright(c) 2002-2010 Exar Corp.\n"); + pr_info("Driver version: %s\n", DRV_VERSION); verify_bandwidth(); diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h index 60276b20fa5ecba7b348a37a32d292f464d35895..2e3b064b8e4ba0322772f5487a6524e28a9f0665 100644 --- a/drivers/net/vxge/vxge-main.h +++ b/drivers/net/vxge/vxge-main.h @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-main.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #ifndef VXGE_MAIN_H #define VXGE_MAIN_H @@ -217,21 +217,13 @@ struct vxge_fifo_stats { }; struct vxge_fifo { - struct net_device *ndev; - struct pci_dev *pdev; + struct net_device *ndev; + struct pci_dev *pdev; struct __vxge_hw_fifo *handle; + struct netdev_queue *txq; - /* The vpath id maintained in the driver - - * 0 to 'maximum_vpaths_in_function - 1' - */ - int driver_id; int tx_steering_type; int indicate_max_pkts; - spinlock_t tx_lock; - /* flag used to maintain queue state when MULTIQ is not enabled */ -#define VPATH_QUEUE_START 0 -#define VPATH_QUEUE_STOP 1 - int queue_state; /* Tx stats */ struct vxge_fifo_stats stats; @@ -279,7 +271,6 @@ struct vxge_ring { } ____cacheline_aligned; struct vxge_vpath { - struct vxge_fifo fifo; struct vxge_ring ring; @@ -447,14 +438,6 @@ int vxge_open_vpaths(struct vxgedev *vdev); enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); -void vxge_stop_all_tx_queue(struct vxgedev *vdev); - -void vxge_stop_tx_queue(struct vxge_fifo *fifo); - -void vxge_start_all_tx_queue(struct vxgedev *vdev); - -void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb); - enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac); diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h index 9a0cf8eaa328b8dacb643874fdcd0647e96055ca..3dd5c9615ef9a05e77bf023fef62379778cf3761 100644 --- a/drivers/net/vxge/vxge-reg.h +++ b/drivers/net/vxge/vxge-reg.h @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-reg.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O Virtualized + * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized * Server Adapter. 
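[Editor's aside, not part of the patch] The struct vxge_fifo changes above drop the private tx_lock/queue_state machinery in favour of a cached struct netdev_queue pointer driven by the stock netif_tx_* helpers, which is what the earlier vxge-main.c hunks rely on. A minimal sketch of that per-queue pattern (names other than the netif_tx_* helpers are illustrative only):

	#include <linux/netdevice.h>

	struct demo_fifo {
		struct netdev_queue *txq;	/* cached when the queue is set up */
	};

	static void demo_fifo_out_of_txds(struct demo_fifo *fifo)
	{
		netif_tx_stop_queue(fifo->txq);		/* no descriptors left */
	}

	static void demo_fifo_tx_complete(struct demo_fifo *fifo)
	{
		if (netif_tx_queue_stopped(fifo->txq))
			netif_tx_wake_queue(fifo->txq);	/* room again, restart */
	}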
- * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #ifndef VXGE_REG_H #define VXGE_REG_H diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 6cc1dd79b40b2a134f1d0044009a3ba9746ed2bb..cedf08f99cb3dbf2a2b94e198ef168c4102dc6a5 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #include @@ -2466,14 +2466,12 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring) * the same. * @fifo: Handle to the fifo object used for non offload send * - * The function polls the Tx for the completed descriptors and calls + * The function polls the Tx for the completed descriptors and calls * the driver via supplied completion callback. * * Returns: VXGE_HW_OK, if the polling is completed successful. * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed * descriptors available which are yet to be processed. - * - * See also: vxge_hw_vpath_poll_tx(). */ enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo, struct sk_buff ***skb_ptr, int nr_skb, diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index c252f3d3f65070a9e6d06499177462916e8b3e9b..6fa07d13798e7aef3efec0ace046cd794c169a65 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h @@ -7,9 +7,9 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-traffic.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. ******************************************************************************/ #ifndef VXGE_TRAFFIC_H #define VXGE_TRAFFIC_H diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h index 5da7ab1fd307638ea564b8aa750917d5370687f9..53fefe13736875650952d7f748c79edbc1acb8c7 100644 --- a/drivers/net/vxge/vxge-version.h +++ b/drivers/net/vxge/vxge-version.h @@ -7,17 +7,16 @@ * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * - * vxge-version.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. - * Copyright(c) 2002-2009 Neterion Inc. + * Copyright(c) 2002-2010 Exar Corp. 
******************************************************************************/ #ifndef VXGE_VERSION_H - #define VXGE_VERSION_H #define VXGE_VERSION_MAJOR "2" #define VXGE_VERSION_MINOR "0" -#define VXGE_VERSION_FIX "8" -#define VXGE_VERSION_BUILD "20182" +#define VXGE_VERSION_FIX "9" +#define VXGE_VERSION_BUILD "20840" #define VXGE_VERSION_FOR "k" #endif diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index f0bd70fb650cbd669a15cc07a6aaa02116f9f684..04c6cd4333f1ec83d69be506b040b75183cdfb81 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -89,7 +89,6 @@ #include #include #include -#include #include #include #include @@ -174,6 +173,7 @@ struct cosa_data { * Character device major number. 117 was allocated for us. * The value of 0 means to allocate a first free one. */ +static DEFINE_MUTEX(cosa_chardev_mutex); static int cosa_major = 117; /* @@ -944,7 +944,7 @@ static int cosa_open(struct inode *inode, struct file *file) int n; int ret = 0; - lock_kernel(); + mutex_lock(&cosa_chardev_mutex); if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS) >= nr_cards) { ret = -ENODEV; @@ -976,7 +976,7 @@ static int cosa_open(struct inode *inode, struct file *file) chan->rx_done = chrdev_rx_done; spin_unlock_irqrestore(&cosa->lock, flags); out: - unlock_kernel(); + mutex_unlock(&cosa_chardev_mutex); return ret; } @@ -1212,10 +1212,10 @@ static long cosa_chardev_ioctl(struct file *file, unsigned int cmd, struct cosa_data *cosa; long ret; - lock_kernel(); + mutex_lock(&cosa_chardev_mutex); cosa = channel->cosa; ret = cosa_ioctl_common(cosa, channel, cmd, arg); - unlock_kernel(); + mutex_unlock(&cosa_chardev_mutex); return ret; } diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index e087b9a6daaa50078fa761acc2deabbb70b315b5..ad7719fe6d0a2ced65c2db0a27faeeab6550486a 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -15,6 +15,8 @@ * Maintainer: Kevin Curtis */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -511,21 +513,19 @@ static int fst_debug_mask = { FST_DEBUG }; * support variable numbers of macro parameters. The inverted if prevents us * eating someone else's else clause. */ -#define dbg(F,fmt,A...) if ( ! ( fst_debug_mask & (F))) \ - ; \ - else \ - printk ( KERN_DEBUG FST_NAME ": " fmt, ## A ) - +#define dbg(F, fmt, args...) \ +do { \ + if (fst_debug_mask & (F)) \ + printk(KERN_DEBUG pr_fmt(fmt), ##args); \ +} while (0) #else -#define dbg(X...) /* NOP */ +#define dbg(F, fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG pr_fmt(fmt), ##args); \ +} while (0) #endif -/* Printing short cuts - */ -#define printk_err(fmt,A...) printk ( KERN_ERR FST_NAME ": " fmt, ## A ) -#define printk_warn(fmt,A...) printk ( KERN_WARNING FST_NAME ": " fmt, ## A ) -#define printk_info(fmt,A...) printk ( KERN_INFO FST_NAME ": " fmt, ## A ) - /* * PCI ID lookup table */ @@ -961,7 +961,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd) spin_lock_irqsave(&card->card_lock, flags); if (++safety > 2000) { - printk_err("Mailbox safety timeout\n"); + pr_err("Mailbox safety timeout\n"); break; } @@ -1241,8 +1241,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port) * This seems to happen on the TE1 interface sometimes * so throw the frame away and log the event. */ - printk_err("Frame received with 0 length. Card %d Port %d\n", - card->card_no, port->index); + pr_err("Frame received with 0 length. 
Card %d Port %d\n", + card->card_no, port->index); /* Return descriptor to card */ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); @@ -1486,9 +1486,8 @@ fst_intr(int dummy, void *dev_id) */ dbg(DBG_INTR, "intr: %d %p\n", card->irq, card); if (card->state != FST_RUNNING) { - printk_err - ("Interrupt received for card %d in a non running state (%d)\n", - card->card_no, card->state); + pr_err("Interrupt received for card %d in a non running state (%d)\n", + card->card_no, card->state); /* * It is possible to really be running, i.e. we have re-loaded @@ -1614,8 +1613,7 @@ fst_intr(int dummy, void *dev_id) break; default: - printk_err("intr: unknown card event %d. ignored\n", - event); + pr_err("intr: unknown card event %d. ignored\n", event); break; } @@ -1637,13 +1635,13 @@ check_started_ok(struct fst_card_info *card) /* Check structure version and end marker */ if (FST_RDW(card, smcVersion) != SMC_VERSION) { - printk_err("Bad shared memory version %d expected %d\n", - FST_RDW(card, smcVersion), SMC_VERSION); + pr_err("Bad shared memory version %d expected %d\n", + FST_RDW(card, smcVersion), SMC_VERSION); card->state = FST_BADVERSION; return; } if (FST_RDL(card, endOfSmcSignature) != END_SIG) { - printk_err("Missing shared memory signature\n"); + pr_err("Missing shared memory signature\n"); card->state = FST_BADVERSION; return; } @@ -1651,11 +1649,11 @@ check_started_ok(struct fst_card_info *card) if ((i = FST_RDB(card, taskStatus)) == 0x01) { card->state = FST_RUNNING; } else if (i == 0xFF) { - printk_err("Firmware initialisation failed. Card halted\n"); + pr_err("Firmware initialisation failed. Card halted\n"); card->state = FST_HALTED; return; } else if (i != 0x00) { - printk_err("Unknown firmware status 0x%x\n", i); + pr_err("Unknown firmware status 0x%x\n", i); card->state = FST_HALTED; return; } @@ -1665,9 +1663,10 @@ check_started_ok(struct fst_card_info *card) * existing firmware etc so we just report it for the moment. */ if (FST_RDL(card, numberOfPorts) != card->nports) { - printk_warn("Port count mismatch on card %d." - " Firmware thinks %d we say %d\n", card->card_no, - FST_RDL(card, numberOfPorts), card->nports); + pr_warning("Port count mismatch on card %d. " + "Firmware thinks %d we say %d\n", + card->card_no, + FST_RDL(card, numberOfPorts), card->nports); } } @@ -2038,16 +2037,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Now copy the data to the card. 
*/ - buf = kmalloc(wrthdr.size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, - ifr->ifr_data + sizeof (struct fstioc_write), - wrthdr.size)) { - kfree(buf); - return -EFAULT; - } + buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write), + wrthdr.size); + if (IS_ERR(buf)) + return PTR_ERR(buf); memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size); kfree(buf); @@ -2096,9 +2089,8 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) */ if (card->state != FST_RUNNING) { - printk_err - ("Attempt to configure card %d in non-running state (%d)\n", - card->card_no, card->state); + pr_err("Attempt to configure card %d in non-running state (%d)\n", + card->card_no, card->state); return -EIO; } if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) { @@ -2390,8 +2382,8 @@ fst_init_card(struct fst_card_info *card) err = register_hdlc_device(card->ports[i].dev); if (err < 0) { int j; - printk_err ("Cannot register HDLC device for port %d" - " (errno %d)\n", i, -err ); + pr_err("Cannot register HDLC device for port %d (errno %d)\n", + i, -err); for (j = i; j < card->nports; j++) { free_netdev(card->ports[j].dev); card->ports[j].dev = NULL; @@ -2401,10 +2393,10 @@ fst_init_card(struct fst_card_info *card) } } - printk_info("%s-%s: %s IRQ%d, %d ports\n", - port_to_dev(&card->ports[0])->name, - port_to_dev(&card->ports[card->nports - 1])->name, - type_strings[card->type], card->irq, card->nports); + pr_info("%s-%s: %s IRQ%d, %d ports\n", + port_to_dev(&card->ports[0])->name, + port_to_dev(&card->ports[card->nports - 1])->name, + type_strings[card->type], card->irq, card->nports); } static const struct net_device_ops fst_ops = { @@ -2423,19 +2415,17 @@ static const struct net_device_ops fst_ops = { static int __devinit fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int firsttime_done = 0; static int no_of_cards_added = 0; struct fst_card_info *card; int err = 0; int i; - if (!firsttime_done) { - printk_info("FarSync WAN driver " FST_USER_VERSION - " (c) 2001-2004 FarSite Communications Ltd.\n"); - firsttime_done = 1; - dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask); - } - + printk_once(KERN_INFO + pr_fmt("FarSync WAN driver " FST_USER_VERSION + " (c) 2001-2004 FarSite Communications Ltd.\n")); +#if FST_DEBUG + dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask); +#endif /* * We are going to be clever and allow certain cards not to be * configured. An exclude list can be provided in /etc/modules.conf @@ -2447,8 +2437,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ for (i = 0; i < fst_excluded_cards; i++) { if ((pdev->devfn) >> 3 == fst_excluded_list[i]) { - printk_info("FarSync PCI device %d not assigned\n", - (pdev->devfn) >> 3); + pr_info("FarSync PCI device %d not assigned\n", + (pdev->devfn) >> 3); return -EBUSY; } } @@ -2457,20 +2447,19 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Allocate driver private data */ card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL); if (card == NULL) { - printk_err("FarSync card found but insufficient memory for" - " driver storage\n"); + pr_err("FarSync card found but insufficient memory for driver storage\n"); return -ENOMEM; } /* Try to enable the device */ if ((err = pci_enable_device(pdev)) != 0) { - printk_err("Failed to enable card. Err %d\n", -err); + pr_err("Failed to enable card. 
Err %d\n", -err); kfree(card); return err; } if ((err = pci_request_regions(pdev, "FarSync")) !=0) { - printk_err("Failed to allocate regions. Err %d\n", -err); + pr_err("Failed to allocate regions. Err %d\n", -err); pci_disable_device(pdev); kfree(card); return err; @@ -2481,14 +2470,14 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) card->phys_mem = pci_resource_start(pdev, 2); card->phys_ctlmem = pci_resource_start(pdev, 3); if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { - printk_err("Physical memory remap failed\n"); + pr_err("Physical memory remap failed\n"); pci_release_regions(pdev); pci_disable_device(pdev); kfree(card); return -ENODEV; } if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { - printk_err("Control memory remap failed\n"); + pr_err("Control memory remap failed\n"); pci_release_regions(pdev); pci_disable_device(pdev); kfree(card); @@ -2498,7 +2487,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Register the interrupt handler */ if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { - printk_err("Unable to register interrupt %d\n", card->irq); + pr_err("Unable to register interrupt %d\n", card->irq); pci_release_regions(pdev); pci_disable_device(pdev); iounmap(card->ctlmem); @@ -2529,7 +2518,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!dev) { while (i--) free_netdev(card->ports[i].dev); - printk_err ("FarSync: out of memory\n"); + pr_err("FarSync: out of memory\n"); free_irq(card->irq, card); pci_release_regions(pdev); pci_disable_device(pdev); @@ -2593,7 +2582,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_alloc_consistent(card->device, FST_MAX_MTU, &card->rx_dma_handle_card); if (card->rx_dma_handle_host == NULL) { - printk_err("Could not allocate rx dma buffer\n"); + pr_err("Could not allocate rx dma buffer\n"); fst_disable_intr(card); pci_release_regions(pdev); pci_disable_device(pdev); @@ -2606,7 +2595,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_alloc_consistent(card->device, FST_MAX_MTU, &card->tx_dma_handle_card); if (card->tx_dma_handle_host == NULL) { - printk_err("Could not allocate tx dma buffer\n"); + pr_err("Could not allocate tx dma buffer\n"); fst_disable_intr(card); pci_release_regions(pdev); pci_disable_device(pdev); @@ -2678,7 +2667,7 @@ fst_init(void) static void __exit fst_cleanup_module(void) { - printk_info("FarSync WAN driver unloading\n"); + pr_info("FarSync WAN driver unloading\n"); pci_unregister_driver(&fst_driver); } diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h index 3839662ff20148caa648dacd88ef71a97cc98f53..e4f539ad071b7cb75d55ba9b8d27107ce9b52a61 100644 --- a/drivers/net/wan/hd64570.h +++ b/drivers/net/wan/hd64570.h @@ -153,7 +153,7 @@ typedef struct { u16 len; /* Data Length */ u8 stat; /* Status */ u8 unused; /* pads to 2-byte boundary */ -}__attribute__ ((packed)) pkt_desc; +}__packed pkt_desc; /* Packet Descriptor Status bits */ diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index ee7083fbea50708a076c68af7e289320cb4d355c..b38ffa149aba218200c64194ae0a1a597dfc9113 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -36,7 +36,7 @@ struct hdlc_header { u8 address; u8 control; __be16 protocol; -}__attribute__ ((packed)); +}__packed; struct cisco_packet { @@ -45,7 +45,7 @@ struct cisco_packet { __be32 par2; __be16 rel; /* reliability */ __be32 time; -}__attribute__ ((packed)); 
+}__packed; #define CISCO_PACKET_LEN 18 #define CISCO_BIG_PACKET_LEN 20 diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 0e52993e20798ff763dceb3d5a34fccdba111e5c..0edb535bb2b5a746c624291288b9e973c7a76c50 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -112,7 +112,7 @@ typedef struct { unsigned de: 1; unsigned ea2: 1; #endif -}__attribute__ ((packed)) fr_hdr; +}__packed fr_hdr; typedef struct pvc_device_struct { diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 43ae6f440bfb179fbc4ee3ec59d20dfdd0a38331..f4125da2762fe9c395dcf479a91303a8c6892bbd 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -330,7 +330,7 @@ struct _dlci_stat { short dlci; char flags; -} __attribute__((packed)); +} __packed; struct _frad_stat { @@ -1211,14 +1211,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r } else { - temp = kmalloc(mem.len, GFP_KERNEL); - if (!temp) - return(-ENOMEM); - if(copy_from_user(temp, mem.data, mem.len)) - { - kfree(temp); - return -EFAULT; - } + temp = memdup_user(mem.data, mem.len); + if (IS_ERR(temp)) + return PTR_ERR(temp); sdla_write(dev, mem.addr, temp, mem.len); kfree(temp); } diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 746a5ee32f3356cf82de27fe583910f3053526e2..eb72c67699abb50f30e8b8bf7d30b43c52b73daa 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c @@ -358,8 +358,10 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) #endif err = register_netdev(dev); - if (err) + if (err) { free_irq(dev->irq, dev); + iounmap(ei_status.mem); + } return err; } diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index d86e8f31e7fcfc8b0b3bc1e1a46cc6f35cb8bfdb..2f725d0cc7624ac55d9c3fbb29e936337538a230 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -848,7 +848,7 @@ struct i2400m_cmd_enter_power_save { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_hdr tlv; __le32 val; -} __attribute__((packed)); +} __packed; /* diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 11491354e5b5bf3de80d0b91ee13d7a203a0e914..8b55a5b14152adee9c966f0d6705dddfad769ffb 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -651,7 +651,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, struct { struct i2400m_bootrom_header cmd; u8 cmd_payload[chunk_len]; - } __attribute__((packed)) *buf; + } __packed *buf; struct i2400m_bootrom_header ack; d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " @@ -794,7 +794,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m, struct { struct i2400m_bootrom_header cmd; u8 cmd_pl[0]; - } __attribute__((packed)) *cmd_buf; + } __packed *cmd_buf; size_t signature_block_offset, signature_block_size; d_fnstart(3, dev, "offset %zu\n", offset); @@ -1029,7 +1029,7 @@ int i2400m_read_mac_addr(struct i2400m *i2400m) struct { struct i2400m_bootrom_header ack; u8 ack_pl[16]; - } __attribute__((packed)) ack_buf; + } __packed ack_buf; d_fnstart(5, dev, "(i2400m %p)\n", i2400m); cmd = i2400m->bm_cmd_buf; @@ -1115,7 +1115,7 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m, struct { struct i2400m_bootrom_header cmd; struct i2400m_bcf_hdr cmd_pl; - } __attribute__((packed)) *cmd_buf; + } __packed *cmd_buf; struct i2400m_bootrom_header ack; d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr); diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c 
index 035e4cf3e6ed1a9b7d7b0184dc4f95128b366b0a..9e02b90b008001c3bc65431697560211df39e774 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -91,7 +91,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, struct { struct i2400m_l3l4_hdr hdr; struct i2400m_tlv_rf_operation sw_rf; - } __attribute__((packed)) *cmd; + } __packed *cmd; char strerr[32]; d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index 880ad9d170c2cae4687df80e3fc4e090db6120d8..a105087af9639884c1ca773ba96cf096fdde7954 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -373,8 +373,8 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev) pktlen = status & RDES0_STATUS_FL; if (pktlen > RX_PKT_SIZE) { if (net_ratelimit()) - printk(KERN_DEBUG "%s: frame too long (%d)\n", - wiphy_name(dev->wiphy), pktlen); + wiphy_debug(dev->wiphy, "frame too long (%d)\n", + pktlen); pktlen = RX_PKT_SIZE; } @@ -454,10 +454,10 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev) static irqreturn_t adm8211_interrupt(int irq, void *dev_id) { -#define ADM8211_INT(x) \ -do { \ - if (unlikely(stsr & ADM8211_STSR_ ## x)) \ - printk(KERN_DEBUG "%s: " #x "\n", wiphy_name(dev->wiphy)); \ +#define ADM8211_INT(x) \ +do { \ + if (unlikely(stsr & ADM8211_STSR_ ## x)) \ + wiphy_debug(dev->wiphy, "%s\n", #x); \ } while (0) struct ieee80211_hw *dev = dev_id; @@ -570,9 +570,9 @@ static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data) } if (timeout == 0) { - printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" - " prewrite (reg=0x%08x)\n", - wiphy_name(dev->wiphy), addr, data, reg); + wiphy_debug(dev->wiphy, + "adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n", + addr, data, reg); return -ETIMEDOUT; } @@ -605,9 +605,9 @@ static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data) if (timeout == 0) { ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) & ~ADM8211_BBPCTL_WR); - printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" - " postwrite (reg=0x%08x)\n", - wiphy_name(dev->wiphy), addr, data, reg); + wiphy_debug(dev->wiphy, + "adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n", + addr, data, reg); return -ETIMEDOUT; } @@ -675,8 +675,8 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) break; default: - printk(KERN_DEBUG "%s: unsupported transceiver type %d\n", - wiphy_name(dev->wiphy), priv->transceiver_type); + wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n", + priv->transceiver_type); break; } @@ -732,8 +732,8 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) /* Nothing to do for ADMtek BBP */ } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) - printk(KERN_DEBUG "%s: unsupported BBP type %d\n", - wiphy_name(dev->wiphy), priv->bbp_type); + wiphy_debug(dev->wiphy, "unsupported bbp type %d\n", + priv->bbp_type); ADM8211_RESTORE(); @@ -1027,13 +1027,12 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) break; default: - printk(KERN_DEBUG "%s: unsupported transceiver %d\n", - wiphy_name(dev->wiphy), priv->transceiver_type); + wiphy_debug(dev->wiphy, "unsupported transceiver %d\n", + priv->transceiver_type); break; } } else - printk(KERN_DEBUG "%s: unsupported BBP %d\n", - wiphy_name(dev->wiphy), priv->bbp_type); + wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type); ADM8211_CSR_WRITE(SYNRF, 0); @@ -1509,15 +1508,13 @@ static int 
adm8211_start(struct ieee80211_hw *dev) /* Power up MAC and RF chips */ retval = adm8211_hw_reset(dev); if (retval) { - printk(KERN_ERR "%s: hardware reset failed\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "hardware reset failed\n"); goto fail; } retval = adm8211_init_rings(dev); if (retval) { - printk(KERN_ERR "%s: failed to initialize rings\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "failed to initialize rings\n"); goto fail; } @@ -1528,8 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev) retval = request_irq(priv->pdev->irq, adm8211_interrupt, IRQF_SHARED, "adm8211", dev); if (retval) { - printk(KERN_ERR "%s: failed to register IRQ handler\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "failed to register irq handler\n"); goto fail; } @@ -1903,15 +1899,17 @@ static int __devinit adm8211_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "%s (adm8211): Cannot register device\n", pci_name(pdev)); - goto err_free_desc; + goto err_free_eeprom; } - printk(KERN_INFO "%s: hwaddr %pM, Rev 0x%02x\n", - wiphy_name(dev->wiphy), dev->wiphy->perm_addr, - pdev->revision); + wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n", + dev->wiphy->perm_addr, pdev->revision); return 0; + err_free_eeprom: + kfree(priv->eeprom); + err_free_desc: pci_free_consistent(pdev, sizeof(struct adm8211_desc) * priv->rx_ring_size + diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h index b07e4d3a6b4dda728ec598ae2689d180887556fe..bbc10b1cde876e2752ae3b2f7b6770997844fcda 100644 --- a/drivers/net/wireless/adm8211.h +++ b/drivers/net/wireless/adm8211.h @@ -80,7 +80,7 @@ struct adm8211_csr { __le32 FEMR; /* 0x104 */ __le32 FPSR; /* 0x108 */ __le32 FFER; /* 0x10C */ -} __attribute__ ((packed)); +} __packed; /* CSR0 - PAR (PCI Address Register) */ #define ADM8211_PAR_MWIE (1 << 24) @@ -484,7 +484,7 @@ struct adm8211_tx_hdr { u8 entry_control; // huh?? 
u16 reserved_1; u32 reserved_2; -} __attribute__ ((packed)); +} __packed; #define RX_COPY_BREAK 128 @@ -531,7 +531,7 @@ struct adm8211_eeprom { u8 lnags_threshold[14]; /* 0x70 */ __le16 checksum; /* 0x7E */ u8 cis_data[0]; /* 0x80, 384 bytes */ -} __attribute__ ((packed)); +} __packed; struct adm8211_priv { struct pci_dev *pdev; diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 3b7ab20a5c54ca77fcbf04db0e0095ebc1ef5c5c..1d05445d4ba397cb04545d1ba105e1040c864176 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -506,20 +506,20 @@ struct WepKeyRid { u8 mac[ETH_ALEN]; __le16 klen; u8 key[16]; -} __attribute__ ((packed)); +} __packed; /* These structures are from the Aironet's PC4500 Developers Manual */ typedef struct Ssid Ssid; struct Ssid { __le16 len; u8 ssid[32]; -} __attribute__ ((packed)); +} __packed; typedef struct SsidRid SsidRid; struct SsidRid { __le16 len; Ssid ssids[3]; -} __attribute__ ((packed)); +} __packed; typedef struct ModulationRid ModulationRid; struct ModulationRid { @@ -528,7 +528,7 @@ struct ModulationRid { #define MOD_DEFAULT cpu_to_le16(0) #define MOD_CCK cpu_to_le16(1) #define MOD_MOK cpu_to_le16(2) -} __attribute__ ((packed)); +} __packed; typedef struct ConfigRid ConfigRid; struct ConfigRid { @@ -652,7 +652,7 @@ struct ConfigRid { #define MAGIC_STAY_IN_CAM (1<<10) u8 magicControl; __le16 autoWake; -} __attribute__ ((packed)); +} __packed; typedef struct StatusRid StatusRid; struct StatusRid { @@ -711,20 +711,20 @@ struct StatusRid { #define STAT_LEAPFAILED 91 #define STAT_LEAPTIMEDOUT 92 #define STAT_LEAPCOMPLETE 93 -} __attribute__ ((packed)); +} __packed; typedef struct StatsRid StatsRid; struct StatsRid { __le16 len; __le16 spacer; __le32 vals[100]; -} __attribute__ ((packed)); +} __packed; typedef struct APListRid APListRid; struct APListRid { __le16 len; u8 ap[4][ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; typedef struct CapabilityRid CapabilityRid; struct CapabilityRid { @@ -754,7 +754,7 @@ struct CapabilityRid { __le16 bootBlockVer; __le16 requiredHard; __le16 extSoftCap; -} __attribute__ ((packed)); +} __packed; /* Only present on firmware >= 5.30.17 */ typedef struct BSSListRidExtra BSSListRidExtra; @@ -762,7 +762,7 @@ struct BSSListRidExtra { __le16 unknown[4]; u8 fixed[12]; /* WLAN management frame */ u8 iep[624]; -} __attribute__ ((packed)); +} __packed; typedef struct BSSListRid BSSListRid; struct BSSListRid { @@ -796,7 +796,7 @@ struct BSSListRid { /* Only present on firmware >= 5.30.17 */ BSSListRidExtra extra; -} __attribute__ ((packed)); +} __packed; typedef struct { BSSListRid bss; @@ -807,13 +807,13 @@ typedef struct tdsRssiEntry tdsRssiEntry; struct tdsRssiEntry { u8 rssipct; u8 rssidBm; -} __attribute__ ((packed)); +} __packed; typedef struct tdsRssiRid tdsRssiRid; struct tdsRssiRid { u16 len; tdsRssiEntry x[256]; -} __attribute__ ((packed)); +} __packed; typedef struct MICRid MICRid; struct MICRid { @@ -823,7 +823,7 @@ struct MICRid { u8 multicast[16]; __le16 unicastValid; u8 unicast[16]; -} __attribute__ ((packed)); +} __packed; typedef struct MICBuffer MICBuffer; struct MICBuffer { @@ -841,7 +841,7 @@ struct MICBuffer { } u; __be32 mic; __be32 seq; -} __attribute__ ((packed)); +} __packed; typedef struct { u8 da[ETH_ALEN]; @@ -996,7 +996,7 @@ struct rx_hdr { u8 rate; u8 freq; __le16 tmp[4]; -} __attribute__ ((packed)); +} __packed; typedef struct { unsigned int ctl: 15; @@ -2931,8 +2931,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, release_region( 
dev->base_addr, 64 ); err_out_nets: airo_networks_free(ai); - del_airo_dev(ai); err_out_free: + del_airo_dev(ai); free_netdev(dev); return NULL; } @@ -4657,7 +4657,7 @@ static ssize_t proc_write( struct file *file, loff_t *offset ) { loff_t pos = *offset; - struct proc_data *priv = (struct proc_data*)file->private_data; + struct proc_data *priv = file->private_data; if (!priv->wbuffer) return -EINVAL; @@ -4689,7 +4689,7 @@ static int proc_status_open(struct inode *inode, struct file *file) if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; - data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; @@ -4772,7 +4772,7 @@ static int proc_stats_rid_open( struct inode *inode, if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; - data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; @@ -5045,7 +5045,7 @@ static int proc_config_open(struct inode *inode, struct file *file) if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; - data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; @@ -5127,7 +5127,7 @@ static int proc_config_open(struct inode *inode, struct file *file) static void proc_SSID_on_close(struct inode *inode, struct file *file) { - struct proc_data *data = (struct proc_data *)file->private_data; + struct proc_data *data = file->private_data; struct proc_dir_entry *dp = PDE(inode); struct net_device *dev = dp->data; struct airo_info *ai = dev->ml_priv; @@ -5163,7 +5163,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file) } static void proc_APList_on_close( struct inode *inode, struct file *file ) { - struct proc_data *data = (struct proc_data *)file->private_data; + struct proc_data *data = file->private_data; struct proc_dir_entry *dp = PDE(inode); struct net_device *dev = dp->data; struct airo_info *ai = dev->ml_priv; @@ -5309,7 +5309,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) { memset(key, 0, sizeof(key)); - data = (struct proc_data *)file->private_data; + data = file->private_data; if ( !data->writelen ) return; if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' && @@ -5363,7 +5363,7 @@ static int proc_wepkey_open( struct inode *inode, struct file *file ) if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; memset(&wkr, 0, sizeof(wkr)); - data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; @@ -5409,7 +5409,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file) if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; - data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; @@ -5453,7 +5453,7 @@ static int proc_APList_open( struct inode *inode, struct file *file ) { if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; - 
data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; @@ -5495,7 +5495,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; - data = (struct proc_data *)file->private_data; + data = file->private_data; if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) { kfree (file->private_data); return -ENOMEM; diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 8a2d4afc74f8dacbb2822b88c441fe0534711ef5..d5140a87f0737d92adde6ce9bb725d679be16538 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -7,6 +7,7 @@ * Copyright (c) 2004 Balint Seeber * Copyright (c) 2007 Guido Guenther * Copyright (c) 2007 Kalle Valo + * Copyright (c) 2010 Sebastian Smolorz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -88,22 +89,19 @@ #define DBG_DEFAULTS 0 /* Use our own dbg macro */ -#define at76_dbg(bits, format, arg...) \ - do { \ - if (at76_debug & (bits)) \ - printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , \ - ## arg); \ - } while (0) - -#define at76_dbg_dump(bits, buf, len, format, arg...) \ - do { \ - if (at76_debug & (bits)) { \ - printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , \ - ## arg); \ - print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, \ - buf, len); \ - } \ - } while (0) +#define at76_dbg(bits, format, arg...) \ +do { \ + if (at76_debug & (bits)) \ + printk(KERN_DEBUG DRIVER_NAME ": " format "\n", ##arg); \ +} while (0) + +#define at76_dbg_dump(bits, buf, len, format, arg...) 
\ +do { \ + if (at76_debug & (bits)) { \ + printk(KERN_DEBUG DRIVER_NAME ": " format "\n", ##arg); \ + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); \ + } \ +} while (0) static uint at76_debug = DBG_DEFAULTS; @@ -305,7 +303,7 @@ struct dfu_status { unsigned char poll_timeout[3]; unsigned char state; unsigned char string; -} __attribute__((packed)); +} __packed; static inline int at76_is_intersil(enum board_type board) { @@ -657,8 +655,8 @@ static int at76_get_hw_config(struct at76_priv *priv) exit: kfree(hwcfg); if (ret < 0) - printk(KERN_ERR "%s: cannot get HW Config (error %d)\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n", + ret); return ret; } @@ -793,8 +791,9 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd) do { status = at76_get_cmd_status(priv->udev, cmd); if (status < 0) { - printk(KERN_ERR "%s: at76_get_cmd_status failed: %d\n", - wiphy_name(priv->hw->wiphy), status); + wiphy_err(priv->hw->wiphy, + "at76_get_cmd_status failed: %d\n", + status); break; } @@ -809,9 +808,8 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd) schedule_timeout_interruptible(HZ / 10); /* 100 ms */ if (time_after(jiffies, timeout)) { - printk(KERN_ERR - "%s: completion timeout for command %d\n", - wiphy_name(priv->hw->wiphy), cmd); + wiphy_err(priv->hw->wiphy, + "completion timeout for command %d\n", cmd); status = -ETIMEDOUT; break; } @@ -832,9 +830,9 @@ static int at76_set_mib(struct at76_priv *priv, struct set_mib_buffer *buf) ret = at76_wait_completion(priv, CMD_SET_MIB); if (ret != CMD_STATUS_COMPLETE) { - printk(KERN_INFO - "%s: set_mib: at76_wait_completion failed " - "with %d\n", wiphy_name(priv->hw->wiphy), ret); + wiphy_info(priv->hw->wiphy, + "set_mib: at76_wait_completion failed with %d\n", + ret); ret = -EIO; } @@ -854,8 +852,8 @@ static int at76_set_radio(struct at76_priv *priv, int enable) ret = at76_set_card_command(priv->udev, cmd, NULL, 0); if (ret < 0) - printk(KERN_ERR "%s: at76_set_card_command(%d) failed: %d\n", - wiphy_name(priv->hw->wiphy), cmd, ret); + wiphy_err(priv->hw->wiphy, + "at76_set_card_command(%d) failed: %d\n", cmd, ret); else ret = 1; @@ -875,8 +873,8 @@ static int at76_set_pm_mode(struct at76_priv *priv) ret = at76_set_mib(priv, &priv->mib_buf); if (ret < 0) - printk(KERN_ERR "%s: set_mib (pm_mode) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "set_mib (pm_mode) failed: %d\n", + ret); return ret; } @@ -892,8 +890,8 @@ static int at76_set_preamble(struct at76_priv *priv, u8 type) ret = at76_set_mib(priv, &priv->mib_buf); if (ret < 0) - printk(KERN_ERR "%s: set_mib (preamble) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "set_mib (preamble) failed: %d\n", + ret); return ret; } @@ -909,8 +907,8 @@ static int at76_set_frag(struct at76_priv *priv, u16 size) ret = at76_set_mib(priv, &priv->mib_buf); if (ret < 0) - printk(KERN_ERR "%s: set_mib (frag threshold) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "set_mib (frag threshold) failed: %d\n", ret); return ret; } @@ -926,8 +924,7 @@ static int at76_set_rts(struct at76_priv *priv, u16 size) ret = at76_set_mib(priv, &priv->mib_buf); if (ret < 0) - printk(KERN_ERR "%s: set_mib (rts) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "set_mib (rts) failed: %d\n", ret); return ret; } @@ -943,8 +940,8 @@ static int at76_set_autorate_fallback(struct at76_priv *priv, int onoff) ret = 
at76_set_mib(priv, &priv->mib_buf); if (ret < 0) - printk(KERN_ERR "%s: set_mib (autorate fallback) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "set_mib (autorate fallback) failed: %d\n", ret); return ret; } @@ -962,8 +959,8 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, sizeof(struct mib_mac_addr)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (MAC_ADDR) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (mac_addr) failed: %d\n", ret); goto exit; } @@ -991,8 +988,8 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, sizeof(struct mib_mac_wep)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (MAC_WEP) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (mac_wep) failed: %d\n", ret); goto exit; } @@ -1028,8 +1025,8 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, sizeof(struct mib_mac_mgmt)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (MAC_MGMT) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (mac_mgmt) failed: %d\n", ret); goto exit; } @@ -1064,8 +1061,8 @@ static void at76_dump_mib_mac(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (MAC) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (mac) failed: %d\n", ret); goto exit; } @@ -1101,8 +1098,8 @@ static void at76_dump_mib_phy(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (PHY) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (phy) failed: %d\n", ret); goto exit; } @@ -1134,8 +1131,8 @@ static void at76_dump_mib_local(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (LOCAL) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (local) failed: %d\n", ret); goto exit; } @@ -1160,8 +1157,8 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, sizeof(struct mib_mdomain)); if (ret < 0) { - printk(KERN_ERR "%s: at76_get_mib (MDOMAIN) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "at76_get_mib (mdomain) failed: %d\n", ret); goto exit; } @@ -1232,16 +1229,16 @@ static int at76_submit_rx_urb(struct at76_priv *priv) struct sk_buff *skb = priv->rx_skb; if (!priv->rx_urb) { - printk(KERN_ERR "%s: %s: priv->rx_urb is NULL\n", - wiphy_name(priv->hw->wiphy), __func__); + wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n", + __func__); return -EFAULT; } if (!skb) { skb = dev_alloc_skb(sizeof(struct at76_rx_buffer)); if (!skb) { - printk(KERN_ERR "%s: cannot allocate rx skbuff\n", - wiphy_name(priv->hw->wiphy)); + wiphy_err(priv->hw->wiphy, + "cannot allocate rx skbuff\n"); ret = -ENOMEM; goto exit; } @@ -1260,15 +1257,14 @@ static int at76_submit_rx_urb(struct at76_priv *priv) at76_dbg(DBG_DEVSTART, "usb_submit_urb returned -ENODEV"); else - printk(KERN_ERR "%s: rx, usb_submit_urb failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "rx, 
usb_submit_urb failed: %d\n", ret); } exit: if (ret < 0 && ret != -ENODEV) - printk(KERN_ERR "%s: cannot submit rx urb - please unload the " - "driver and/or power cycle the device\n", - wiphy_name(priv->hw->wiphy)); + wiphy_err(priv->hw->wiphy, + "cannot submit rx urb - please unload the driver and/or power cycle the device\n"); return ret; } @@ -1437,8 +1433,8 @@ static int at76_startup_device(struct at76_priv *priv) ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config, sizeof(struct at76_card_config)); if (ret < 0) { - printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n", + ret); return ret; } @@ -1503,8 +1499,8 @@ static void at76_work_set_promisc(struct work_struct *work) ret = at76_set_mib(priv, &priv->mib_buf); if (ret < 0) - printk(KERN_ERR "%s: set_mib (promiscuous_mode) failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, + "set_mib (promiscuous_mode) failed: %d\n", ret); mutex_unlock(&priv->mtx); } @@ -1649,6 +1645,58 @@ static struct fwentry *at76_load_firmware(struct usb_device *udev, return NULL; } +static int at76_join(struct at76_priv *priv) +{ + struct at76_req_join join; + int ret; + + memset(&join, 0, sizeof(struct at76_req_join)); + memcpy(join.essid, priv->essid, priv->essid_size); + join.essid_size = priv->essid_size; + memcpy(join.bssid, priv->bssid, ETH_ALEN); + join.bss_type = INFRASTRUCTURE_MODE; + join.channel = priv->channel; + join.timeout = cpu_to_le16(2000); + + at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__); + ret = at76_set_card_command(priv->udev, CMD_JOIN, &join, + sizeof(struct at76_req_join)); + + if (ret < 0) { + wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n", + ret); + return 0; + } + + ret = at76_wait_completion(priv, CMD_JOIN); + at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret); + if (ret != CMD_STATUS_COMPLETE) { + wiphy_err(priv->hw->wiphy, "at76_wait_completion failed: %d\n", + ret); + return 0; + } + + at76_set_pm_mode(priv); + + return 0; +} + +static void at76_work_join_bssid(struct work_struct *work) +{ + struct at76_priv *priv = container_of(work, struct at76_priv, + work_join_bssid); + + if (priv->device_unplugged) + return; + + mutex_lock(&priv->mtx); + + if (is_valid_ether_addr(priv->bssid)) + at76_join(priv); + + mutex_unlock(&priv->mtx); +} + static void at76_mac80211_tx_callback(struct urb *urb) { struct at76_priv *priv = urb->context; @@ -1686,16 +1734,32 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) struct at76_priv *priv = hw->priv; struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; int padding, submit_len, ret; at76_dbg(DBG_MAC80211, "%s()", __func__); if (priv->tx_urb->status == -EINPROGRESS) { - printk(KERN_ERR "%s: %s called while tx urb is pending\n", - wiphy_name(priv->hw->wiphy), __func__); + wiphy_err(priv->hw->wiphy, + "%s called while tx urb is pending\n", __func__); return NETDEV_TX_BUSY; } + /* The following code lines are important when the device is going to + * authenticate with a new bssid. The driver must send CMD_JOIN before + * an authentication frame is transmitted. For this to succeed, the + * correct bssid of the AP must be known. 
As mac80211 does not inform + * drivers about the bssid prior to the authentication process the + * following workaround is necessary. If the TX frame is an + * authentication frame extract the bssid and send the CMD_JOIN. */ + if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) { + if (compare_ether_addr(priv->bssid, mgmt->bssid)) { + memcpy(priv->bssid, mgmt->bssid, ETH_ALEN); + ieee80211_queue_work(hw, &priv->work_join_bssid); + return NETDEV_TX_BUSY; + } + } + ieee80211_stop_queues(hw); at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */ @@ -1725,13 +1789,12 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) submit_len, at76_mac80211_tx_callback, priv); ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC); if (ret) { - printk(KERN_ERR "%s: error in tx submit urb: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret); if (ret == -EINVAL) - printk(KERN_ERR - "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n", - wiphy_name(priv->hw->wiphy), priv->tx_urb, - priv->tx_urb->hcpriv, priv->tx_urb->complete); + wiphy_err(priv->hw->wiphy, + "-einval: tx urb %p hcpriv %p complete %p\n", + priv->tx_urb, + priv->tx_urb->hcpriv, priv->tx_urb->complete); } return 0; @@ -1748,8 +1811,8 @@ static int at76_mac80211_start(struct ieee80211_hw *hw) ret = at76_submit_rx_urb(priv); if (ret < 0) { - printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); + wiphy_err(priv->hw->wiphy, "open: submit_rx_urb failed: %d\n", + ret); goto error; } @@ -1770,6 +1833,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw) at76_dbg(DBG_MAC80211, "%s()", __func__); cancel_delayed_work(&priv->dwork_hw_scan); + cancel_work_sync(&priv->work_join_bssid); cancel_work_sync(&priv->work_set_promisc); mutex_lock(&priv->mtx); @@ -1818,42 +1882,6 @@ static void at76_remove_interface(struct ieee80211_hw *hw, at76_dbg(DBG_MAC80211, "%s()", __func__); } -static int at76_join(struct at76_priv *priv) -{ - struct at76_req_join join; - int ret; - - memset(&join, 0, sizeof(struct at76_req_join)); - memcpy(join.essid, priv->essid, priv->essid_size); - join.essid_size = priv->essid_size; - memcpy(join.bssid, priv->bssid, ETH_ALEN); - join.bss_type = INFRASTRUCTURE_MODE; - join.channel = priv->channel; - join.timeout = cpu_to_le16(2000); - - at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__); - ret = at76_set_card_command(priv->udev, CMD_JOIN, &join, - sizeof(struct at76_req_join)); - - if (ret < 0) { - printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); - return 0; - } - - ret = at76_wait_completion(priv, CMD_JOIN); - at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret); - if (ret != CMD_STATUS_COMPLETE) { - printk(KERN_ERR "%s: at76_wait_completion failed: %d\n", - wiphy_name(priv->hw->wiphy), ret); - return 0; - } - - at76_set_pm_mode(priv); - - return 0; -} - static void at76_dwork_hw_scan(struct work_struct *work) { struct at76_priv *priv = container_of(work, struct at76_priv, @@ -2107,6 +2135,7 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev) mutex_init(&priv->mtx); INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc); INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx); + INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid); INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan); tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0); @@ -2281,14 +2310,12 @@ static int at76_init_new_device(struct 
at76_priv *priv, priv->mac80211_registered = 1; - printk(KERN_INFO "%s: USB %s, MAC %pM, firmware %d.%d.%d-%d\n", - wiphy_name(priv->hw->wiphy), - dev_name(&interface->dev), priv->mac_addr, - priv->fw_version.major, priv->fw_version.minor, - priv->fw_version.patch, priv->fw_version.build); - printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n", - wiphy_name(priv->hw->wiphy), - priv->regulatory_domain, priv->domain->name); + wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n", + dev_name(&interface->dev), priv->mac_addr, + priv->fw_version.major, priv->fw_version.minor, + priv->fw_version.patch, priv->fw_version.build); + wiphy_info(priv->hw->wiphy, "regulatory domain 0x%02x: %s\n", + priv->regulatory_domain, priv->domain->name); exit: return ret; @@ -2450,7 +2477,7 @@ static void at76_disconnect(struct usb_interface *interface) if (!priv) return; - printk(KERN_INFO "%s: disconnecting\n", wiphy_name(priv->hw->wiphy)); + wiphy_info(priv->hw->wiphy, "disconnecting\n"); at76_delete_device(priv); dev_printk(KERN_INFO, &interface->dev, "disconnected\n"); } @@ -2508,5 +2535,6 @@ MODULE_AUTHOR("Balint Seeber "); MODULE_AUTHOR("Pavel Roskin "); MODULE_AUTHOR("Guido Guenther "); MODULE_AUTHOR("Kalle Valo "); +MODULE_AUTHOR("Sebastian Smolorz "); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h index 1ec5ccffdbc03b54f87c13193fd2c1f7492d733f..4a37447dfc01e14e4a97210c9c0500b4a31dad16 100644 --- a/drivers/net/wireless/at76c50x-usb.h +++ b/drivers/net/wireless/at76c50x-usb.h @@ -99,7 +99,7 @@ struct hwcfg_r505 { u8 reserved2[14]; u8 cr15_values[14]; u8 reserved3[3]; -} __attribute__((packed)); +} __packed; struct hwcfg_rfmd { u8 cr20_values[14]; @@ -111,7 +111,7 @@ struct hwcfg_rfmd { u8 low_power_values[14]; u8 normal_power_values[14]; u8 reserved1[3]; -} __attribute__((packed)); +} __packed; struct hwcfg_intersil { u8 mac_addr[ETH_ALEN]; @@ -120,7 +120,7 @@ struct hwcfg_intersil { u8 pidvid[4]; u8 regulatory_domain; u8 reserved[1]; -} __attribute__((packed)); +} __packed; union at76_hwcfg { struct hwcfg_intersil i; @@ -149,14 +149,14 @@ struct at76_card_config { u8 ssid_len; u8 short_preamble; __le16 beacon_period; -} __attribute__((packed)); +} __packed; struct at76_command { u8 cmd; u8 reserved; __le16 size; u8 data[0]; -} __attribute__((packed)); +} __packed; /* Length of Atmel-specific Rx header before 802.11 frame */ #define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet) @@ -171,7 +171,7 @@ struct at76_rx_buffer { u8 noise_level; __le32 rx_time; u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; -} __attribute__((packed)); +} __packed; /* Length of Atmel-specific Tx header before 802.11 frame */ #define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet) @@ -182,7 +182,7 @@ struct at76_tx_buffer { u8 padding; u8 reserved[4]; u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; -} __attribute__((packed)); +} __packed; /* defines for scan_type below */ #define SCAN_TYPE_ACTIVE 0 @@ -198,7 +198,7 @@ struct at76_req_scan { __le16 max_channel_time; u8 essid_size; u8 international_scan; -} __attribute__((packed)); +} __packed; struct at76_req_ibss { u8 bssid[ETH_ALEN]; @@ -207,7 +207,7 @@ struct at76_req_ibss { u8 channel; u8 essid_size; u8 reserved[3]; -} __attribute__((packed)); +} __packed; struct at76_req_join { u8 bssid[ETH_ALEN]; @@ -217,7 +217,7 @@ struct at76_req_join { __le16 timeout; u8 essid_size; u8 reserved; -} __attribute__((packed)); +} __packed; struct set_mib_buffer { u8 type; @@ -229,7 
+229,7 @@ struct set_mib_buffer { __le16 word; u8 addr[ETH_ALEN]; } data; -} __attribute__((packed)); +} __packed; struct mib_local { u16 reserved0; @@ -241,14 +241,14 @@ struct mib_local { u16 reserved2; u8 preamble_type; u16 reserved3; -} __attribute__((packed)); +} __packed; struct mib_mac_addr { u8 mac_addr[ETH_ALEN]; u8 res[2]; /* ??? */ u8 group_addr[4][ETH_ALEN]; u8 group_addr_status[4]; -} __attribute__((packed)); +} __packed; struct mib_mac { __le32 max_tx_msdu_lifetime; @@ -269,7 +269,7 @@ struct mib_mac { u8 desired_bssid[ETH_ALEN]; u8 desired_bsstype; /* ad-hoc or infrastructure */ u8 reserved2; -} __attribute__((packed)); +} __packed; struct mib_mac_mgmt { __le16 beacon_period; @@ -292,7 +292,7 @@ struct mib_mac_mgmt { u8 multi_domain_capability_enabled; u8 country_string[3]; u8 reserved[3]; -} __attribute__((packed)); +} __packed; struct mib_mac_wep { u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ @@ -303,7 +303,7 @@ struct mib_mac_wep { __le32 wep_excluded_count; u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ -} __attribute__((packed)); +} __packed; struct mib_phy { __le32 ed_threshold; @@ -320,19 +320,19 @@ struct mib_phy { u8 current_cca_mode; u8 phy_type; u8 current_reg_domain; -} __attribute__((packed)); +} __packed; struct mib_fw_version { u8 major; u8 minor; u8 patch; u8 build; -} __attribute__((packed)); +} __packed; struct mib_mdomain { u8 tx_powerlevel[14]; u8 channel_list[14]; /* 0 for invalid channels */ -} __attribute__((packed)); +} __packed; struct at76_fw_header { __le32 crc; /* CRC32 of the whole image */ @@ -346,7 +346,7 @@ struct at76_fw_header { __le32 int_fw_len; /* internal firmware image length */ __le32 ext_fw_offset; /* external firmware image offset */ __le32 ext_fw_len; /* external firmware image length */ -} __attribute__((packed)); +} __packed; /* a description of a regulatory domain and the allowed channels */ struct reg_domain { @@ -387,6 +387,7 @@ struct at76_priv { /* work queues */ struct work_struct work_set_promisc; struct work_struct work_submit_rx; + struct work_struct work_join_bssid; struct delayed_work dwork_hw_scan; struct tasklet_struct rx_tasklet; diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c index cf6f5c4174a67b9cf5909806cf81708ada286a8a..4604de09a8b28dc22aa140793acb59b6bf5f59a9 100644 --- a/drivers/net/wireless/ath/ar9170/cmd.c +++ b/drivers/net/wireless/ath/ar9170/cmd.c @@ -48,8 +48,7 @@ int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len) err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL); if (err) - printk(KERN_DEBUG "%s: writing memory failed\n", - wiphy_name(ar->hw->wiphy)); + wiphy_debug(ar->hw->wiphy, "writing memory failed\n"); return err; } @@ -67,8 +66,8 @@ int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf), (u8 *) buf, 0, NULL); if (err) - printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n", - wiphy_name(ar->hw->wiphy), reg, val); + wiphy_debug(ar->hw->wiphy, "writing reg %#x (val %#x) failed\n", + reg, val); return err; } diff --git a/drivers/net/wireless/ath/ar9170/led.c b/drivers/net/wireless/ath/ar9170/led.c index 86c4e79f6bc84c8daecedecbab9db1343a40371f..832d90087f8a7984335fee4399691c73a44cca59 100644 --- a/drivers/net/wireless/ath/ar9170/led.c +++ b/drivers/net/wireless/ath/ar9170/led.c @@ -133,8 +133,8 @@ static int ar9170_register_led(struct ar9170 *ar, int i, char *name, err = 
led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds[i].l); if (err) - printk(KERN_ERR "%s: failed to register %s LED (%d).\n", - wiphy_name(ar->hw->wiphy), ar->leds[i].name, err); + wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n", + ar->leds[i].name, err); else ar->leds[i].registered = true; diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index 2abc8757899418ab6875576dc556b703324624be..c67b05f3bcbdcace54e806ccdf69de5793766d0c 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -198,12 +198,13 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb) struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; struct ieee80211_hdr *hdr = (void *) txc->frame_data; - printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d " - "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", - wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), - ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr), - le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), - jiffies_to_msecs(arinfo->timeout - jiffies)); + wiphy_debug(ar->hw->wiphy, + "=> FRAME [skb:%p, q:%d, DA:[%pM] s:%d " + "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", + skb, skb_get_queue_mapping(skb), + ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr), + le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), + jiffies_to_msecs(arinfo->timeout - jiffies)); } static void __ar9170_dump_txqueue(struct ar9170 *ar, @@ -213,8 +214,8 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar, int i = 0; printk(KERN_DEBUG "---[ cut here ]---\n"); - printk(KERN_DEBUG "%s: %d entries in queue.\n", - wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); + wiphy_debug(ar->hw->wiphy, "%d entries in queue.\n", + skb_queue_len(queue)); skb_queue_walk(queue, skb) { printk(KERN_DEBUG "index:%d =>\n", i++); @@ -244,15 +245,14 @@ static void __ar9170_dump_txstats(struct ar9170 *ar) { int i; - printk(KERN_DEBUG "%s: QoS queue stats\n", - wiphy_name(ar->hw->wiphy)); + wiphy_debug(ar->hw->wiphy, "qos queue stats\n"); for (i = 0; i < __AR9170_NUM_TXQ; i++) - printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d " - " stopped:%d\n", wiphy_name(ar->hw->wiphy), i, - ar->tx_stats[i].limit, ar->tx_stats[i].len, - skb_queue_len(&ar->tx_status[i]), - ieee80211_queue_stopped(ar->hw, i)); + wiphy_debug(ar->hw->wiphy, + "queue:%d limit:%d len:%d waitack:%d stopped:%d\n", + i, ar->tx_stats[i].limit, ar->tx_stats[i].len, + skb_queue_len(&ar->tx_status[i]), + ieee80211_queue_stopped(ar->hw, i)); } #endif /* AR9170_QUEUE_STOP_DEBUG */ @@ -274,9 +274,9 @@ static void ar9170_recycle_expired(struct ar9170 *ar, if (time_is_before_jiffies(arinfo->timeout)) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => " - "recycle\n", wiphy_name(ar->hw->wiphy), - jiffies, arinfo->timeout); + wiphy_debug(ar->hw->wiphy, + "[%ld > %ld] frame expired => recycle\n", + jiffies, arinfo->timeout); ar9170_print_txheader(ar, skb); #endif /* AR9170_QUEUE_DEBUG */ __skb_unlink(skb, queue); @@ -317,8 +317,8 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, break; default: - printk(KERN_ERR "%s: invalid tx_status response (%x).\n", - wiphy_name(ar->hw->wiphy), tx_status); + wiphy_err(ar->hw->wiphy, + "invalid tx_status response (%x)\n", tx_status); break; } @@ -339,8 +339,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) { #ifdef 
AR9170_QUEUE_STOP_DEBUG - printk(KERN_DEBUG "%s: wake queue %d\n", - wiphy_name(ar->hw->wiphy), queue); + wiphy_debug(ar->hw->wiphy, "wake queue %d\n", queue); __ar9170_dump_txstats(ar); #endif /* AR9170_QUEUE_STOP_DEBUG */ ieee80211_wake_queue(ar->hw, queue); @@ -387,9 +386,9 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar, if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n", - wiphy_name(ar->hw->wiphy), mac, - ieee80211_get_DA(hdr)); + wiphy_debug(ar->hw->wiphy, + "skip frame => da %pm != %pm\n", + mac, ieee80211_get_DA(hdr)); ar9170_print_txheader(ar, skb); #endif /* AR9170_QUEUE_DEBUG */ continue; @@ -400,8 +399,8 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar, if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n", - wiphy_name(ar->hw->wiphy), rate, r); + wiphy_debug(ar->hw->wiphy, + "skip frame => rate %d != %d\n", rate, r); ar9170_print_txheader(ar, skb); #endif /* AR9170_QUEUE_DEBUG */ continue; @@ -413,9 +412,9 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar, } #ifdef AR9170_QUEUE_DEBUG - printk(KERN_ERR "%s: ESS:[%pM] does not have any " - "outstanding frames in queue.\n", - wiphy_name(ar->hw->wiphy), mac); + wiphy_err(ar->hw->wiphy, + "ESS:[%pM] does not have any outstanding frames in queue.\n", + mac); __ar9170_dump_txqueue(ar, queue); #endif /* AR9170_QUEUE_DEBUG */ spin_unlock_irqrestore(&queue->lock, flags); @@ -444,8 +443,8 @@ static void ar9170_tx_janitor(struct work_struct *work) for (i = 0; i < __AR9170_NUM_TXQ; i++) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n", - wiphy_name(ar->hw->wiphy), i); + wiphy_debug(ar->hw->wiphy, "garbage collector scans queue:%d\n", + i); ar9170_dump_txqueue(ar, &ar->tx_pending[i]); ar9170_dump_txqueue(ar, &ar->tx_status[i]); #endif /* AR9170_QUEUE_DEBUG */ @@ -495,8 +494,9 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT; #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n", - wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q); + wiphy_debug(ar->hw->wiphy, + "recv tx_status for %pm, p:%08x, q:%d\n", + cmd->tx_status.dst, phy, q); #endif /* AR9170_QUEUE_DEBUG */ skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst, @@ -582,7 +582,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) break; default: - printk(KERN_INFO "received unhandled event %x\n", cmd->type); + pr_info("received unhandled event %x\n", cmd->type); print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len); break; } @@ -675,9 +675,9 @@ static int ar9170_rx_mac_status(struct ar9170 *ar, /* TODO: update netdevice's RX dropped/errors statistics */ if (ar9170_nag_limiter(ar)) - printk(KERN_DEBUG "%s: received frame with " - "suspicious error code (%#x).\n", - wiphy_name(ar->hw->wiphy), error); + wiphy_debug(ar->hw->wiphy, + "received frame with suspicious error code (%#x).\n", + error); return -EINVAL; } @@ -704,9 +704,9 @@ static int ar9170_rx_mac_status(struct ar9170 *ar, break; default: if (ar9170_nag_limiter(ar)) - printk(KERN_ERR "%s: invalid plcp cck rate " - "(%x).\n", wiphy_name(ar->hw->wiphy), - head->plcp[0]); + wiphy_err(ar->hw->wiphy, + "invalid plcp cck rate (%x).\n", + head->plcp[0]); return -EINVAL; } break; @@ -740,9 +740,9 @@ static int 
ar9170_rx_mac_status(struct ar9170 *ar, break; default: if (ar9170_nag_limiter(ar)) - printk(KERN_ERR "%s: invalid plcp ofdm rate " - "(%x).\n", wiphy_name(ar->hw->wiphy), - head->plcp[0]); + wiphy_err(ar->hw->wiphy, + "invalid plcp ofdm rate (%x).\n", + head->plcp[0]); return -EINVAL; } if (status->band == IEEE80211_BAND_2GHZ) @@ -761,8 +761,7 @@ static int ar9170_rx_mac_status(struct ar9170 *ar, default: if (ar9170_nag_limiter(ar)) - printk(KERN_ERR "%s: invalid modulation\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, "invalid modulation\n"); return -EINVAL; } @@ -863,8 +862,8 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) ar->rx_mpdu.has_plcp = true; } else { if (ar9170_nag_limiter(ar)) - printk(KERN_ERR "%s: plcp info is clipped.\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "plcp info is clipped.\n"); return ; } break; @@ -877,8 +876,8 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) phy = (void *)(buf + mpdu_len); } else { if (ar9170_nag_limiter(ar)) - printk(KERN_ERR "%s: frame tail is clipped.\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "frame tail is clipped.\n"); return ; } @@ -888,9 +887,8 @@ static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) if (!ar9170_nag_limiter(ar)) return ; - printk(KERN_ERR "%s: rx stream did not start " - "with a first_mpdu frame tag.\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "rx stream did not start with a first_mpdu frame tag.\n"); return ; } @@ -954,8 +952,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) if (!ar->rx_failover_missing) { /* this is no "short read". */ if (ar9170_nag_limiter(ar)) { - printk(KERN_ERR "%s: missing tag!\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "missing tag!\n"); goto err_telluser; } else goto err_silent; @@ -963,9 +961,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) if (ar->rx_failover_missing > tlen) { if (ar9170_nag_limiter(ar)) { - printk(KERN_ERR "%s: possible multi " - "stream corruption!\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "possible multi stream corruption!\n"); goto err_telluser; } else goto err_silent; @@ -997,9 +994,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) if (ar->rx_failover_missing) { /* TODO: handle double stream corruption. 
*/ if (ar9170_nag_limiter(ar)) { - printk(KERN_ERR "%s: double rx stream " - "corruption!\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "double rx stream corruption!\n"); goto err_telluser; } else goto err_silent; @@ -1042,9 +1038,9 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) if (tlen) { if (net_ratelimit()) - printk(KERN_ERR "%s: %d bytes of unprocessed " - "data left in rx stream!\n", - wiphy_name(ar->hw->wiphy), tlen); + wiphy_err(ar->hw->wiphy, + "%d bytes of unprocessed data left in rx stream!\n", + tlen); goto err_telluser; } @@ -1052,10 +1048,9 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) return ; err_telluser: - printk(KERN_ERR "%s: damaged RX stream data [want:%d, " - "data:%d, rx:%d, pending:%d ]\n", - wiphy_name(ar->hw->wiphy), clen, wlen, tlen, - ar->rx_failover_missing); + wiphy_err(ar->hw->wiphy, + "damaged RX stream data [want:%d, data:%d, rx:%d, pending:%d ]\n", + clen, wlen, tlen, ar->rx_failover_missing); if (ar->rx_failover_missing) print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET, @@ -1065,9 +1060,8 @@ void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET, skb->data, skb->len); - printk(KERN_ERR "%s: please check your hardware and cables, if " - "you see this message frequently.\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "If you see this message frequently, please check your hardware and cables.\n"); err_silent: if (ar->rx_failover_missing) { @@ -1384,10 +1378,10 @@ static void ar9170_tx(struct ar9170 *ar) if (remaining_space < frames) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: tx quota reached queue:%d, " - "remaining slots:%d, needed:%d\n", - wiphy_name(ar->hw->wiphy), i, remaining_space, - frames); + wiphy_debug(ar->hw->wiphy, + "tx quota reached queue:%d, " + "remaining slots:%d, needed:%d\n", + i, remaining_space, frames); #endif /* AR9170_QUEUE_DEBUG */ frames = remaining_space; } @@ -1396,18 +1390,14 @@ static void ar9170_tx(struct ar9170 *ar) ar->tx_stats[i].count += frames; if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: queue %d full\n", - wiphy_name(ar->hw->wiphy), i); - - printk(KERN_DEBUG "%s: stuck frames: ===>\n", - wiphy_name(ar->hw->wiphy)); + wiphy_debug(ar->hw->wiphy, "queue %d full\n", i); + wiphy_debug(ar->hw->wiphy, "stuck frames: ===>\n"); ar9170_dump_txqueue(ar, &ar->tx_pending[i]); ar9170_dump_txqueue(ar, &ar->tx_status[i]); #endif /* AR9170_QUEUE_DEBUG */ #ifdef AR9170_QUEUE_STOP_DEBUG - printk(KERN_DEBUG "%s: stop queue %d\n", - wiphy_name(ar->hw->wiphy), i); + wiphy_debug(ar->hw->wiphy, "stop queue %d\n", i); __ar9170_dump_txstats(ar); #endif /* AR9170_QUEUE_STOP_DEBUG */ ieee80211_stop_queue(ar->hw, i); @@ -1435,8 +1425,7 @@ static void ar9170_tx(struct ar9170 *ar) msecs_to_jiffies(AR9170_TX_TIMEOUT); #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: send frame q:%d =>\n", - wiphy_name(ar->hw->wiphy), i); + wiphy_debug(ar->hw->wiphy, "send frame q:%d =>\n", i); ar9170_print_txheader(ar, skb); #endif /* AR9170_QUEUE_DEBUG */ @@ -1453,26 +1442,25 @@ static void ar9170_tx(struct ar9170 *ar) } #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n", - wiphy_name(ar->hw->wiphy), i); + wiphy_debug(ar->hw->wiphy, + "ar9170_tx report for queue %d\n", i); - printk(KERN_DEBUG "%s: unprocessed pending frames left:\n", - wiphy_name(ar->hw->wiphy)); + wiphy_debug(ar->hw->wiphy, + "unprocessed pending frames left:\n"); ar9170_dump_txqueue(ar, 
&ar->tx_pending[i]); #endif /* AR9170_QUEUE_DEBUG */ if (unlikely(frames_failed)) { #ifdef AR9170_QUEUE_DEBUG - printk(KERN_DEBUG "%s: frames failed %d =>\n", - wiphy_name(ar->hw->wiphy), frames_failed); + wiphy_debug(ar->hw->wiphy, + "frames failed %d =>\n", frames_failed); #endif /* AR9170_QUEUE_DEBUG */ spin_lock_irqsave(&ar->tx_stats_lock, flags); ar->tx_stats[i].len -= frames_failed; ar->tx_stats[i].count -= frames_failed; #ifdef AR9170_QUEUE_STOP_DEBUG - printk(KERN_DEBUG "%s: wake queue %d\n", - wiphy_name(ar->hw->wiphy), i); + wiphy_debug(ar->hw->wiphy, "wake queue %d\n", i); __ar9170_dump_txstats(ar); #endif /* AR9170_QUEUE_STOP_DEBUG */ ieee80211_wake_queue(ar->hw, i); @@ -1917,6 +1905,24 @@ static int ar9170_get_stats(struct ieee80211_hw *hw, return 0; } +static int ar9170_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct ar9170 *ar = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + + if (idx != 0) + return -ENOENT; + + /* TODO: update noise value, e.g. call ar9170_set_channel */ + + survey->channel = conf->channel; + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = ar->noise[0]; + + return 0; +} + static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, const struct ieee80211_tx_queue_params *param) { @@ -1969,6 +1975,7 @@ static const struct ieee80211_ops ar9170_ops = { .get_tsf = ar9170_op_get_tsf, .set_key = ar9170_set_key, .get_stats = ar9170_get_stats, + .get_survey = ar9170_get_survey, .ampdu_action = ar9170_ampdu_action, }; diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c index 45a415ea809a8b886af02a56cc7b2a450d4833bb..0dbfcf79ac96b6abdab59dbe04512315e3681a48 100644 --- a/drivers/net/wireless/ath/ar9170/phy.c +++ b/drivers/net/wireless/ath/ar9170/phy.c @@ -670,8 +670,7 @@ static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz) ar9170_regwrite_finish(); err = ar9170_regwrite_result(); if (err) - printk(KERN_ERR "%s: rf init failed\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, "rf init failed\n"); return err; } @@ -1702,9 +1701,8 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, 0x200 | ar->phy_heavy_clip); if (err) { if (ar9170_nag_limiter(ar)) - printk(KERN_ERR "%s: failed to set " - "heavy clip\n", - wiphy_name(ar->hw->wiphy)); + wiphy_err(ar->hw->wiphy, + "failed to set heavy clip\n"); } } diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile index cc09595b781ae3ee7248c8a84222f68c495d3103..2242a140e4fe95c9dbcd17ca8d8ca728e5b3ff2e 100644 --- a/drivers/net/wireless/ath/ath5k/Makefile +++ b/drivers/net/wireless/ath/ath5k/Makefile @@ -13,5 +13,6 @@ ath5k-y += base.o ath5k-y += led.o ath5k-y += rfkill.o ath5k-y += ani.o +ath5k-y += sysfs.o ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o obj-$(CONFIG_ATH5K) += ath5k.o diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index f2311ab35504cc5f10ba92485b126c900b7e860d..26dbe65fedb05fbc8dfe737d381b49b61bd24eaf 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -74,8 +74,8 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level) const s8 fr[] = { -78, -80 }; #endif if (level < 0 || level >= ARRAY_SIZE(sz)) { - ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, - "level out of range %d", level); + ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range", + level); return; } @@ -106,8 +106,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int 
level) if (level < 0 || level >= ARRAY_SIZE(val) || level > ah->ah_sc->ani_state.max_spur_level) { - ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, - "level out of range %d", level); + ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range", + level); return; } @@ -130,8 +130,7 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level) const int val[] = { 0, 4, 8 }; if (level < 0 || level >= ARRAY_SIZE(val)) { - ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, - "level out of range %d", level); + ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level); return; } @@ -481,14 +480,15 @@ ath5k_ani_calibration(struct ath5k_hw *ah) struct ath5k_ani_state *as = &ah->ah_sc->ani_state; int listen, ofdm_high, ofdm_low, cck_high, cck_low; - if (as->ani_mode != ATH5K_ANI_MODE_AUTO) - return; - /* get listen time since last call and add it to the counter because we - * might not have restarted the "ani period" last time */ + * might not have restarted the "ani period" last time. + * always do this to calculate the busy time also in manual mode */ listen = ath5k_hw_ani_get_listen_time(ah, as); as->listen_time += listen; + if (as->ani_mode != ATH5K_ANI_MODE_AUTO) + return; + ath5k_ani_save_and_clear_phy_errors(ah, as); ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000; diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index 2785946f659a19efcbc2298126314b95dee721a7..ea6362a8988de33a51d4c916c5f82c55cd576e2e 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -204,6 +204,7 @@ #define AR5K_TUNE_TPC_TXPOWER false #define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */ #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ +#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */ #define AR5K_INIT_CARR_SENSE_EN 1 @@ -565,7 +566,7 @@ enum ath5k_pkt_type { ) /* - * DMA size definitions (2^n+2) + * DMA size definitions (2^(n+2)) */ enum ath5k_dmasize { AR5K_DMASIZE_4B = 0, @@ -1118,6 +1119,7 @@ struct ath5k_hw { /* Calibration timestamp */ unsigned long ah_cal_next_full; unsigned long ah_cal_next_ani; + unsigned long ah_cal_next_nf; /* Calibration mask */ u8 ah_cal_mask; @@ -1125,15 +1127,10 @@ struct ath5k_hw { /* * Function pointers */ - int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc, - u32 size, unsigned int flags); int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, unsigned int, unsigned int, int, enum ath5k_pkt_type, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); - int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, - unsigned int, unsigned int, unsigned int, unsigned int, - unsigned int, unsigned int); int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, struct ath5k_tx_status *); int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *, @@ -1148,6 +1145,9 @@ struct ath5k_hw { int ath5k_hw_attach(struct ath5k_softc *sc); void ath5k_hw_detach(struct ath5k_hw *ah); +int ath5k_sysfs_register(struct ath5k_softc *sc); +void ath5k_sysfs_unregister(struct ath5k_softc *sc); + /* LED functions */ int ath5k_init_leds(struct ath5k_softc *sc); void ath5k_led_enable(struct ath5k_softc *sc); @@ -1231,6 +1231,11 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time); /* Hardware Descriptor Functions */ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah); +int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + u32 size, 
unsigned int flags); +int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, + u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3); /* GPIO Functions */ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); @@ -1270,6 +1275,7 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah); int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); +void ath5k_hw_update_noise_floor(struct ath5k_hw *ah); /* Spur mitigation */ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, struct ieee80211_channel *channel); @@ -1280,6 +1286,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan); int ath5k_hw_phy_disable(struct ath5k_hw *ah); /* Antenna control */ void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode); +void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode); /* TX power setup */ int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower); diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 31c008042bfe066238edc359111047d8064693d3..b32e28caeee2b6c730a48c2c91aefb7b5708be25 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -352,8 +352,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc) */ void ath5k_hw_detach(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); - __set_bit(ATH_STAT_INVALID, ah->ah_sc->status); if (ah->ah_rf_banks != NULL) diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 648972df369d849fa8d2e6a1ba836953787aef63..0d5de2574dd106af6c3bdbad8e8028b4c24b3987 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -311,7 +311,8 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc, static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, struct ath5k_txq *txq, int padsize); -static inline void ath5k_txbuf_free(struct ath5k_softc *sc, + +static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf) { BUG_ON(!bf); @@ -321,9 +322,11 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc, PCI_DMA_TODEVICE); dev_kfree_skb_any(bf->skb); bf->skb = NULL; + bf->skbaddr = 0; + bf->desc->ds_data = 0; } -static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, +static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf) { struct ath5k_hw *ah = sc->ah; @@ -336,6 +339,8 @@ static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(bf->skb); bf->skb = NULL; + bf->skbaddr = 0; + bf->desc->ds_data = 0; } @@ -352,7 +357,6 @@ static void ath5k_txq_release(struct ath5k_softc *sc); static int ath5k_rx_start(struct ath5k_softc *sc); static void ath5k_rx_stop(struct ath5k_softc *sc); static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, - struct ath5k_desc *ds, struct sk_buff *skb, struct ath5k_rx_status *rs); static void ath5k_tasklet_rx(unsigned long data); @@ -384,7 +388,7 @@ static int ath5k_init(struct ath5k_softc *sc); static int ath5k_stop_locked(struct ath5k_softc *sc); static int ath5k_stop_hw(struct ath5k_softc *sc); static irqreturn_t ath5k_intr(int irq, void *dev_id); -static void ath5k_tasklet_reset(unsigned long data); +static void ath5k_reset_work(struct work_struct *work); static void 
ath5k_tasklet_calibrate(unsigned long data); @@ -578,7 +582,7 @@ ath5k_pci_probe(struct pci_dev *pdev, spin_lock_init(&sc->block); /* Set private data */ - pci_set_drvdata(pdev, hw); + pci_set_drvdata(pdev, sc); /* Setup interrupt handler */ ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); @@ -694,25 +698,23 @@ ath5k_pci_probe(struct pci_dev *pdev, static void __devexit ath5k_pci_remove(struct pci_dev *pdev) { - struct ieee80211_hw *hw = pci_get_drvdata(pdev); - struct ath5k_softc *sc = hw->priv; + struct ath5k_softc *sc = pci_get_drvdata(pdev); ath5k_debug_finish_device(sc); - ath5k_detach(pdev, hw); + ath5k_detach(pdev, sc->hw); ath5k_hw_detach(sc->ah); kfree(sc->ah); free_irq(pdev->irq, sc); pci_iounmap(pdev, sc->iobase); pci_release_region(pdev, 0); pci_disable_device(pdev); - ieee80211_free_hw(hw); + ieee80211_free_hw(sc->hw); } #ifdef CONFIG_PM_SLEEP static int ath5k_pci_suspend(struct device *dev) { - struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev)); - struct ath5k_softc *sc = hw->priv; + struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev)); ath5k_led_off(sc); return 0; @@ -721,8 +723,7 @@ static int ath5k_pci_suspend(struct device *dev) static int ath5k_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct ieee80211_hw *hw = pci_get_drvdata(pdev); - struct ath5k_softc *sc = hw->priv; + struct ath5k_softc *sc = pci_get_drvdata(pdev); /* * Suspend/Resume resets the PCI configuration space, so we have to @@ -768,7 +769,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) * return false w/o doing anything. MAC's that do * support it will return true w/o doing anything. */ - ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); + ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); + if (ret < 0) goto err; if (ret > 0) @@ -829,11 +831,12 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); - tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc); + INIT_WORK(&sc->reset_work, ath5k_reset_work); + ret = ath5k_eeprom_read_mac(ah, mac); if (ret) { ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", @@ -864,6 +867,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) ath5k_init_leds(sc); + ath5k_sysfs_register(sc); + return 0; err_queues: ath5k_txq_release(sc); @@ -899,6 +904,7 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); ath5k_unregister_leds(sc); + ath5k_sysfs_unregister(sc); /* * NB: can't reclaim these until after ieee80211_ifdetach * returns because we'll get called back to reclaim node @@ -1111,8 +1117,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw) static int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) { - ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", - sc->curchan->center_freq, chan->center_freq); + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, + "channel set, resetting (%u -> %u MHz)\n", + sc->curchan->center_freq, chan->center_freq); /* * To switch channels clear any pending DMA operations; @@ -1228,21 +1235,23 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) * not get overrun under high load (as can happen with a * 5212 
when ANI processing enables PHY error frames). * - * To insure the last descriptor is self-linked we create + * To ensure the last descriptor is self-linked we create * each descriptor as self-linked and add it to the end. As * each additional descriptor is added the previous self-linked - * entry is ``fixed'' naturally. This should be safe even + * entry is "fixed" naturally. This should be safe even * if DMA is happening. When processing RX interrupts we * never remove/process the last, self-linked, entry on the - * descriptor list. This insures the hardware always has + * descriptor list. This ensures the hardware always has * someplace to write a new frame. */ ds = bf->desc; ds->ds_link = bf->daddr; /* link to self */ ds->ds_data = bf->skbaddr; - ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); - if (ret) + ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); + if (ret) { + ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__); return ret; + } if (sc->rxlink != NULL) *sc->rxlink = bf->daddr; @@ -1347,7 +1356,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, mrr_tries[i] = info->control.rates[i + 1].count; } - ah->ah_setup_mrr_tx_desc(ah, ds, + ath5k_hw_setup_mrr_tx_desc(ah, ds, mrr_rate[0], mrr_tries[0], mrr_rate[1], mrr_tries[1], mrr_rate[2], mrr_tries[2]); @@ -1443,17 +1452,20 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev) { struct ath5k_buf *bf; - ath5k_txbuf_free(sc, sc->bbuf); + ath5k_txbuf_free_skb(sc, sc->bbuf); list_for_each_entry(bf, &sc->txbuf, list) - ath5k_txbuf_free(sc, bf); + ath5k_txbuf_free_skb(sc, bf); list_for_each_entry(bf, &sc->rxbuf, list) - ath5k_rxbuf_free(sc, bf); + ath5k_rxbuf_free_skb(sc, bf); /* Free memory associated with all descriptors */ pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); + sc->desc = NULL; + sc->desc_daddr = 0; kfree(sc->bufptr); sc->bufptr = NULL; + sc->bbuf = NULL; } @@ -1602,7 +1614,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) list_for_each_entry_safe(bf, bf0, &txq->q, list) { ath5k_debug_printtxbuf(sc, bf); - ath5k_txbuf_free(sc, bf); + ath5k_txbuf_free_skb(sc, bf); spin_lock_bh(&sc->txbuflock); list_move_tail(&bf->list, &sc->txbuf); @@ -1716,13 +1728,11 @@ ath5k_rx_stop(struct ath5k_softc *sc) ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ ath5k_debug_printrxbuffs(sc, ah); - - sc->rxlink = NULL; /* just in case */ } static unsigned int -ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, - struct sk_buff *skb, struct ath5k_rx_status *rs) +ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb, + struct ath5k_rx_status *rs) { struct ath5k_hw *ah = sc->ah; struct ath_common *common = ath5k_hw_common(ah); @@ -1889,9 +1899,138 @@ static int ath5k_remove_padding(struct sk_buff *skb) } static void -ath5k_tasklet_rx(unsigned long data) +ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb, + struct ath5k_rx_status *rs) { struct ieee80211_rx_status *rxs; + + /* The MAC header is padded to have 32-bit boundary if the + * packet payload is non-zero. The general calculation for + * padsize would take into account odd header lengths: + * padsize = (4 - hdrlen % 4) % 4; However, since only + * even-length headers are used, padding can only be 0 or 2 + * bytes and we can optimize this a bit. In addition, we must + * not try to remove padding from short control frames that do + * not have payload. 
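
The comment above argues that, with the even-length 802.11 headers seen here, the generic padsize formula can only yield 0 or 2. A small self-contained illustration of that simplification (hypothetical helpers, not the driver's own ath5k_remove_padding()):

  /* General form: pad the MAC header to the next 32-bit boundary. */
  static inline int pad_generic(int hdrlen)
  {
          return (4 - hdrlen % 4) % 4;        /* 0..3 bytes */
  }

  /* For even hdrlen, hdrlen % 4 is 0 or 2, so this collapses to: */
  static inline int pad_even_hdr(int hdrlen)
  {
          return (hdrlen & 3) ? 2 : 0;        /* 0 or 2 bytes */
  }
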
*/ + ath5k_remove_padding(skb); + + rxs = IEEE80211_SKB_RXCB(skb); + + rxs->flag = 0; + if (unlikely(rs->rs_status & AR5K_RXERR_MIC)) + rxs->flag |= RX_FLAG_MMIC_ERROR; + + /* + * always extend the mac timestamp, since this information is + * also needed for proper IBSS merging. + * + * XXX: it might be too late to do it here, since rs_tstamp is + * 15bit only. that means TSF extension has to be done within + * 32768usec (about 32ms). it might be necessary to move this to + * the interrupt handler, like it is done in madwifi. + * + * Unfortunately we don't know when the hardware takes the rx + * timestamp (beginning of phy frame, data frame, end of rx?). + * The only thing we know is that it is hardware specific... + * On AR5213 it seems the rx timestamp is at the end of the + * frame, but i'm not sure. + * + * NOTE: mac80211 defines mactime at the beginning of the first + * data symbol. Since we don't have any time references it's + * impossible to comply to that. This affects IBSS merge only + * right now, so it's not too bad... + */ + rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp); + rxs->flag |= RX_FLAG_TSFT; + + rxs->freq = sc->curchan->center_freq; + rxs->band = sc->curband->band; + + rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi; + + rxs->antenna = rs->rs_antenna; + + if (rs->rs_antenna > 0 && rs->rs_antenna < 5) + sc->stats.antenna_rx[rs->rs_antenna]++; + else + sc->stats.antenna_rx[0]++; /* invalid */ + + rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate); + rxs->flag |= ath5k_rx_decrypted(sc, skb, rs); + + if (rxs->rate_idx >= 0 && rs->rs_rate == + sc->curband->bitrates[rxs->rate_idx].hw_value_short) + rxs->flag |= RX_FLAG_SHORTPRE; + + ath5k_debug_dump_skb(sc, skb, "RX ", 0); + + ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi); + + /* check beacons in IBSS mode */ + if (sc->opmode == NL80211_IFTYPE_ADHOC) + ath5k_check_ibss_tsf(sc, skb, rxs); + + ieee80211_rx(sc->hw, skb); +} + +/** ath5k_frame_receive_ok() - Do we want to receive this frame or not? + * + * Check if we want to further process this frame or not. Also update + * statistics. Return true if we want this frame, false if not. + */ +static bool +ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs) +{ + sc->stats.rx_all_count++; + + if (unlikely(rs->rs_status)) { + if (rs->rs_status & AR5K_RXERR_CRC) + sc->stats.rxerr_crc++; + if (rs->rs_status & AR5K_RXERR_FIFO) + sc->stats.rxerr_fifo++; + if (rs->rs_status & AR5K_RXERR_PHY) { + sc->stats.rxerr_phy++; + if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32) + sc->stats.rxerr_phy_code[rs->rs_phyerr]++; + return false; + } + if (rs->rs_status & AR5K_RXERR_DECRYPT) { + /* + * Decrypt error. If the error occurred + * because there was no hardware key, then + * let the frame through so the upper layers + * can process it. This is necessary for 5210 + * parts which have no way to setup a ``clear'' + * key cache entry. 
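
The mactime comment above relies on extending the 15-bit hardware rx timestamp with the full 64-bit TSF. ath5k_extend_tsf() itself is not part of this patch; the sketch below shows the usual way such an extension works, assuming the frame arrived less than 2^15 microseconds before the TSF is read:

  /* Illustration only: merge a truncated 15-bit rx timestamp into
   * the current 64-bit TSF, stepping back one 32768us window if the
   * low TSF bits have already wrapped past the rx timestamp. */
  static u64 extend_tsf15(u64 tsf, u32 rstamp)
  {
          u64 mactime = (tsf & ~0x7fffULL) | rstamp;

          if ((tsf & 0x7fff) < rstamp)
                  mactime -= 0x8000;
          return mactime;
  }
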
+ * + * XXX do key cache faulting + */ + sc->stats.rxerr_decrypt++; + if (rs->rs_keyix == AR5K_RXKEYIX_INVALID && + !(rs->rs_status & AR5K_RXERR_CRC)) + return true; + } + if (rs->rs_status & AR5K_RXERR_MIC) { + sc->stats.rxerr_mic++; + return true; + } + + /* let crypto-error packets fall through in MNTR */ + if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || + sc->opmode != NL80211_IFTYPE_MONITOR) + return false; + } + + if (unlikely(rs->rs_more)) { + sc->stats.rxerr_jumbo++; + return false; + } + return true; +} + +static void +ath5k_tasklet_rx(unsigned long data) +{ struct ath5k_rx_status rs = {}; struct sk_buff *skb, *next_skb; dma_addr_t next_skb_addr; @@ -1901,7 +2040,6 @@ ath5k_tasklet_rx(unsigned long data) struct ath5k_buf *bf; struct ath5k_desc *ds; int ret; - int rx_flag; spin_lock(&sc->rxbuflock); if (list_empty(&sc->rxbuf)) { @@ -1909,8 +2047,6 @@ ath5k_tasklet_rx(unsigned long data) goto unlock; } do { - rx_flag = 0; - bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); BUG_ON(bf->skb == NULL); skb = bf->skb; @@ -1926,137 +2062,30 @@ ath5k_tasklet_rx(unsigned long data) else if (unlikely(ret)) { ATH5K_ERR(sc, "error in processing rx descriptor\n"); sc->stats.rxerr_proc++; - spin_unlock(&sc->rxbuflock); - return; + break; } - sc->stats.rx_all_count++; - - if (unlikely(rs.rs_status)) { - if (rs.rs_status & AR5K_RXERR_CRC) - sc->stats.rxerr_crc++; - if (rs.rs_status & AR5K_RXERR_FIFO) - sc->stats.rxerr_fifo++; - if (rs.rs_status & AR5K_RXERR_PHY) { - sc->stats.rxerr_phy++; - if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32) - sc->stats.rxerr_phy_code[rs.rs_phyerr]++; - goto next; - } - if (rs.rs_status & AR5K_RXERR_DECRYPT) { - /* - * Decrypt error. If the error occurred - * because there was no hardware key, then - * let the frame through so the upper layers - * can process it. This is necessary for 5210 - * parts which have no way to setup a ``clear'' - * key cache entry. - * - * XXX do key cache faulting - */ - sc->stats.rxerr_decrypt++; - if (rs.rs_keyix == AR5K_RXKEYIX_INVALID && - !(rs.rs_status & AR5K_RXERR_CRC)) - goto accept; - } - if (rs.rs_status & AR5K_RXERR_MIC) { - rx_flag |= RX_FLAG_MMIC_ERROR; - sc->stats.rxerr_mic++; - goto accept; - } + if (ath5k_receive_frame_ok(sc, &rs)) { + next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); - /* let crypto-error packets fall through in MNTR */ - if ((rs.rs_status & - ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || - sc->opmode != NL80211_IFTYPE_MONITOR) + /* + * If we can't replace bf->skb with a new skb under + * memory pressure, just skip this packet + */ + if (!next_skb) goto next; - } - if (unlikely(rs.rs_more)) { - sc->stats.rxerr_jumbo++; - goto next; + pci_unmap_single(sc->pdev, bf->skbaddr, + common->rx_bufsize, + PCI_DMA_FROMDEVICE); - } -accept: - next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); + skb_put(skb, rs.rs_datalen); - /* - * If we can't replace bf->skb with a new skb under memory - * pressure, just skip this packet - */ - if (!next_skb) - goto next; - - pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize, - PCI_DMA_FROMDEVICE); - skb_put(skb, rs.rs_datalen); - - /* The MAC header is padded to have 32-bit boundary if the - * packet payload is non-zero. The general calculation for - * padsize would take into account odd header lengths: - * padsize = (4 - hdrlen % 4) % 4; However, since only - * even-length headers are used, padding can only be 0 or 2 - * bytes and we can optimize this a bit. 
In addition, we must - * not try to remove padding from short control frames that do - * not have payload. */ - ath5k_remove_padding(skb); - - rxs = IEEE80211_SKB_RXCB(skb); - - /* - * always extend the mac timestamp, since this information is - * also needed for proper IBSS merging. - * - * XXX: it might be too late to do it here, since rs_tstamp is - * 15bit only. that means TSF extension has to be done within - * 32768usec (about 32ms). it might be necessary to move this to - * the interrupt handler, like it is done in madwifi. - * - * Unfortunately we don't know when the hardware takes the rx - * timestamp (beginning of phy frame, data frame, end of rx?). - * The only thing we know is that it is hardware specific... - * On AR5213 it seems the rx timestamp is at the end of the - * frame, but i'm not sure. - * - * NOTE: mac80211 defines mactime at the beginning of the first - * data symbol. Since we don't have any time references it's - * impossible to comply to that. This affects IBSS merge only - * right now, so it's not too bad... - */ - rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp); - rxs->flag = rx_flag | RX_FLAG_TSFT; - - rxs->freq = sc->curchan->center_freq; - rxs->band = sc->curband->band; - - rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi; - - rxs->antenna = rs.rs_antenna; - - if (rs.rs_antenna > 0 && rs.rs_antenna < 5) - sc->stats.antenna_rx[rs.rs_antenna]++; - else - sc->stats.antenna_rx[0]++; /* invalid */ - - rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); - rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); + ath5k_receive_frame(sc, skb, &rs); - if (rxs->rate_idx >= 0 && rs.rs_rate == - sc->curband->bitrates[rxs->rate_idx].hw_value_short) - rxs->flag |= RX_FLAG_SHORTPRE; - - ath5k_debug_dump_skb(sc, skb, "RX ", 0); - - ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi); - - /* check beacons in IBSS mode */ - if (sc->opmode == NL80211_IFTYPE_ADHOC) - ath5k_check_ibss_tsf(sc, skb, rxs); - - ieee80211_rx(sc->hw, skb); - - bf->skb = next_skb; - bf->skbaddr = next_skb_addr; + bf->skb = next_skb; + bf->skbaddr = next_skb_addr; + } next: list_move_tail(&bf->list, &sc->rxbuf); } while (ath5k_rxbuf_setup(sc, bf) == 0); @@ -2065,8 +2094,6 @@ ath5k_tasklet_rx(unsigned long data) } - - /*************\ * TX Handling * \*************/ @@ -2266,8 +2293,8 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) * frame contents are done as needed and the slot time is * also adjusted based on current state. * - * This is called from software irq context (beacontq or restq - * tasklets) or user context from ath5k_beacon_config. + * This is called from software irq context (beacontq tasklets) + * or user context from ath5k_beacon_config. 
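
The reworked rx loop above only hands a filled skb to mac80211 once a replacement buffer has been allocated; on allocation failure the frame is dropped and the old buffer stays on the ring, so reception never runs out of DMA buffers. A condensed, simplified fragment of that flow (not compilable on its own, all calls as in the hunks above):

  next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
  if (!next_skb)
          goto next;              /* drop frame, keep old buffer */

  pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
                   PCI_DMA_FROMDEVICE);
  skb_put(skb, rs.rs_datalen);
  ath5k_receive_frame(sc, skb, &rs);

  bf->skb = next_skb;             /* ring slot now owns the new skb */
  bf->skbaddr = next_skb_addr;
  next:
  list_move_tail(&bf->list, &sc->rxbuf);
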
*/ static void ath5k_beacon_send(struct ath5k_softc *sc) @@ -2298,7 +2325,9 @@ ath5k_beacon_send(struct ath5k_softc *sc) ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "stuck beacon time (%u missed)\n", sc->bmisscount); - tasklet_schedule(&sc->restq); + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, + "stuck beacon, resetting\n"); + ieee80211_queue_work(sc->hw, &sc->reset_work); } return; } @@ -2602,12 +2631,20 @@ ath5k_stop_locked(struct ath5k_softc *sc) if (!test_bit(ATH_STAT_INVALID, sc->status)) { ath5k_rx_stop(sc); ath5k_hw_phy_disable(ah); - } else - sc->rxlink = NULL; + } return 0; } +static void stop_tasklets(struct ath5k_softc *sc) +{ + tasklet_kill(&sc->rxtq); + tasklet_kill(&sc->txtq); + tasklet_kill(&sc->calib); + tasklet_kill(&sc->beacontq); + tasklet_kill(&sc->ani_tasklet); +} + /* * Stop the device, grabbing the top-level lock to protect * against concurrent entry through ath5k_init (which can happen @@ -2647,17 +2684,12 @@ ath5k_stop_hw(struct ath5k_softc *sc) ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "putting device to sleep\n"); } - ath5k_txbuf_free(sc, sc->bbuf); + ath5k_txbuf_free_skb(sc, sc->bbuf); mmiowb(); mutex_unlock(&sc->lock); - tasklet_kill(&sc->rxtq); - tasklet_kill(&sc->txtq); - tasklet_kill(&sc->restq); - tasklet_kill(&sc->calib); - tasklet_kill(&sc->beacontq); - tasklet_kill(&sc->ani_tasklet); + stop_tasklets(sc); ath5k_rfkill_hw_stop(sc->ah); @@ -2705,7 +2737,9 @@ ath5k_intr(int irq, void *dev_id) * Fatal errors are unrecoverable. * Typically these are caused by DMA errors. */ - tasklet_schedule(&sc->restq); + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, + "fatal int, resetting\n"); + ieee80211_queue_work(sc->hw, &sc->reset_work); } else if (unlikely(status & AR5K_INT_RXORN)) { /* * Receive buffers are full. Either the bus is busy or @@ -2717,8 +2751,11 @@ ath5k_intr(int irq, void *dev_id) * this guess is copied from the HAL. */ sc->stats.rxorn_intr++; - if (ah->ah_mac_srev < AR5K_SREV_AR5212) - tasklet_schedule(&sc->restq); + if (ah->ah_mac_srev < AR5K_SREV_AR5212) { + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, + "rx overrun, resetting\n"); + ieee80211_queue_work(sc->hw, &sc->reset_work); + } else tasklet_schedule(&sc->rxtq); } else { @@ -2731,7 +2768,7 @@ ath5k_intr(int irq, void *dev_id) * RXE bit is written, but it doesn't work at * least on older hardware revs. */ - sc->rxlink = NULL; + sc->stats.rxeol_intr++; } if (status & AR5K_INT_TXURN) { /* bump tx trigger level */ @@ -2764,14 +2801,6 @@ ath5k_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static void -ath5k_tasklet_reset(unsigned long data) -{ - struct ath5k_softc *sc = (void *)data; - - ath5k_reset(sc, sc->curchan); -} - /* * Periodically recalibrate the PHY to account * for temperature/environment changes. @@ -2785,10 +2814,6 @@ ath5k_tasklet_calibrate(unsigned long data) /* Only full calibration for now */ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; - /* Stop queues so that calibration - * doesn't interfere with tx */ - ieee80211_stop_queues(sc->hw); - ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", ieee80211_frequency_to_channel(sc->curchan->center_freq), sc->curchan->hw_value); @@ -2799,15 +2824,23 @@ ath5k_tasklet_calibrate(unsigned long data) * to load new gain values. 
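
All of the reset triggers in these hunks (stuck beacon, fatal interrupt, rx overrun, debugfs) now queue sc->reset_work instead of scheduling the old restq tasklet. The point of the conversion, condensed from the hunks in this patch: a work item runs in process context and may sleep, so the reset path can take sc->lock, which a tasklet could not do.

  INIT_WORK(&sc->reset_work, ath5k_reset_work);     /* in ath5k_attach() */

  ieee80211_queue_work(sc->hw, &sc->reset_work);    /* from irq/tasklet/debugfs */

  static void ath5k_reset_work(struct work_struct *work)
  {
          struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
                                                reset_work);

          mutex_lock(&sc->lock);                    /* not allowed in a tasklet */
          ath5k_reset(sc, sc->curchan);
          mutex_unlock(&sc->lock);
  }
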
*/ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); - ath5k_reset(sc, sc->curchan); + ieee80211_queue_work(sc->hw, &sc->reset_work); } if (ath5k_hw_phy_calibrate(ah, sc->curchan)) ATH5K_ERR(sc, "calibration of channel %u failed\n", ieee80211_frequency_to_channel( sc->curchan->center_freq)); - /* Wake queues */ - ieee80211_wake_queues(sc->hw); + /* Noise floor calibration interrupts rx/tx path while I/Q calibration + * doesn't. We stop the queues so that calibration doesn't interfere + * with TX and don't run it as often */ + if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { + ah->ah_cal_next_nf = jiffies + + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF); + ieee80211_stop_queues(sc->hw); + ath5k_hw_update_noise_floor(ah); + ieee80211_wake_queues(sc->hw); + } ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; } @@ -2895,6 +2928,8 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, /* * Reset the hardware. If chan is not NULL, then also pause rx/tx * and change to the given channel. + * + * This should be called with sc->lock. */ static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) @@ -2904,8 +2939,11 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); + ath5k_hw_set_imr(ah, 0); + synchronize_irq(sc->pdev->irq); + stop_tasklets(sc); + if (chan) { - ath5k_hw_set_imr(ah, 0); ath5k_txq_cleanup(sc); ath5k_rx_stop(sc); @@ -2926,6 +2964,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode); + ah->ah_cal_next_full = jiffies; + ah->ah_cal_next_ani = jiffies; + ah->ah_cal_next_nf = jiffies; + /* * Change channels and update the h/w rate map if we're switching; * e.g. 11a to 11b/g. 
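
The noise floor calibration above is gated on a separate, much longer interval (ATH5K_TUNE_CALIBRATION_INTERVAL_NF, 60000 ms in this patch) because it interrupts the rx/tx path. A small self-contained sketch of that jiffies deadline pattern (hypothetical helper, not driver code):

  #include <linux/jiffies.h>

  /* Return true and re-arm the deadline once the interval has
   * elapsed; the caller then stops the queues, runs the noise
   * floor update and wakes the queues again. */
  static bool cal_due(unsigned long *next_due, unsigned int interval_ms)
  {
          if (!time_is_before_eq_jiffies(*next_due))
                  return false;

          *next_due = jiffies + msecs_to_jiffies(interval_ms);
          return true;
  }
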
@@ -2947,6 +2989,16 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) return ret; } +static void ath5k_reset_work(struct work_struct *work) +{ + struct ath5k_softc *sc = container_of(work, struct ath5k_softc, + reset_work); + + mutex_lock(&sc->lock); + ath5k_reset(sc, sc->curchan); + mutex_unlock(&sc->lock); +} + static int ath5k_start(struct ieee80211_hw *hw) { return ath5k_init(hw->priv); @@ -3360,7 +3412,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) ath5k_debug_dump_skb(sc, skb, "BC ", 1); - ath5k_txbuf_free(sc, sc->bbuf); + ath5k_txbuf_free_skb(sc, sc->bbuf); sc->bbuf->skb = skb; ret = ath5k_beacon_setup(sc, sc->bbuf); if (ret) diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h index 56221bc7c8cd095208af6f8a591678df5f327e84..dc1241f9c4e83b9a331e6f6661ce689ef9dca226 100644 --- a/drivers/net/wireless/ath/ath5k/base.h +++ b/drivers/net/wireless/ath/ath5k/base.h @@ -47,6 +47,7 @@ #include #include #include +#include #include "ath5k.h" #include "debug.h" @@ -136,6 +137,7 @@ struct ath5k_statistics { unsigned int mib_intr; unsigned int rxorn_intr; + unsigned int rxeol_intr; }; #if CHAN_DEBUG @@ -189,7 +191,7 @@ struct ath5k_softc { unsigned int led_pin, /* GPIO pin for driving LED */ led_on; /* pin setting for LED on */ - struct tasklet_struct restq; /* reset tasklet */ + struct work_struct reset_work; /* deferred chip reset */ unsigned int rxbufsize; /* rx size based on mtu */ struct list_head rxbuf; /* receive buffer */ diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c index 74f007126f4100ce549e28b1569a43b5463eb42a..beae519aa735a63e91e7dc8ff2cab52b2493f695 100644 --- a/drivers/net/wireless/ath/ath5k/caps.c +++ b/drivers/net/wireless/ath/ath5k/caps.c @@ -34,7 +34,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) { u16 ee_header; - ATH5K_TRACE(ah->ah_sc); /* Capabilities stored in the EEPROM */ ee_header = ah->ah_capabilities.cap_eeprom.ee_header; @@ -123,8 +122,6 @@ int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result) { - ATH5K_TRACE(ah->ah_sc); - switch (cap_type) { case AR5K_CAP_NUM_TXQUEUES: if (result) { @@ -173,8 +170,6 @@ int ath5k_hw_get_capability(struct ath5k_hw *ah, int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id) { - ATH5K_TRACE(ah->ah_sc); - if (ah->ah_version == AR5K_AR5210) { AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA); @@ -186,8 +181,6 @@ int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, int ath5k_hw_disable_pspoll(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); - if (ah->ah_version == AR5K_AR5210) { AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 6fb5c5ffa5b1c08060de32dfb51f05c251e1a87a..4cccc29964f6ddd0c7665369b034651d099a90cc 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -239,6 +239,9 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf, "TSF\t\t0x%016llx\tTU: %08x\n", (unsigned long long)tsf, TSF_TO_TU(tsf)); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -278,7 +281,8 @@ static ssize_t write_file_reset(struct file *file, size_t count, loff_t *ppos) { struct ath5k_softc *sc = file->private_data; - 
tasklet_schedule(&sc->restq); + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n"); + ieee80211_queue_work(sc->hw, &sc->reset_work); return count; } @@ -307,7 +311,6 @@ static const struct { { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, - { ATH5K_DEBUG_TRACE, "trace", "trace function calls" }, { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, }; @@ -334,6 +337,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, sc->debug.level == dbg_info[i].level ? '+' : ' ', dbg_info[i].level, dbg_info[i].desc); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -426,6 +432,16 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf, "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n", (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0); + v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0); + len += snprintf(buf+len, sizeof(buf)-len, + "\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v); + v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1); + len += snprintf(buf+len, sizeof(buf)-len, + "AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v); + + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -535,6 +551,9 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf, len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n", st->tx_all_count); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -674,6 +693,9 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf, ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2))); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -729,6 +751,69 @@ static const struct file_operations fops_ani = { }; +/* debugfs: queues etc */ + +static ssize_t read_file_queue(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + char buf[700]; + unsigned int len = 0; + + struct ath5k_txq *txq; + struct ath5k_buf *bf, *bf0; + int i, n = 0; + + len += snprintf(buf+len, sizeof(buf)-len, + "available txbuffers: %d\n", sc->txbuf_len); + + for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) { + txq = &sc->txqs[i]; + + len += snprintf(buf+len, sizeof(buf)-len, + "%02d: %ssetup\n", i, txq->setup ? 
"" : "not "); + + if (!txq->setup) + continue; + + list_for_each_entry_safe(bf, bf0, &txq->q, list) + n++; + len += snprintf(buf+len, sizeof(buf)-len, " len: %d\n", n); + } + + if (len > sizeof(buf)) + len = sizeof(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_queue(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + char buf[20]; + + if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + return -EFAULT; + + if (strncmp(buf, "start", 5) == 0) + ieee80211_wake_queues(sc->hw); + else if (strncmp(buf, "stop", 4) == 0) + ieee80211_stop_queues(sc->hw); + + return count; +} + + +static const struct file_operations fops_queue = { + .read = read_file_queue, + .write = write_file_queue, + .open = ath5k_debugfs_open, + .owner = THIS_MODULE, +}; + + /* init */ void @@ -772,6 +857,11 @@ ath5k_debug_init_device(struct ath5k_softc *sc) S_IWUSR | S_IRUSR, sc->debug.debugfs_phydir, sc, &fops_ani); + + sc->debug.debugfs_queue = debugfs_create_file("queue", + S_IWUSR | S_IRUSR, + sc->debug.debugfs_phydir, sc, + &fops_queue); } void @@ -790,6 +880,7 @@ ath5k_debug_finish_device(struct ath5k_softc *sc) debugfs_remove(sc->debug.debugfs_antenna); debugfs_remove(sc->debug.debugfs_frameerrors); debugfs_remove(sc->debug.debugfs_ani); + debugfs_remove(sc->debug.debugfs_queue); debugfs_remove(sc->debug.debugfs_phydir); } @@ -852,7 +943,7 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done, ds, (unsigned long long)bf->daddr, ds->ds_link, ds->ds_data, rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1, - rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_0, + rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1, !done ? ' ' : (rs->rs_status == 0) ? '*' : '!'); } @@ -867,7 +958,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) return; - printk(KERN_DEBUG "rx queue %x, link %p\n", + printk(KERN_DEBUG "rxdp %x, rxlink %p\n", ath5k_hw_get_rxdp(ah), sc->rxlink); spin_lock_bh(&sc->rxbuflock); diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h index ddd5b3a99e8d8fb33d665d086d0c06b1199454dd..606ae94a9157806b736f3c9d890c36a2f1edb762 100644 --- a/drivers/net/wireless/ath/ath5k/debug.h +++ b/drivers/net/wireless/ath/ath5k/debug.h @@ -77,6 +77,7 @@ struct ath5k_dbg_info { struct dentry *debugfs_antenna; struct dentry *debugfs_frameerrors; struct dentry *debugfs_ani; + struct dentry *debugfs_queue; }; /** @@ -115,18 +116,12 @@ enum ath5k_debug_level { ATH5K_DEBUG_DUMP_RX = 0x00000100, ATH5K_DEBUG_DUMP_TX = 0x00000200, ATH5K_DEBUG_DUMPBANDS = 0x00000400, - ATH5K_DEBUG_TRACE = 0x00001000, ATH5K_DEBUG_ANI = 0x00002000, ATH5K_DEBUG_ANY = 0xffffffff }; #ifdef CONFIG_ATH5K_DEBUG -#define ATH5K_TRACE(_sc) do { \ - if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \ - printk(KERN_DEBUG "ath5k trace %s:%d\n", __func__, __LINE__); \ - } while (0) - #define ATH5K_DBG(_sc, _m, _fmt, ...) do { \ if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \ ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \ @@ -168,8 +163,6 @@ ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf); #include -#define ATH5K_TRACE(_sc) typecheck(struct ath5k_softc *, (_sc)) - static inline void __attribute__ ((format (printf, 3, 4))) ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) 
{} diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index 7d7b646ab65a1a3f4d1f4b632500a3a3484502da..43244382f213f67b4733c2cd9b50c0c7ce90db18 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c @@ -91,14 +91,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN; /* - * Verify and set header length - * XXX: I only found that on 5210 code, does it work on 5211 ? + * Verify and set header length (only 5210) */ if (ah->ah_version == AR5K_AR5210) { - if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN) + if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210) return -EINVAL; tx_ctl->tx_control_0 |= - AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN); + AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210); } /*Differences between 5210-5211*/ @@ -110,11 +109,11 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, case AR5K_PKT_TYPE_PIFS: frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS; default: - frame_type = type /*<< 2 ?*/; + frame_type = type; } tx_ctl->tx_control_0 |= - AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) | + AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) | AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE); } else { @@ -123,21 +122,30 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT); tx_ctl->tx_control_1 |= - AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE); + AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211); } + #define _TX_FLAGS(_c, _flag) \ if (flags & AR5K_TXDESC_##_flag) { \ tx_ctl->tx_control_##_c |= \ AR5K_2W_TX_DESC_CTL##_c##_##_flag; \ } - +#define _TX_FLAGS_5211(_c, _flag) \ + if (flags & AR5K_TXDESC_##_flag) { \ + tx_ctl->tx_control_##_c |= \ + AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \ + } _TX_FLAGS(0, CLRDMASK); - _TX_FLAGS(0, VEOL); _TX_FLAGS(0, INTREQ); _TX_FLAGS(0, RTSENA); - _TX_FLAGS(1, NOACK); + + if (ah->ah_version == AR5K_AR5211) { + _TX_FLAGS_5211(0, VEOL); + _TX_FLAGS_5211(1, NOACK); + } #undef _TX_FLAGS +#undef _TX_FLAGS_5211 /* * WEP crap @@ -147,7 +155,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index, - AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); + AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX); } /* @@ -156,7 +164,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, if ((ah->ah_version == AR5K_AR5210) && (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA))) tx_ctl->tx_control_1 |= rtscts_duration & - AR5K_2W_TX_DESC_CTL1_RTS_DURATION; + AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210; return 0; } @@ -176,7 +184,6 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, struct ath5k_hw_4w_tx_ctl *tx_ctl; unsigned int frame_len; - ATH5K_TRACE(ah->ah_sc); tx_ctl = &desc->ud.ds_tx5212.tx_ctl; /* @@ -256,7 +263,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, if (key_index != AR5K_TXKEYIX_INVALID) { tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index, - AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); + AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX); } /* @@ -278,13 +285,17 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, /* * Initialize a 4-word multi rate retry tx control descriptor on 5212 */ -static int +int 
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) { struct ath5k_hw_4w_tx_ctl *tx_ctl; + /* no mrr support for cards older than 5212 */ + if (ah->ah_version < AR5K_AR5212) + return 0; + /* * Rates can be 0 as long as the retry count is 0 too. * A zero rate and nonzero retry count will put the HW into a mode where @@ -324,15 +335,6 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, return 0; } -/* no mrr support for cards older than 5212 */ -static int -ath5k_hw_setup_no_mrr(struct ath5k_hw *ah, struct ath5k_desc *desc, - unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, - u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) -{ - return 0; -} - /* * Proccess the tx status descriptor on 5210/5211 */ @@ -342,8 +344,6 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, struct ath5k_hw_2w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; - ATH5K_TRACE(ah->ah_sc); - tx_ctl = &desc->ud.ds_tx5210.tx_ctl; tx_status = &desc->ud.ds_tx5210.tx_stat; @@ -396,8 +396,6 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, struct ath5k_hw_4w_tx_ctl *tx_ctl; struct ath5k_hw_tx_status *tx_status; - ATH5K_TRACE(ah->ah_sc); - tx_ctl = &desc->ud.ds_tx5212.tx_ctl; tx_status = &desc->ud.ds_tx5212.tx_stat; @@ -419,11 +417,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); ts->ts_antenna = (tx_status->tx_status_1 & - AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1; + AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1; ts->ts_status = 0; ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1, - AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX); + AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212); /* The longretry counter has the number of un-acked retries * for the final rate. 
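
ath5k_hw_setup_mrr_tx_desc() is now exported and carries the chip check itself, replacing the old ath5k_hw_setup_no_mrr() stub that sat behind a per-generation function pointer. Condensed shape of the change, restated from the hunks above and the updated caller in base.c earlier in this patch:

  /* inside ath5k_hw_setup_mrr_tx_desc(), now the only implementation: */
  /* no mrr support for cards older than 5212 */
  if (ah->ah_version < AR5K_AR5212)
          return 0;

  /* callers drop the ah_setup_mrr_tx_desc hook for a direct call: */
  ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
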
To get the total number of retries @@ -485,12 +483,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, /* * Initialize an rx control descriptor */ -static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, - u32 size, unsigned int flags) +int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + u32 size, unsigned int flags) { struct ath5k_hw_rx_ctl *rx_ctl; - ATH5K_TRACE(ah->ah_sc); rx_ctl = &desc->ud.ds_rx.rx_ctl; /* @@ -502,10 +499,11 @@ static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, */ memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc)); + if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN)) + return -EINVAL; + /* Setup descriptor */ rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN; - if (unlikely(rx_ctl->rx_control_1 != size)) - return -EINVAL; if (flags & AR5K_RXDESC_INTREQ) rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ; @@ -521,13 +519,15 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, { struct ath5k_hw_rx_status *rx_status; - rx_status = &desc->ud.ds_rx.u.rx_stat; + rx_status = &desc->ud.ds_rx.rx_stat; /* No frame received / not ready */ if (unlikely(!(rx_status->rx_status_1 & - AR5K_5210_RX_DESC_STATUS1_DONE))) + AR5K_5210_RX_DESC_STATUS1_DONE))) return -EINPROGRESS; + memset(rs, 0, sizeof(struct ath5k_rx_status)); + /* * Frame receive status */ @@ -537,15 +537,23 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); - rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0, - AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA); rs->rs_more = !!(rx_status->rx_status_0 & AR5K_5210_RX_DESC_STATUS0_MORE); - /* TODO: this timestamp is 13 bit, later on we assume 15 bit */ + /* TODO: this timestamp is 13 bit, later on we assume 15 bit! + * also the HAL code for 5210 says the timestamp is bits [10..22] of the + * TSF, and extends the timestamp here to 15 bit. + * we need to check on 5210... + */ rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); - rs->rs_status = 0; - rs->rs_phyerr = 0; + + if (ah->ah_version == AR5K_AR5211) + rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211); + else + rs->rs_antenna = (rx_status->rx_status_0 & + AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210) + ? 
2 : 1; /* * Key table status @@ -560,19 +568,21 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, * Receive/descriptor errors */ if (!(rx_status->rx_status_1 & - AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { + AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_CRC_ERROR) rs->rs_status |= AR5K_RXERR_CRC; - if (rx_status->rx_status_1 & - AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN) + /* only on 5210 */ + if ((ah->ah_version == AR5K_AR5210) && + (rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210)) rs->rs_status |= AR5K_RXERR_FIFO; if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { rs->rs_status |= AR5K_RXERR_PHY; - rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1, + rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); } @@ -588,22 +598,20 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, * Proccess the rx status descriptor on 5212 */ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, - struct ath5k_desc *desc, struct ath5k_rx_status *rs) + struct ath5k_desc *desc, + struct ath5k_rx_status *rs) { struct ath5k_hw_rx_status *rx_status; - struct ath5k_hw_rx_error *rx_err; - ATH5K_TRACE(ah->ah_sc); - rx_status = &desc->ud.ds_rx.u.rx_stat; - - /* Overlay on error */ - rx_err = &desc->ud.ds_rx.u.rx_err; + rx_status = &desc->ud.ds_rx.rx_stat; /* No frame received / not ready */ if (unlikely(!(rx_status->rx_status_1 & - AR5K_5212_RX_DESC_STATUS1_DONE))) + AR5K_5212_RX_DESC_STATUS1_DONE))) return -EINPROGRESS; + memset(rs, 0, sizeof(struct ath5k_rx_status)); + /* * Frame receive status */ @@ -619,15 +627,13 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, AR5K_5212_RX_DESC_STATUS0_MORE); rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); - rs->rs_status = 0; - rs->rs_phyerr = 0; /* * Key table status */ if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID) rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, - AR5K_5212_RX_DESC_STATUS1_KEY_INDEX); + AR5K_5212_RX_DESC_STATUS1_KEY_INDEX); else rs->rs_keyix = AR5K_RXKEYIX_INVALID; @@ -635,7 +641,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, * Receive/descriptor errors */ if (!(rx_status->rx_status_1 & - AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { + AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR) rs->rs_status |= AR5K_RXERR_CRC; @@ -643,9 +649,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { rs->rs_status |= AR5K_RXERR_PHY; - rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, - AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); - ath5k_ani_phy_error_report(ah, rs->rs_phyerr); + rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, + AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE); + if (!ah->ah_capabilities.cap_has_phyerr_counters) + ath5k_ani_phy_error_report(ah, rs->rs_phyerr); } if (rx_status->rx_status_1 & @@ -656,7 +663,6 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, AR5K_5212_RX_DESC_STATUS1_MIC_ERROR) rs->rs_status |= AR5K_RXERR_MIC; } - return 0; } @@ -665,29 +671,15 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, */ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) { - - if (ah->ah_version != AR5K_AR5210 && - ah->ah_version != AR5K_AR5211 && - ah->ah_version != AR5K_AR5212) - return -ENOTSUPP; - if (ah->ah_version == 
AR5K_AR5212) { - ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; - ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc; ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status; - } else { - ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; + ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status; + } else if (ah->ah_version <= AR5K_AR5211) { ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc; - ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_no_mrr; ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status; - } - - if (ah->ah_version == AR5K_AR5212) - ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status; - else if (ah->ah_version <= AR5K_AR5211) ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status; - + } else + return -ENOTSUPP; return 0; } - diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h index 64538fbe416732f5353433d32baad77e13039c48..b2adb2a281c2e9035cdd17f52d4c40386a4b408b 100644 --- a/drivers/net/wireless/ath/ath5k/desc.h +++ b/drivers/net/wireless/ath/ath5k/desc.h @@ -17,28 +17,24 @@ */ /* - * Internal RX/TX descriptor structures - * (rX: reserved fields possibily used by future versions of the ar5k chipset) + * RX/TX descriptor structures */ /* - * common hardware RX control descriptor + * Common hardware RX control descriptor */ struct ath5k_hw_rx_ctl { u32 rx_control_0; /* RX control word 0 */ u32 rx_control_1; /* RX control word 1 */ } __packed; -/* RX control word 0 field/sflags */ -#define AR5K_DESC_RX_CTL0 0x00000000 - /* RX control word 1 fields/flags */ -#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff -#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 +#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */ +#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */ /* - * common hardware RX status descriptor - * 5210/11 and 5212 differ only in the flags defined below + * Common hardware RX status descriptor + * 5210, 5211 and 5212 differ only in the fields and flags defined below */ struct ath5k_hw_rx_status { u32 rx_status_0; /* RX status word 0 */ @@ -47,81 +43,69 @@ struct ath5k_hw_rx_status { /* 5210/5211 */ /* RX status word 0 fields/flags */ -#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff -#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 -#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 +#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */ +#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */ +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210 0x00004000 /* [5210] receive on ant 1 */ +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 /* reception rate */ #define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15 -#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 /* rssi */ #define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19 -#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000 -#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211 0x38000000 /* [5211] receive antenna */ +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S 27 /* RX status word 1 fields/flags */ -#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 -#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 -#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 -#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008 -#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 -#define 
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 +#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */ +#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */ +#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */ +#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */ +#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decyption CRC failure */ +#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */ #define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5 -#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 -#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 +#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */ +#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decyption key index */ #define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9 -#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 +#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bit of TSF */ #define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15 -#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 +#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 /* key cache miss */ /* 5212 */ /* RX status word 0 fields/flags */ -#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff -#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 -#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 -#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 +#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */ +#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */ +#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 /* decompression CRC error */ +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 /* reception rate */ #define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15 -#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 /* rssi */ #define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20 -#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 /* receive antenna */ #define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28 /* RX status word 1 fields/flags */ -#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 -#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 -#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 -#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 -#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 -#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 -#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 -#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 +#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */ +#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* frame reception success */ +#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */ +#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 /* decryption CRC failure */ +#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 /* PHY error */ +#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 /* MIC decrypt error */ +#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */ +#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 /* decryption key index */ #define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9 -#define 
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 +#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 /* first 15bit of the TSF */ #define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16 -#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 - -/* - * common hardware RX error descriptor - */ -struct ath5k_hw_rx_error { - u32 rx_error_0; /* RX status word 0 */ - u32 rx_error_1; /* RX status word 1 */ -} __packed; - -/* RX error word 0 fields/flags */ -#define AR5K_RX_DESC_ERROR0 0x00000000 - -/* RX error word 1 fields/flags */ -#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00 -#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8 +#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 /* key cache miss */ +#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE 0x0000ff00 /* phy error code overlays key index and valid fields */ +#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S 8 /** * enum ath5k_phy_error_code - PHY Error codes */ enum ath5k_phy_error_code { - AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */ + AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */ AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */ AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */ AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */ AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */ - AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */ + AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */ AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */ AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */ /* these are specific to the 5212 */ @@ -148,112 +132,111 @@ struct ath5k_hw_2w_tx_ctl { } __packed; /* TX control word 0 fields/flags */ -#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff -#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/ -#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12 -#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 +#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */ +#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210 0x0003f000 /* [5210] header length */ +#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S 12 +#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 /* tx rate */ #define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18 -#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 -#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 -#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET 0x00800000 /*[5210]*/ -#define AR5K_2W_TX_DESC_CTL0_VEOL 0x00800000 /*[5211]*/ -#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE 0x1c000000 /*[5210]*/ -#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26 -#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 -#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 - +#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */ +#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210 0x00800000 /* [5210] long packet */ +#define AR5K_2W_TX_DESC_CTL0_VEOL_5211 0x00800000 /* [5211] virtual end-of-list */ +#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */ +#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 /* [5210] antenna selection */ +#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 /* [5211] antenna selection */ #define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \ (ah->ah_version == AR5K_AR5210 ? 
\ AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \ AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211) - #define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 -#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 -#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 +#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210 0x1c000000 /* [5210] frame type */ +#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S 26 +#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */ +#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* key is valid */ /* TX control word 1 fields/flags */ -#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff -#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 -#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000 -#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000 - -#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \ +#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */ +#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */ +#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 0x0007e000 /* [5210] key table index */ +#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211 0x000fe000 /* [5211] key table index */ +#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX \ (ah->ah_version == AR5K_AR5210 ? \ - AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \ - AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211) - -#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 -#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/ -#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20 -#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/ -#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/ + AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 : \ + AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211) +#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S 13 +#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211 0x00700000 /* [5211] frame type */ +#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S 20 +#define AR5K_2W_TX_DESC_CTL1_NOACK_5211 0x00800000 /* [5211] no ACK */ +#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210 0xfff80000 /* [5210] lower 13 bit of duration */ /* Frame types */ -#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00 -#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04 -#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08 -#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c -#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 1 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 2 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 3 +#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON 3 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4 +#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4 /* * 5212 hardware 4-word TX control descriptor */ struct ath5k_hw_4w_tx_ctl { u32 tx_control_0; /* TX control word 0 */ + u32 tx_control_1; /* TX control word 1 */ + u32 tx_control_2; /* TX control word 2 */ + u32 tx_control_3; /* TX control word 3 */ +} __packed; -#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff -#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 +/* TX control word 0 fields/flags */ +#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */ +#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 /* transmit power */ #define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16 -#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 -#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 -#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 -#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 +#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */ +#define 
AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 /* virtual end-of-list */ +#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */ +#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 /* TX antenna selection */ #define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 -#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 -#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 -#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 - - u32 tx_control_1; /* TX control word 1 */ +#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */ +#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* destination index valid */ +#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 /* precede frame with CTS */ -#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff -#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 -#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX 0x000fe000 -#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 -#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 +/* TX control word 1 fields/flags */ +#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */ +#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */ +#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX 0x000fe000 /* destination table index */ +#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S 13 +#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 /* frame type */ #define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20 -#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 -#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 +#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 /* no ACK */ +#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 /* compression processing */ #define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25 -#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 +#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 /* length of frame IV */ #define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27 -#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 +#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 /* length of frame ICV */ #define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29 - u32 tx_control_2; /* TX control word 2 */ - -#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff -#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE 0x00008000 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 -#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28 - - u32 tx_control_3; /* TX control word 3 */ - -#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f -#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 +/* TX control word 2 fields/flags */ +#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff /* RTS/CTS duration */ +#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN 0x00008000 /* frame duration update */ +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 /* series 0 max attempts */ +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 /* series 1 max attempts */ +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 /* series 2 max attempts */ +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 /* series 3 max attempts */ +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28 + +/* TX control word 3 fields/flags */ +#define 
AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f /* series 0 tx rate */ +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 /* series 1 tx rate */ #define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5 -#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 /* series 2 tx rate */ #define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10 -#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 /* series 3 tx rate */ #define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15 -#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 +#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */ #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20 -} __packed; /* * Common TX status descriptor @@ -264,37 +247,34 @@ struct ath5k_hw_tx_status { } __packed; /* TX status word 0 fields/flags */ -#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 -#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 -#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 -#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 -/*??? -#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT 0x000000f0 -#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S 4 -*/ -#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 +#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */ +#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 /* excessive retries */ +#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 /* FIFO underrun */ +#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 /* TX filter indication */ +/* according to the HAL sources the spec has short/long retry counts reversed. + * we have it reversed to the HAL sources as well, for 5210 and 5211. + * For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT, + * but used respectively as SHORT and LONG retry count in the code later. This + * is consistent with the definitions here... TODO: check */ +#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 /* short retry count */ #define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4 -/*??? 
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT 0x00000f00 -#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S 8 -*/ -#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 +#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 /* long retry count */ #define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8 -#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT 0x0000f000 -#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S 12 -#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 +#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211 0x0000f000 /* [5211+] virtual collision count */ +#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5212_S 12 +#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 /* TX timestamp */ #define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16 /* TX status word 1 fields/flags */ -#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 -#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe +#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 /* descriptor complete */ +#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe /* TX sequence number */ #define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1 -#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 +#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 /* signal strength of ACK */ #define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13 -#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX 0x00600000 -#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21 -#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000 -#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000 +#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212 0x00600000 /* [5212] final TX attempt series ix */ +#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S 21 +#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */ +#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */ /* * 5210/5211 hardware TX descriptor @@ -313,18 +293,15 @@ struct ath5k_hw_5212_tx_desc { } __packed; /* - * common hardware RX descriptor + * Common hardware RX descriptor */ struct ath5k_hw_all_rx_desc { - struct ath5k_hw_rx_ctl rx_ctl; - union { - struct ath5k_hw_rx_status rx_stat; - struct ath5k_hw_rx_error rx_err; - } u; + struct ath5k_hw_rx_ctl rx_ctl; + struct ath5k_hw_rx_status rx_stat; } __packed; /* - * Atheros hardware descriptor + * Atheros hardware DMA descriptor * This is read and written to by the hardware */ struct ath5k_desc { @@ -346,4 +323,3 @@ struct ath5k_desc { #define AR5K_TXDESC_CTSENA 0x0008 #define AR5K_TXDESC_INTREQ 0x0010 #define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/ - diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index 941b51130a6f8da74b7c273301b43246088d546c..484f31870ba8f2c287e8817a22563a4d61f40f4e 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -48,7 +48,6 @@ */ void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); ath5k_hw_reg_read(ah, AR5K_CR); } @@ -62,7 +61,6 @@ int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) { unsigned int i; - ATH5K_TRACE(ah->ah_sc); ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR); /* @@ -96,8 +94,6 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) */ void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) { - ATH5K_TRACE(ah->ah_sc); - ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); } @@ -125,7 +121,6 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) { u32 tx_queue; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* Return if queue is declared inactive */ @@ -186,7 
+181,6 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) unsigned int i = 40; u32 tx_queue, pending; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* Return if queue is declared inactive */ @@ -297,7 +291,6 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) { u16 tx_reg; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* @@ -340,7 +333,6 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) { u16 tx_reg; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* @@ -400,8 +392,6 @@ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) u32 trigger_level, imr; int ret = -EIO; - ATH5K_TRACE(ah->ah_sc); - /* * Disable interrupts by setting the mask */ @@ -451,7 +441,6 @@ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) */ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; } @@ -475,8 +464,6 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) { u32 data; - ATH5K_TRACE(ah->ah_sc); - /* * Read interrupt status from the Interrupt Status register * on 5210 diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index ed0263672d6d0cd9bf17d36d7d3c5126d8da248a..ae316fec4a6ae527817bda21cfd214210111e3b0 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -35,7 +35,6 @@ static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data) { u32 status, timeout; - ATH5K_TRACE(ah->ah_sc); /* * Initialize EEPROM access */ @@ -715,7 +714,7 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, /* Only one curve for RF5111 * find out which one and place - * in in pd_curves. + * in pd_curves. 
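For reference, the descriptor definitions in desc.h above keep the driver's usual convention: each field is described by a mask, plus a matching define with an _S suffix holding the shift of its least significant bit. Below is a minimal user-space sketch (not kernel code) of how such a pair is consumed, reusing two of the 5212 RX status masks quoted above; the FIELD() helper only mirrors the idea of the driver's AR5K_REG_MS() macro, and the status word in main() is made up.

#include <stdint.h>
#include <stdio.h>

#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN		0x00000fff
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE		0x000f8000
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S	15
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL	0x0ff00000
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S	20

/* same idea as the driver's AR5K_REG_MS(): mask the field, shift it down */
#define FIELD(_val, _flag)	(((_val) & (_flag)) >> _flag##_S)

int main(void)
{
	uint32_t rx_status_0 = 0x02f9804e;	/* made-up RX status word 0 */

	printf("len=%u rate=%u rssi=%u\n",
	       (unsigned)(rx_status_0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN),
	       (unsigned)FIELD(rx_status_0, AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE),
	       (unsigned)FIELD(rx_status_0, AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL));
	return 0;
}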
* Note: ee_x_gain is reversed here */ for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) { diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c index 64a27e73d02ed6f87cc9299ea78aa56603ad5a58..bc90503f4b7a9676db2c21dc2ff0dfa5177181c8 100644 --- a/drivers/net/wireless/ath/ath5k/gpio.c +++ b/drivers/net/wireless/ath/ath5k/gpio.c @@ -34,8 +34,6 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) /*5210 has different led mode handling*/ u32 led_5210; - ATH5K_TRACE(ah->ah_sc); - /*Reset led status*/ if (ah->ah_version != AR5K_AR5210) AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, @@ -82,7 +80,6 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) */ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) { - ATH5K_TRACE(ah->ah_sc); if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -98,7 +95,6 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) */ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) { - ATH5K_TRACE(ah->ah_sc); if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -114,7 +110,6 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) */ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) { - ATH5K_TRACE(ah->ah_sc); if (gpio >= AR5K_NUM_GPIO) return 0xffffffff; @@ -129,7 +124,6 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) { u32 data; - ATH5K_TRACE(ah->ah_sc); if (gpio >= AR5K_NUM_GPIO) return -EINVAL; @@ -153,7 +147,6 @@ void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, { u32 data; - ATH5K_TRACE(ah->ah_sc); if (gpio >= AR5K_NUM_GPIO) return; diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index 5212e275f1c7795531eb6a606d8cb1cbf8853359..86fdb6ddfaaa52d0abba9116c989438c0b44f851 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -59,8 +59,6 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) beacon_reg = 0; - ATH5K_TRACE(ah->ah_sc); - switch (op_mode) { case NL80211_IFTYPE_ADHOC: pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; @@ -173,7 +171,6 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high) */ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) { - ATH5K_TRACE(ah->ah_sc); if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) <= timeout) return -EINVAL; @@ -192,7 +189,6 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) */ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) { - ATH5K_TRACE(ah->ah_sc); if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) <= timeout) return -EINVAL; @@ -297,7 +293,6 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) u32 low_id, high_id; u32 pcu_reg; - ATH5K_TRACE(ah->ah_sc); /* Set new station ID */ memcpy(common->macaddr, mac, ETH_ALEN); @@ -357,7 +352,6 @@ void ath5k_hw_set_associd(struct ath5k_hw *ah) void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) { struct ath_common *common = ath5k_hw_common(ah); - ATH5K_TRACE(ah->ah_sc); /* Cache bssid mask so that we can restore it * on reset */ @@ -382,7 +376,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) */ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } @@ -397,7 +390,6 @@ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) */ void 
ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } @@ -406,8 +398,6 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) */ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) { - ATH5K_TRACE(ah->ah_sc); - /* Set the multicat filter */ ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); } @@ -427,7 +417,6 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) { u32 data, filter = 0; - ATH5K_TRACE(ah->ah_sc); filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER); /*Radar detection for 5212*/ @@ -457,8 +446,6 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) { u32 data = 0; - ATH5K_TRACE(ah->ah_sc); - /* Set PHY error filter register on 5212*/ if (ah->ah_version == AR5K_AR5212) { if (filter & AR5K_RX_FILTER_RADARERR) @@ -533,8 +520,6 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) WARN_ON( i == ATH5K_MAX_TSF_READ ); - ATH5K_TRACE(ah->ah_sc); - return (((u64)tsf_upper1 << 32) | tsf_lower); } @@ -548,8 +533,6 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) */ void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) { - ATH5K_TRACE(ah->ah_sc); - ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); } @@ -565,8 +548,6 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah) { u32 val; - ATH5K_TRACE(ah->ah_sc); - val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF; /* @@ -586,7 +567,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) { u32 timer1, timer2, timer3; - ATH5K_TRACE(ah->ah_sc); /* * Set the additional timers by mode */ @@ -674,7 +654,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry) unsigned int i, type; u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry)); @@ -749,8 +728,6 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, bool is_tkip; const u8 *key_ptr; - ATH5K_TRACE(ah->ah_sc); - is_tkip = (key->alg == ALG_TKIP); /* @@ -836,7 +813,6 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac) { u32 low_id, high_id; - ATH5K_TRACE(ah->ah_sc); /* Invalid entry (key table overflow) */ AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 492cbb15720d2076667681a8860a7ed866a2df5f..6284c389ba18ba6480b65dbf61c17e1f9f4632e0 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -378,8 +378,6 @@ enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) u32 data, type; struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; - ATH5K_TRACE(ah->ah_sc); - if (ah->ah_rf_banks == NULL || ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE) return AR5K_RFGAIN_INACTIVE; @@ -1167,7 +1165,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) * The median of the values in the history is then loaded into the * hardware for its own use for RSSI and CCA measurements. 
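A small sketch of the history-plus-median scheme described in the comment above, which is why one noisy reading does not immediately skew the noise floor the hardware is given. The buffer length, types and helper names here are illustrative only, not taken from the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define NF_HIST_LEN 8	/* illustrative size, not the driver's */

struct nf_hist {
	int16_t val[NF_HIST_LEN];
	unsigned int idx;
};

void nf_hist_add(struct nf_hist *h, int16_t nf)
{
	h->val[h->idx] = nf;
	h->idx = (h->idx + 1) % NF_HIST_LEN;
}

static int cmp_s16(const void *a, const void *b)
{
	return *(const int16_t *)a - *(const int16_t *)b;
}

int16_t nf_hist_median(const struct nf_hist *h)
{
	int16_t sorted[NF_HIST_LEN];

	memcpy(sorted, h->val, sizeof(sorted));
	qsort(sorted, NF_HIST_LEN, sizeof(sorted[0]), cmp_s16);
	return sorted[NF_HIST_LEN / 2];	/* middle element of the history */
}

int main(void)
{
	struct nf_hist h = { { -95, -96, -60, -94, -97, -95, -96, -95 }, 0 };

	/* one outlier (-60) does not drag the result the way an average would */
	printf("median NF: %d dBm\n", nf_hist_median(&h));
	return 0;
}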
*/ -static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) +void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 val; @@ -1248,7 +1246,6 @@ static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) /* * Perform a PHY calibration on RF5110 * -Fix BPSK/QAM Constellation (I/Q correction) - * -Calculate Noise Floor */ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel) @@ -1335,8 +1332,6 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, return ret; } - ath5k_hw_update_noise_floor(ah); - /* * Re-enable RX/TX and beacons */ @@ -1348,22 +1343,20 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, } /* - * Perform a PHY calibration on RF5111/5112 and newer chips + * Perform I/Q calibration on RF5111/5112 and newer chips */ -static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, - struct ieee80211_channel *channel) +static int +ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) { u32 i_pwr, q_pwr; s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; int i; - ATH5K_TRACE(ah->ah_sc); if (!ah->ah_calibration || ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) - goto done; + return 0; /* Calibration has finished, get the results and re-run */ - /* work around empty results which can apparently happen on 5212 */ for (i = 0; i <= 10; i++) { iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR); @@ -1384,7 +1377,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, /* protect against divide by 0 and loss of sign bits */ if (i_coffd == 0 || q_coffd < 2) - goto done; + return -1; i_coff = (-iq_corr) / i_coffd; i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ @@ -1410,17 +1403,6 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN); -done: - - /* TODO: Separate noise floor calibration from I/Q calibration - * since noise floor calibration interrupts rx path while I/Q - * calibration doesn't. 
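The I/Q calibration path above reduces the measured correlation to small signed correction coefficients before writing them to the PHY. The following is a hedged sketch of just that step, mirroring the i_coff handling visible above (divide, guard against a zero divisor, clamp to the signed 6-bit range); how the divisors are derived from i_pwr/q_pwr is deliberately left out.

#include <stdint.h>

static int32_t clamp_s32(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* returns 0 on success, -1 if the measurement is unusable */
int iq_corr_coeff(int32_t iq_corr, int32_t i_coffd, int32_t *i_coff)
{
	if (i_coffd == 0)	/* protect against divide by zero */
		return -1;

	/* same shape as the driver's i_coff computation: signed 6-bit result */
	*i_coff = clamp_s32((-iq_corr) / i_coffd, -32, 31);
	return 0;
}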
We don't need to run noise floor calibration - * as often as I/Q calibration.*/ - ath5k_hw_update_noise_floor(ah); - - /* Initiate a gain_F calibration */ - ath5k_hw_request_rfgain_probe(ah); - return 0; } @@ -1434,8 +1416,10 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, if (ah->ah_radio == AR5K_RF5110) ret = ath5k_hw_rf5110_calibrate(ah, channel); - else - ret = ath5k_hw_rf511x_calibrate(ah, channel); + else { + ret = ath5k_hw_rf511x_iq_calibrate(ah); + ath5k_hw_request_rfgain_probe(ah); + } return ret; } @@ -1693,7 +1677,6 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, int ath5k_hw_phy_disable(struct ath5k_hw *ah) { - ATH5K_TRACE(ah->ah_sc); /*Just a try M.F.*/ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); @@ -1709,8 +1692,6 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan) u32 srev; u16 ret; - ATH5K_TRACE(ah->ah_sc); - /* * Set the radio chip access register */ @@ -1755,8 +1736,6 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan) static void /*TODO:Boundary check*/ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) { - ATH5K_TRACE(ah->ah_sc); - if (ah->ah_version != AR5K_AR5210) ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); } @@ -1789,19 +1768,50 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) if (enable) { AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART, - AR5K_PHY_RESTART_DIV_GC, 0xc); + AR5K_PHY_RESTART_DIV_GC, 4); AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV, AR5K_PHY_FAST_ANT_DIV_EN); } else { AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART, - AR5K_PHY_RESTART_DIV_GC, 0x8); + AR5K_PHY_RESTART_DIV_GC, 0); AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV, AR5K_PHY_FAST_ANT_DIV_EN); } } +void +ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode) +{ + u8 ant0, ant1; + + /* + * In case a fixed antenna was set as default + * use the same switch table twice. 
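Stepping back to the calibration split above: with ath5k_hw_update_noise_floor() made non-static and no longer called from the I/Q path, the caller can run the RX-disturbing noise floor calibration much less often than the cheap I/Q calibration, which is what the removed TODO asked for. A rough sketch of that idea only; the interval and the callback names are invented and are not the driver's actual scheduler.

#define NF_CAL_INTERVAL 10	/* run NF calibration every 10th tick (made up) */

struct cal_state {
	unsigned int tick;
};

void cal_tick(struct cal_state *cs,
	      void (*iq_calibrate)(void),
	      void (*update_noise_floor)(void))
{
	iq_calibrate();				/* every calibration period */

	if (++cs->tick % NF_CAL_INTERVAL == 0)
		update_noise_floor();		/* much less frequently */
}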
+ */ + if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A) + ant0 = ant1 = AR5K_ANT_SWTABLE_A; + else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B) + ant0 = ant1 = AR5K_ANT_SWTABLE_B; + else { + ant0 = AR5K_ANT_SWTABLE_A; + ant1 = AR5K_ANT_SWTABLE_B; + } + + /* Set antenna idle switch table */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL, + AR5K_PHY_ANT_CTL_SWTABLE_IDLE, + (ah->ah_ant_ctl[ee_mode][AR5K_ANT_CTL] | + AR5K_PHY_ANT_CTL_TXRX_EN)); + + /* Set antenna switch tables */ + ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant0], + AR5K_PHY_ANT_SWITCH_TABLE_0); + ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant1], + AR5K_PHY_ANT_SWITCH_TABLE_1); +} + /* * Set antenna operating mode */ @@ -1823,8 +1833,6 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) def_ant = ah->ah_def_ant; - ATH5K_TRACE(ah->ah_sc); - switch (channel->hw_value & CHANNEL_MODES) { case CHANNEL_A: case CHANNEL_T: @@ -1923,6 +1931,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) if (sta_id1) AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1); + ath5k_hw_set_antenna_switch(ah, ee_mode); /* Note: set diversity before default antenna * because it won't work correctly */ ath5k_hw_set_fast_div(ah, ee_mode, fast_div); @@ -2988,7 +2997,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 type; int ret; - ATH5K_TRACE(ah->ah_sc); if (txpower > AR5K_TUNE_MAX_TXPOWER) { ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); return -EINVAL; @@ -3084,8 +3092,6 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) struct ieee80211_channel *channel = ah->ah_current_channel; u8 ee_mode; - ATH5K_TRACE(ah->ah_sc); - switch (channel->hw_value & CHANNEL_MODES) { case CHANNEL_A: case CHANNEL_T: diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index f5831da33f7bccde87da720019259385a222b639..4186ff4c6e9c80a69d23824a356e35529c55b4c7 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -31,7 +31,6 @@ Queue Control Unit, DFS Control Unit Functions int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info) { - ATH5K_TRACE(ah->ah_sc); memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); return 0; } @@ -42,7 +41,6 @@ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *queue_info) { - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) @@ -69,8 +67,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, unsigned int queue; int ret; - ATH5K_TRACE(ah->ah_sc); - /* * Get queue by type */ @@ -149,7 +145,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) { u32 pending; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* Return if queue is declared inactive */ @@ -177,7 +172,6 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) */ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) { - ATH5K_TRACE(ah->ah_sc); if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) return; @@ -195,7 +189,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) u32 cw_min, cw_max, retry_lg, retry_sh; struct ath5k_txq_info *tq = 
&ah->ah_txq[queue]; - ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); tq = &ah->ah_txq[queue]; @@ -523,8 +516,6 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) { u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); - ATH5K_TRACE(ah->ah_sc); - if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c index 307f80e83f941d8996fb84f5684e94db15155694..498aa28ea9e69bd58ba12cf889b1f2df23e3ed30 100644 --- a/drivers/net/wireless/ath/ath5k/reset.c +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -201,8 +201,6 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) int ret; u32 mask = val ? val : ~0U; - ATH5K_TRACE(ah->ah_sc); - /* Read-and-clear RX Descriptor Pointer*/ ath5k_hw_reg_read(ah, AR5K_RXDP); @@ -246,7 +244,6 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, unsigned int i; u32 staid, data; - ATH5K_TRACE(ah->ah_sc); staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1); switch (mode) { @@ -393,8 +390,6 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) mode = 0; clock = 0; - ATH5K_TRACE(ah->ah_sc); - /* Wakeup the device */ ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); if (ret) { @@ -734,7 +729,7 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, } static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, - struct ieee80211_channel *channel, u8 *ant, u8 ee_mode) + struct ieee80211_channel *channel, u8 ee_mode) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; s16 cck_ofdm_pwr_delta; @@ -768,17 +763,9 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, ee->ee_cck_ofdm_gain_delta; } - /* Set antenna idle switch table */ - AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL, - AR5K_PHY_ANT_CTL_SWTABLE_IDLE, - (ah->ah_ant_ctl[ee_mode][0] | - AR5K_PHY_ANT_CTL_TXRX_EN)); - - /* Set antenna switch tables */ - ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[0]], - AR5K_PHY_ANT_SWITCH_TABLE_0); - ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[1]], - AR5K_PHY_ANT_SWITCH_TABLE_1); + /* XXX: necessary here? is called from ath5k_hw_set_antenna_mode() + * too */ + ath5k_hw_set_antenna_switch(ah, ee_mode); /* Noise floor threshold */ ath5k_hw_reg_write(ah, @@ -855,7 +842,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, AR5K_PHY_NF_THRESH62, ee->ee_thr_62[ee_mode]); - /* False detect backoff for channels * that have spur noise. Write the new * cyclic power RSSI threshold. 
*/ @@ -891,14 +877,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel) { struct ath_common *common = ath5k_hw_common(ah); - u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo; + u32 s_seq[10], s_led[3], staid1_flags, tsf_up, tsf_lo; u32 phy_tst1; - u8 mode, freq, ee_mode, ant[2]; + u8 mode, freq, ee_mode; int i, ret; - ATH5K_TRACE(ah->ah_sc); - - s_ant = 0; ee_mode = 0; staid1_flags = 0; tsf_up = 0; @@ -995,9 +978,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, } } - /* Save default antenna */ - s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA); - if (ah->ah_version == AR5K_AR5212) { /* Restore normal 32/40MHz clock operation * to avoid register access delay on certain @@ -1094,22 +1074,17 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, /* Write OFDM timings on 5212*/ if (ah->ah_version == AR5K_AR5212 && channel->hw_value & CHANNEL_OFDM) { - struct ath5k_eeprom_info *ee = - &ah->ah_capabilities.cap_eeprom; ret = ath5k_hw_write_ofdm_timings(ah, channel); if (ret) return ret; - /* Note: According to docs we can have a newer - * EEPROM on old hardware, so we need to verify - * that our hardware is new enough to have spur - * mitigation registers (delta phase etc) */ - if (ah->ah_mac_srev >= AR5K_SREV_AR5424 || - (ah->ah_mac_srev >= AR5K_SREV_AR5424 && - ee->ee_version >= AR5K_EEPROM_VERSION_5_3)) + /* Spur info is available only from EEPROM versions + * newer than 5.3, but the EEPROM routines will use + * static values for older versions */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5424) ath5k_hw_set_spur_mitigation_filter(ah, - channel); + channel); } /*Enable/disable 802.11b mode on 5111 @@ -1123,21 +1098,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, AR5K_TXCFG_B_MODE); } - /* - * In case a fixed antenna was set as default - * use the same switch table twice.
- */ - if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A) - ant[0] = ant[1] = AR5K_ANT_SWTABLE_A; - else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B) - ant[0] = ant[1] = AR5K_ANT_SWTABLE_B; - else { - ant[0] = AR5K_ANT_SWTABLE_A; - ant[1] = AR5K_ANT_SWTABLE_B; - } - /* Commit values from EEPROM */ - ath5k_hw_commit_eeprom_settings(ah, channel, ant, ee_mode); + ath5k_hw_commit_eeprom_settings(ah, channel, ee_mode); } else { /* @@ -1175,8 +1137,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32); } } - - ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA); } /* Ledstate */ diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..90757de7bf599e70291af0e226ae29d377b7c67e --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/sysfs.c @@ -0,0 +1,116 @@ +#include +#include + +#include "base.h" +#include "ath5k.h" +#include "reg.h" + +#define SIMPLE_SHOW_STORE(name, get, set) \ +static ssize_t ath5k_attr_show_##name(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct ath5k_softc *sc = dev_get_drvdata(dev); \ + return snprintf(buf, PAGE_SIZE, "%d\n", get); \ +} \ + \ +static ssize_t ath5k_attr_store_##name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct ath5k_softc *sc = dev_get_drvdata(dev); \ + int val; \ + \ + val = (int)simple_strtoul(buf, NULL, 10); \ + set(sc->ah, val); \ + return count; \ +} \ +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \ + ath5k_attr_show_##name, ath5k_attr_store_##name) + +#define SIMPLE_SHOW(name, get) \ +static ssize_t ath5k_attr_show_##name(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct ath5k_softc *sc = dev_get_drvdata(dev); \ + return snprintf(buf, PAGE_SIZE, "%d\n", get); \ +} \ +static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL) + +/*** ANI ***/ + +SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init); +SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level, + ath5k_ani_set_noise_immunity_level); +SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level, + ath5k_ani_set_spur_immunity_level); +SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level, + ath5k_ani_set_firstep_level); +SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig, + ath5k_ani_set_ofdm_weak_signal_detection); +SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig, + ath5k_ani_set_cck_weak_signal_detection); +SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level); + +static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL); +} +static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO, + ath5k_attr_show_noise_immunity_level_max, NULL); + +static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL); +} +static DEVICE_ATTR(firstep_level_max, S_IRUGO, + ath5k_attr_show_firstep_level_max, NULL); + +static struct attribute *ath5k_sysfs_entries_ani[] = { + &dev_attr_ani_mode.attr, + &dev_attr_noise_immunity_level.attr, + &dev_attr_spur_level.attr, + &dev_attr_firstep_level.attr, + &dev_attr_ofdm_weak_signal_detection.attr, + &dev_attr_cck_weak_signal_detection.attr, + 
&dev_attr_noise_immunity_level_max.attr, + &dev_attr_spur_level_max.attr, + &dev_attr_firstep_level_max.attr, + NULL +}; + +static struct attribute_group ath5k_attribute_group_ani = { + .name = "ani", + .attrs = ath5k_sysfs_entries_ani, +}; + + +/*** register / unregister ***/ + +int +ath5k_sysfs_register(struct ath5k_softc *sc) +{ + struct device *dev = &sc->pdev->dev; + int err; + + err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani); + if (err) { + ATH5K_ERR(sc, "failed to create sysfs group\n"); + return err; + } + + return 0; +} + +void +ath5k_sysfs_unregister(struct ath5k_softc *sc) +{ + struct device *dev = &sc->pdev->dev; + + sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani); +} diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index dd112be218abf951bc5875c107a43d1f9fe87ddf..973ae4f49f35fbd122f5eeec4c352a397c10c33e 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -32,7 +32,8 @@ ath9k_hw-y:= \ mac.o \ ar9002_mac.o \ ar9003_mac.o \ - ar9003_eeprom.o + ar9003_eeprom.o \ + ar9003_paprd.o obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 85fdd26039c8e70fd8f77888c083559006418caa..1a984b02e9e5c2cd02c7d039243df5bf385039cf 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -131,11 +131,8 @@ static int ath_ahb_probe(struct platform_device *pdev) ah = sc->sc_ah; ath9k_hw_name(ah, hw_name, sizeof(hw_name)); - printk(KERN_INFO - "%s: %s mem=0x%lx, irq=%d\n", - wiphy_name(hw->wiphy), - hw_name, - (unsigned long)mem, irq); + wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", + hw_name, (unsigned long)mem, irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index ba8b20f01594a8908f135620dd10b392d1498227..cc648b6ae31cef3b3a650ab7414b76fa061173e4 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2010 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,8 +17,99 @@ #include "hw.h" #include "hw-ops.h" -static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, - struct ath9k_channel *chan) +struct ani_ofdm_level_entry { + int spur_immunity_level; + int fir_step_level; + int ofdm_weak_signal_on; +}; + +/* values here are relative to the INI */ + +/* + * Legend: + * + * SI: Spur immunity + * FS: FIR Step + * WS: OFDM / CCK Weak Signal detection + * MRC-CCK: Maximal Ratio Combining for CCK + */ + +static const struct ani_ofdm_level_entry ofdm_level_table[] = { + /* SI FS WS */ + { 0, 0, 1 }, /* lvl 0 */ + { 1, 1, 1 }, /* lvl 1 */ + { 2, 2, 1 }, /* lvl 2 */ + { 3, 2, 1 }, /* lvl 3 (default) */ + { 4, 3, 1 }, /* lvl 4 */ + { 5, 4, 1 }, /* lvl 5 */ + { 6, 5, 1 }, /* lvl 6 */ + { 7, 6, 1 }, /* lvl 7 */ + { 7, 7, 1 }, /* lvl 8 */ + { 7, 8, 0 } /* lvl 9 */ +}; +#define ATH9K_ANI_OFDM_NUM_LEVEL \ + (sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0])) +#define ATH9K_ANI_OFDM_MAX_LEVEL \ + (ATH9K_ANI_OFDM_NUM_LEVEL-1) +#define ATH9K_ANI_OFDM_DEF_LEVEL \ + 3 /* default level - matches the INI settings */ + +/* + * MRC (Maximal Ratio Combining) has always been used with multi-antenna ofdm. 
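For readability, this is roughly what one SIMPLE_SHOW_STORE() line above expands to, shown here for spur_level (whitespace aside); it relies on the same kernel context as sysfs.c itself. The generated files end up in an "ani" subdirectory of the device's sysfs directory, where each level can be read or written as a decimal number.

static ssize_t ath5k_attr_show_spur_level(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct ath5k_softc *sc = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", sc->ani_state.spur_level);
}

static ssize_t ath5k_attr_store_spur_level(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct ath5k_softc *sc = dev_get_drvdata(dev);
	int val;

	val = (int)simple_strtoul(buf, NULL, 10);
	ath5k_ani_set_spur_immunity_level(sc->ah, val);
	return count;
}
static DEVICE_ATTR(spur_level, S_IRUGO | S_IWUSR,
		   ath5k_attr_show_spur_level, ath5k_attr_store_spur_level);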
+ * With OFDM for single stream you just add up all antenna inputs, you're + * only interested in what you get after FFT. Signal aligment is also not + * required for OFDM because any phase difference adds up in the frequency + * domain. + * + * MRC requires extra work for use with CCK. You need to align the antenna + * signals from the different antenna before you can add the signals together. + * You need aligment of signals as CCK is in time domain, so addition can cancel + * your signal completely if phase is 180 degrees (think of adding sine waves). + * You also need to remove noise before the addition and this is where ANI + * MRC CCK comes into play. One of the antenna inputs may be stronger but + * lower SNR, so just adding after alignment can be dangerous. + * + * Regardless of alignment in time, the antenna signals add constructively after + * FFT and improve your reception. For more information: + * + * http://en.wikipedia.org/wiki/Maximal-ratio_combining + */ + +struct ani_cck_level_entry { + int fir_step_level; + int mrc_cck_on; +}; + +static const struct ani_cck_level_entry cck_level_table[] = { + /* FS MRC-CCK */ + { 0, 1 }, /* lvl 0 */ + { 1, 1 }, /* lvl 1 */ + { 2, 1 }, /* lvl 2 (default) */ + { 3, 1 }, /* lvl 3 */ + { 4, 0 }, /* lvl 4 */ + { 5, 0 }, /* lvl 5 */ + { 6, 0 }, /* lvl 6 */ + { 7, 0 }, /* lvl 7 (only for high rssi) */ + { 8, 0 } /* lvl 8 (only for high rssi) */ +}; + +#define ATH9K_ANI_CCK_NUM_LEVEL \ + (sizeof(cck_level_table)/sizeof(cck_level_table[0])) +#define ATH9K_ANI_CCK_MAX_LEVEL \ + (ATH9K_ANI_CCK_NUM_LEVEL-1) +#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \ + (ATH9K_ANI_CCK_NUM_LEVEL-3) +#define ATH9K_ANI_CCK_DEF_LEVEL \ + 2 /* default level - matches the INI settings */ + +/* Private to ani.c */ +static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) +{ + ath9k_hw_private_ops(ah)->ani_lower_immunity(ah); +} + +int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, + struct ath9k_channel *chan) { int i; @@ -48,7 +139,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah, stats->beacons += REG_READ(ah, AR_BEACON_CNT); } -static void ath9k_ani_restart(struct ath_hw *ah) +static void ath9k_ani_restart_old(struct ath_hw *ah) { struct ar5416AniState *aniState; struct ath_common *common = ath9k_hw_common(ah); @@ -96,7 +187,42 @@ static void ath9k_ani_restart(struct ath_hw *ah) aniState->cckPhyErrCount = 0; } -static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) +static void ath9k_ani_restart_new(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + aniState->listenTime = 0; + + aniState->ofdmPhyErrBase = 0; + aniState->cckPhyErrBase = 0; + + ath_print(common, ATH_DBG_ANI, + "Writing ofdmbase=%08x cckbase=%08x\n", + aniState->ofdmPhyErrBase, + aniState->cckPhyErrBase); + + ENABLE_REGWRITE_BUFFER(ah); + + REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); + + REGWRITE_BUFFER_FLUSH(ah); + DISABLE_REGWRITE_BUFFER(ah); + + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + aniState->ofdmPhyErrCount = 0; + aniState->cckPhyErrCount = 0; +} + +static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah) { struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; struct ar5416AniState *aniState; @@ -168,7 +294,7 @@ static void 
ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) } } -static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) +static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah) { struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; struct ar5416AniState *aniState; @@ -206,7 +332,125 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) } } -static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) +/* Adjust the OFDM Noise Immunity Level */ +static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) +{ + struct ar5416AniState *aniState = ah->curani; + struct ath_common *common = ath9k_hw_common(ah); + const struct ani_ofdm_level_entry *entry_ofdm; + const struct ani_cck_level_entry *entry_cck; + + aniState->noiseFloor = BEACON_RSSI(ah); + + ath_print(common, ATH_DBG_ANI, + "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", + aniState->ofdmNoiseImmunityLevel, + immunityLevel, aniState->noiseFloor, + aniState->rssiThrLow, aniState->rssiThrHigh); + + aniState->ofdmNoiseImmunityLevel = immunityLevel; + + entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; + entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; + + if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level) + ath9k_hw_ani_control(ah, + ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + entry_ofdm->spur_immunity_level); + + if (aniState->firstepLevel != entry_ofdm->fir_step_level && + entry_ofdm->fir_step_level >= entry_cck->fir_step_level) + ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, + entry_ofdm->fir_step_level); + + if ((ah->opmode != NL80211_IFTYPE_STATION && + ah->opmode != NL80211_IFTYPE_ADHOC) || + aniState->noiseFloor <= aniState->rssiThrHigh) { + if (aniState->ofdmWeakSigDetectOff) + /* force on ofdm weak sig detect */ + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + true); + else if (aniState->ofdmWeakSigDetectOff == + entry_ofdm->ofdm_weak_signal_on) + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + entry_ofdm->ofdm_weak_signal_on); + } +} + +static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + + if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) + ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1); +} + +/* + * Set the ANI settings to match an CCK level. 
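A self-contained sketch of how a single OFDM noise-immunity level fans out into the three underlying knobs through ofdm_level_table[] (values copied from the table above), the same lookup ath9k_hw_set_ofdm_nil() performs. The printf() stands in for the ath9k_hw_ani_control() calls; the driver additionally skips knobs that are already at the requested value and gates weak-signal detection on RSSI and operating mode.

#include <stdio.h>

struct ani_ofdm_level_entry {
	int spur_immunity_level;
	int fir_step_level;
	int ofdm_weak_signal_on;
};

static const struct ani_ofdm_level_entry ofdm_level_table[] = {
	/* SI  FS  WS */
	{  0,  0,  1  }, /* lvl 0 */
	{  1,  1,  1  }, /* lvl 1 */
	{  2,  2,  1  }, /* lvl 2 */
	{  3,  2,  1  }, /* lvl 3  (default) */
	{  4,  3,  1  }, /* lvl 4 */
	{  5,  4,  1  }, /* lvl 5 */
	{  6,  5,  1  }, /* lvl 6 */
	{  7,  6,  1  }, /* lvl 7 */
	{  7,  7,  1  }, /* lvl 8 */
	{  7,  8,  0  }  /* lvl 9 */
};

#define OFDM_MAX_LEVEL \
	((int)(sizeof(ofdm_level_table) / sizeof(ofdm_level_table[0])) - 1)

static void set_ofdm_nil(int level)
{
	const struct ani_ofdm_level_entry *e;

	if (level < 0 || level > OFDM_MAX_LEVEL)
		return;
	e = &ofdm_level_table[level];

	/* in the driver each knob is only touched when it actually changes */
	printf("spur immunity -> %d, FIR step -> %d, OFDM weak-sig detect -> %s\n",
	       e->spur_immunity_level, e->fir_step_level,
	       e->ofdm_weak_signal_on ? "on" : "off");
}

int main(void)
{
	set_ofdm_nil(3);		/* default level */
	set_ofdm_nil(OFDM_MAX_LEVEL);	/* most aggressive level */
	return 0;
}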
+ */ +static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) +{ + struct ar5416AniState *aniState = ah->curani; + struct ath_common *common = ath9k_hw_common(ah); + const struct ani_ofdm_level_entry *entry_ofdm; + const struct ani_cck_level_entry *entry_cck; + + aniState->noiseFloor = BEACON_RSSI(ah); + ath_print(common, ATH_DBG_ANI, + "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", + aniState->cckNoiseImmunityLevel, immunityLevel, + aniState->noiseFloor, aniState->rssiThrLow, + aniState->rssiThrHigh); + + if ((ah->opmode == NL80211_IFTYPE_STATION || + ah->opmode == NL80211_IFTYPE_ADHOC) && + aniState->noiseFloor <= aniState->rssiThrLow && + immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) + immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; + + aniState->cckNoiseImmunityLevel = immunityLevel; + + entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; + entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; + + if (aniState->firstepLevel != entry_cck->fir_step_level && + entry_cck->fir_step_level >= entry_ofdm->fir_step_level) + ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, + entry_cck->fir_step_level); + + /* Skip MRC CCK for pre AR9003 families */ + if (!AR_SREV_9300_20_OR_LATER(ah)) + return; + + if (aniState->mrcCCKOff == entry_cck->mrc_cck_on) + ath9k_hw_ani_control(ah, + ATH9K_ANI_MRC_CCK, + entry_cck->mrc_cck_on); +} + +static void ath9k_hw_ani_cck_err_trigger_new(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + + if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) + ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1); +} + +static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah) { struct ar5416AniState *aniState; int32_t rssi; @@ -259,9 +503,53 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) } } +/* + * only lower either OFDM or CCK errors per turn + * we lower the other one next time + */ +static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + + aniState = ah->curani; + + /* lower OFDM noise immunity */ + if (aniState->ofdmNoiseImmunityLevel > 0 && + (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) { + ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1); + return; + } + + /* lower CCK noise immunity */ + if (aniState->cckNoiseImmunityLevel > 0) + ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1); +} + +static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah) +{ + struct ath9k_channel *chan = ah->curchan; + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + u8 clockrate; /* in MHz */ + + if (!ah->curchan) /* should really check for CCK instead */ + clockrate = ATH9K_CLOCK_RATE_CCK; + else if (conf->channel->band == IEEE80211_BAND_2GHZ) + clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM; + else if (IS_CHAN_A_FAST_CLOCK(ah, chan)) + clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM; + else + clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; + + if (conf_is_ht40(conf)) + return clockrate * 2; + + return clockrate; +} + static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) { struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); u32 txFrameCount, rxFrameCount, cycleCount; int32_t listenTime; @@ -271,15 +559,31 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) aniState = ah->curani; if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) { - listenTime = 0; ah->stats.ast_ani_lzero++; +
ath_print(common, ATH_DBG_ANI, + "1st call: aniState->cycleCount=%d\n", + aniState->cycleCount); } else { int32_t ccdelta = cycleCount - aniState->cycleCount; int32_t rfdelta = rxFrameCount - aniState->rxFrameCount; int32_t tfdelta = txFrameCount - aniState->txFrameCount; - listenTime = (ccdelta - rfdelta - tfdelta) / 44000; + int32_t clock_rate; + + /* + * convert HW counter values to ms using mode + * specific clock rate + */ + clock_rate = ath9k_hw_chan_2_clockrate_mhz(ah) * 1000; + + listenTime = (ccdelta - rfdelta - tfdelta) / clock_rate; + + ath_print(common, ATH_DBG_ANI, + "cyclecount=%d, rfcount=%d, " + "tfcount=%d, listenTime=%d CLOCK_RATE=%d\n", + ccdelta, rfdelta, tfdelta, listenTime, clock_rate); } + aniState->cycleCount = cycleCount; aniState->txFrameCount = txFrameCount; aniState->rxFrameCount = rxFrameCount; @@ -287,7 +591,7 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) return listenTime; } -void ath9k_ani_reset(struct ath_hw *ah) +static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning) { struct ar5416AniState *aniState; struct ath9k_channel *chan = ah->curchan; @@ -340,7 +644,7 @@ void ath9k_ani_reset(struct ath_hw *ah) ah->curani->cckTrigLow = ah->config.cck_trig_low; } - ath9k_ani_restart(ah); + ath9k_ani_restart_old(ah); return; } @@ -362,7 +666,7 @@ void ath9k_ani_reset(struct ath_hw *ah) ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & ~ATH9K_RX_FILTER_PHYERR); - ath9k_ani_restart(ah); + ath9k_ani_restart_old(ah); ENABLE_REGWRITE_BUFFER(ah); @@ -373,8 +677,102 @@ void ath9k_ani_reset(struct ath_hw *ah) DISABLE_REGWRITE_BUFFER(ah); } -void ath9k_hw_ani_monitor(struct ath_hw *ah, - struct ath9k_channel *chan) +/* + * Restore the ANI parameters in the HAL and reset the statistics. + * This routine should be called for every hardware reset and for + * every channel change. + */ +static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning) +{ + struct ar5416AniState *aniState = ah->curani; + struct ath9k_channel *chan = ah->curchan; + struct ath_common *common = ath9k_hw_common(ah); + + if (!DO_ANI(ah)) + return; + + BUG_ON(aniState == NULL); + ah->stats.ast_ani_reset++; + + /* only allow a subset of functions in AP mode */ + if (ah->opmode == NL80211_IFTYPE_AP) { + if (IS_CHAN_2GHZ(chan)) { + ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | + ATH9K_ANI_FIRSTEP_LEVEL); + if (AR_SREV_9300_20_OR_LATER(ah)) + ah->ani_function |= ATH9K_ANI_MRC_CCK; + } else + ah->ani_function = 0; + } + + /* always allow mode (on/off) to be controlled */ + ah->ani_function |= ATH9K_ANI_MODE; + + if (is_scanning || + (ah->opmode != NL80211_IFTYPE_STATION && + ah->opmode != NL80211_IFTYPE_ADHOC)) { + /* + * If we're scanning or in AP mode, the defaults (ini) + * should be in place. For an AP we assume the historical + * levels for this channel are probably outdated so start + * from defaults instead.
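The arithmetic behind the new listen-time calculation above, and behind the per-second error rates that ath9k_hw_ani_monitor_new() later compares against ofdmTrigHigh/cckTrigHigh and the matching low-water marks, pulled out into a compilable sketch. The counter values in main() are invented; 44 MHz is simply the rate the old code hard-coded as /44000, doubled here as it would be for HT40.

#include <stdio.h>
#include <stdint.h>

/* listen time in ms: cycles spent neither transmitting nor receiving,
 * divided by (clock-MHz * 1000) cycles per millisecond */
static int32_t listen_time_ms(uint32_t ccdelta, uint32_t rfdelta,
			      uint32_t tfdelta, uint32_t clock_mhz)
{
	return (int32_t)(ccdelta - rfdelta - tfdelta) /
	       (int32_t)(clock_mhz * 1000);
}

int main(void)
{
	uint32_t clock_mhz = 44 * 2;	/* 44 MHz clock, doubled for HT40 */
	int32_t lt = listen_time_ms(26400000, 1200000, 800000, clock_mhz);
	uint32_t ofdm_errs = 350;	/* PHY error count over that interval */

	if (lt > 0) {
		/* errors per second, the unit the trigger levels use */
		uint32_t ofdm_err_rate = ofdm_errs * 1000 / (uint32_t)lt;

		printf("listen=%d ms, ofdm errors=%u/s\n",
		       lt, (unsigned)ofdm_err_rate);
	}
	return 0;
}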
+ */ + if (aniState->ofdmNoiseImmunityLevel != + ATH9K_ANI_OFDM_DEF_LEVEL || + aniState->cckNoiseImmunityLevel != + ATH9K_ANI_CCK_DEF_LEVEL) { + ath_print(common, ATH_DBG_ANI, + "Restore defaults: opmode %u " + "chan %d Mhz/0x%x is_scanning=%d " + "ofdm:%d cck:%d\n", + ah->opmode, + chan->channel, + chan->channelFlags, + is_scanning, + aniState->ofdmNoiseImmunityLevel, + aniState->cckNoiseImmunityLevel); + + ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL); + ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL); + } + } else { + /* + * restore historical levels for this channel + */ + ath_print(common, ATH_DBG_ANI, + "Restore history: opmode %u " + "chan %d Mhz/0x%x is_scanning=%d " + "ofdm:%d cck:%d\n", + ah->opmode, + chan->channel, + chan->channelFlags, + is_scanning, + aniState->ofdmNoiseImmunityLevel, + aniState->cckNoiseImmunityLevel); + + ath9k_hw_set_ofdm_nil(ah, + aniState->ofdmNoiseImmunityLevel); + ath9k_hw_set_cck_nil(ah, + aniState->cckNoiseImmunityLevel); + } + + /* + * enable phy counters if hw supports or if not, enable phy + * interrupts (so we can count each one) + */ + ath9k_ani_restart_new(ah); + + ENABLE_REGWRITE_BUFFER(ah); + + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); + + REGWRITE_BUFFER_FLUSH(ah); + DISABLE_REGWRITE_BUFFER(ah); +} + +static void ath9k_hw_ani_monitor_old(struct ath_hw *ah, + struct ath9k_channel *chan) { struct ar5416AniState *aniState; struct ath_common *common = ath9k_hw_common(ah); @@ -390,7 +788,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, listenTime = ath9k_hw_ani_get_listen_time(ah); if (listenTime < 0) { ah->stats.ast_ani_lneg++; - ath9k_ani_restart(ah); + ath9k_ani_restart_old(ah); return; } @@ -444,21 +842,166 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, aniState->cckPhyErrCount <= aniState->listenTime * aniState->cckTrigLow / 1000) ath9k_hw_ani_lower_immunity(ah); - ath9k_ani_restart(ah); + ath9k_ani_restart_old(ah); } else if (aniState->listenTime > ah->aniperiod) { if (aniState->ofdmPhyErrCount > aniState->listenTime * aniState->ofdmTrigHigh / 1000) { - ath9k_hw_ani_ofdm_err_trigger(ah); - ath9k_ani_restart(ah); + ath9k_hw_ani_ofdm_err_trigger_old(ah); + ath9k_ani_restart_old(ah); } else if (aniState->cckPhyErrCount > aniState->listenTime * aniState->cckTrigHigh / 1000) { - ath9k_hw_ani_cck_err_trigger(ah); - ath9k_ani_restart(ah); + ath9k_hw_ani_cck_err_trigger_old(ah); + ath9k_ani_restart_old(ah); + } + } +} + +static void ath9k_hw_ani_monitor_new(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); + int32_t listenTime; + u32 phyCnt1, phyCnt2; + u32 ofdmPhyErrCnt, cckPhyErrCnt; + u32 ofdmPhyErrRate, cckPhyErrRate; + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + if (WARN_ON(!aniState)) + return; + + listenTime = ath9k_hw_ani_get_listen_time(ah); + if (listenTime <= 0) { + ah->stats.ast_ani_lneg++; + /* restart ANI period if listenTime is invalid */ + ath_print(common, ATH_DBG_ANI, + "listenTime=%d - on new ani monitor\n", + listenTime); + ath9k_ani_restart_new(ah); + return; + } + + aniState->listenTime += listenTime; + + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); + phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); + + if (phyCnt1 < aniState->ofdmPhyErrBase || + phyCnt2 < aniState->cckPhyErrBase) { + if (phyCnt1 < aniState->ofdmPhyErrBase) { + ath_print(common, ATH_DBG_ANI, + "phyCnt1 0x%x, resetting " + "counter value to 
0x%x\n", + phyCnt1, + aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_1, + aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, + AR_PHY_ERR_OFDM_TIMING); + } + if (phyCnt2 < aniState->cckPhyErrBase) { + ath_print(common, ATH_DBG_ANI, + "phyCnt2 0x%x, resetting " + "counter value to 0x%x\n", + phyCnt2, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, + AR_PHY_ERR_CCK_TIMING); + } + return; + } + + ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; + ah->stats.ast_ani_ofdmerrs += + ofdmPhyErrCnt - aniState->ofdmPhyErrCount; + aniState->ofdmPhyErrCount = ofdmPhyErrCnt; + + cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; + ah->stats.ast_ani_cckerrs += + cckPhyErrCnt - aniState->cckPhyErrCount; + aniState->cckPhyErrCount = cckPhyErrCnt; + + ath_print(common, ATH_DBG_ANI, + "Errors: OFDM=0x%08x-0x%08x=%d " + "CCK=0x%08x-0x%08x=%d\n", + phyCnt1, + aniState->ofdmPhyErrBase, + ofdmPhyErrCnt, + phyCnt2, + aniState->cckPhyErrBase, + cckPhyErrCnt); + + ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 / + aniState->listenTime; + cckPhyErrRate = aniState->cckPhyErrCount * 1000 / + aniState->listenTime; + + ath_print(common, ATH_DBG_ANI, + "listenTime=%d OFDM:%d errs=%d/s CCK:%d " + "errs=%d/s ofdm_turn=%d\n", + listenTime, aniState->ofdmNoiseImmunityLevel, + ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, + cckPhyErrRate, aniState->ofdmsTurn); + + if (aniState->listenTime > 5 * ah->aniperiod) { + if (ofdmPhyErrRate <= aniState->ofdmTrigLow && + cckPhyErrRate <= aniState->cckTrigLow) { + ath_print(common, ATH_DBG_ANI, + "1. listenTime=%d OFDM:%d errs=%d/s(<%d) " + "CCK:%d errs=%d/s(<%d) -> " + "ath9k_hw_ani_lower_immunity()\n", + aniState->listenTime, + aniState->ofdmNoiseImmunityLevel, + ofdmPhyErrRate, + aniState->ofdmTrigLow, + aniState->cckNoiseImmunityLevel, + cckPhyErrRate, + aniState->cckTrigLow); + ath9k_hw_ani_lower_immunity(ah); + aniState->ofdmsTurn = !aniState->ofdmsTurn; + } + ath_print(common, ATH_DBG_ANI, + "1 listenTime=%d ofdm=%d/s cck=%d/s - " + "calling ath9k_ani_restart_new()\n", + aniState->listenTime, ofdmPhyErrRate, cckPhyErrRate); + ath9k_ani_restart_new(ah); + } else if (aniState->listenTime > ah->aniperiod) { + /* check to see if need to raise immunity */ + if (ofdmPhyErrRate > aniState->ofdmTrigHigh && + (cckPhyErrRate <= aniState->cckTrigHigh || + aniState->ofdmsTurn)) { + ath_print(common, ATH_DBG_ANI, + "2 listenTime=%d OFDM:%d errs=%d/s(>%d) -> " + "ath9k_hw_ani_ofdm_err_trigger_new()\n", + aniState->listenTime, + aniState->ofdmNoiseImmunityLevel, + ofdmPhyErrRate, + aniState->ofdmTrigHigh); + ath9k_hw_ani_ofdm_err_trigger_new(ah); + ath9k_ani_restart_new(ah); + aniState->ofdmsTurn = false; + } else if (cckPhyErrRate > aniState->cckTrigHigh) { + ath_print(common, ATH_DBG_ANI, + "3 listenTime=%d CCK:%d errs=%d/s(>%d) -> " + "ath9k_hw_ani_cck_err_trigger_new()\n", + aniState->listenTime, + aniState->cckNoiseImmunityLevel, + cckPhyErrRate, + aniState->cckTrigHigh); + ath9k_hw_ani_cck_err_trigger_new(ah); + ath9k_ani_restart_new(ah); + aniState->ofdmsTurn = true; } } } -EXPORT_SYMBOL(ath9k_hw_ani_monitor); void ath9k_enable_mib_counters(struct ath_hw *ah) { @@ -495,6 +1038,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah) REG_WRITE(ah, AR_FILT_OFDM, 0); REG_WRITE(ah, AR_FILT_CCK, 0); } +EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt, @@ -542,7 +1086,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, * any 
of the MIB counters overflow/trigger so don't assume we're * here because a PHY error counter triggered. */ -void ath9k_hw_procmibevent(struct ath_hw *ah) +static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah) { u32 phyCnt1, phyCnt2; @@ -555,8 +1099,15 @@ void ath9k_hw_procmibevent(struct ath_hw *ah) /* Clear the mib counters and save them in the stats */ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); - if (!DO_ANI(ah)) + if (!DO_ANI(ah)) { + /* + * We must always clear the interrupt cause by + * resetting the phy error regs. + */ + REG_WRITE(ah, AR_PHY_ERR_1, 0); + REG_WRITE(ah, AR_PHY_ERR_2, 0); return; + } /* NB: these are not reset-on-read */ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); @@ -584,14 +1135,51 @@ void ath9k_hw_procmibevent(struct ath_hw *ah) * check will never be true. */ if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh) - ath9k_hw_ani_ofdm_err_trigger(ah); + ath9k_hw_ani_ofdm_err_trigger_new(ah); if (aniState->cckPhyErrCount > aniState->cckTrigHigh) - ath9k_hw_ani_cck_err_trigger(ah); + ath9k_hw_ani_cck_err_trigger_old(ah); /* NB: always restart to insure the h/w counters are reset */ - ath9k_ani_restart(ah); + ath9k_ani_restart_old(ah); } } -EXPORT_SYMBOL(ath9k_hw_procmibevent); + +/* + * Process a MIB interrupt. We may potentially be invoked because + * any of the MIB counters overflow/trigger so don't assume we're + * here because a PHY error counter triggered. + */ +static void ath9k_hw_proc_mib_event_new(struct ath_hw *ah) +{ + u32 phyCnt1, phyCnt2; + + /* Reset these counters regardless */ + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); + if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING)) + REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR); + + /* Clear the mib counters and save them in the stats */ + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + if (!DO_ANI(ah)) { + /* + * We must always clear the interrupt cause by + * resetting the phy error regs. 
+ */ + REG_WRITE(ah, AR_PHY_ERR_1, 0); + REG_WRITE(ah, AR_PHY_ERR_2, 0); + return; + } + + /* NB: these are not reset-on-read */ + phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); + phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); + + /* NB: always restart to insure the h/w counters are reset */ + if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) || + ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) + ath9k_ani_restart_new(ah); +} void ath9k_hw_ani_setup(struct ath_hw *ah) { @@ -619,22 +1207,70 @@ void ath9k_hw_ani_init(struct ath_hw *ah) memset(ah->ani, 0, sizeof(ah->ani)); for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { - ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH; - ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW; - ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH; - ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW; + if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) { + ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_NEW; + ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_NEW; + + ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_NEW; + ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_NEW; + + ah->ani[i].spurImmunityLevel = + ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; + + ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; + + ah->ani[i].ofdmPhyErrBase = 0; + ah->ani[i].cckPhyErrBase = 0; + + if (AR_SREV_9300_20_OR_LATER(ah)) + ah->ani[i].mrcCCKOff = + !ATH9K_ANI_ENABLE_MRC_CCK; + else + ah->ani[i].mrcCCKOff = true; + + ah->ani[i].ofdmsTurn = true; + } else { + ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_OLD; + ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_OLD; + + ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_OLD; + ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_OLD; + + ah->ani[i].spurImmunityLevel = + ATH9K_ANI_SPUR_IMMUNE_LVL_OLD; + ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD; + + ah->ani[i].ofdmPhyErrBase = + AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH_OLD; + ah->ani[i].cckPhyErrBase = + AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH_OLD; + ah->ani[i].cckWeakSigThreshold = + ATH9K_ANI_CCK_WEAK_SIG_THR; + } + ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; ah->ani[i].ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; - ah->ani[i].cckWeakSigThreshold = - ATH9K_ANI_CCK_WEAK_SIG_THR; - ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; - ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - ah->ani[i].ofdmPhyErrBase = - AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH; - ah->ani[i].cckPhyErrBase = - AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; + ah->ani[i].cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; + } + + /* + * since we expect some ongoing maintenance on the tables, let's sanity + * check here default level should not modify INI setting. 
+ */ + if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) { + const struct ani_ofdm_level_entry *entry_ofdm; + const struct ani_cck_level_entry *entry_cck; + + entry_ofdm = &ofdm_level_table[ATH9K_ANI_OFDM_DEF_LEVEL]; + entry_cck = &cck_level_table[ATH9K_ANI_CCK_DEF_LEVEL]; + + ah->aniperiod = ATH9K_ANI_PERIOD_NEW; + ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW; + } else { + ah->aniperiod = ATH9K_ANI_PERIOD_OLD; + ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD; } ath_print(common, ATH_DBG_ANI, @@ -653,7 +1289,34 @@ void ath9k_hw_ani_init(struct ath_hw *ah) ath9k_enable_mib_counters(ah); - ah->aniperiod = ATH9K_ANI_PERIOD; if (ah->config.enable_ani) ah->proc_phyerr |= HAL_PROCESS_ANI; } + +void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah) +{ + struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); + struct ath_hw_ops *ops = ath9k_hw_ops(ah); + + priv_ops->ani_reset = ath9k_ani_reset_old; + priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_old; + + ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_old; + ops->ani_monitor = ath9k_hw_ani_monitor_old; + + ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v1\n"); +} + +void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah) +{ + struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); + struct ath_hw_ops *ops = ath9k_hw_ops(ah); + + priv_ops->ani_reset = ath9k_ani_reset_new; + priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_new; + + ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_new; + ops->ani_monitor = ath9k_hw_ani_monitor_new; + + ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v2\n"); +} diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index 3356762ea384af7fc4f3dfd049d8f27cca576ff5..f4d0a4d48b37b18ec3e3f0f828f6b30743d19147 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -23,23 +23,55 @@ #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) -#define ATH9K_ANI_OFDM_TRIG_HIGH 500 -#define ATH9K_ANI_OFDM_TRIG_LOW 200 -#define ATH9K_ANI_CCK_TRIG_HIGH 200 -#define ATH9K_ANI_CCK_TRIG_LOW 100 +/* units are errors per second */ +#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500 +#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000 + +/* units are errors per second */ +#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200 +#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400 + +/* units are errors per second */ +#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200 +#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600 + +/* units are errors per second */ +#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100 +#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300 + #define ATH9K_ANI_NOISE_IMMUNE_LVL 4 #define ATH9K_ANI_USE_OFDM_WEAK_SIG true #define ATH9K_ANI_CCK_WEAK_SIG_THR false -#define ATH9K_ANI_SPUR_IMMUNE_LVL 7 -#define ATH9K_ANI_FIRSTEP_LVL 0 + +#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7 +#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3 + +#define ATH9K_ANI_FIRSTEP_LVL_OLD 0 +#define ATH9K_ANI_FIRSTEP_LVL_NEW 2 + #define ATH9K_ANI_RSSI_THR_HIGH 40 #define ATH9K_ANI_RSSI_THR_LOW 7 -#define ATH9K_ANI_PERIOD 100 + +#define ATH9K_ANI_PERIOD_OLD 100 +#define ATH9K_ANI_PERIOD_NEW 1000 + +/* in ms */ +#define ATH9K_ANI_POLLINTERVAL_OLD 100 +#define ATH9K_ANI_POLLINTERVAL_NEW 1000 #define HAL_NOISE_IMMUNE_MAX 4 #define HAL_SPUR_IMMUNE_MAX 7 #define HAL_FIRST_STEP_MAX 2 +#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0 +#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20 +#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 +#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22 + +#define ATH9K_ANI_ENABLE_MRC_CCK true + +/* values here are relative 
to the INI */ + enum ath9k_ani_cmd { ATH9K_ANI_PRESENT = 0x1, ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, @@ -49,7 +81,8 @@ enum ath9k_ani_cmd { ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20, ATH9K_ANI_MODE = 0x40, ATH9K_ANI_PHYERR_RESET = 0x80, - ATH9K_ANI_ALL = 0xff + ATH9K_ANI_MRC_CCK = 0x100, + ATH9K_ANI_ALL = 0xfff }; struct ath9k_mib_stats { @@ -60,9 +93,31 @@ struct ath9k_mib_stats { u32 beacons; }; +/* INI default values for ANI registers */ +struct ath9k_ani_default { + u16 m1ThreshLow; + u16 m2ThreshLow; + u16 m1Thresh; + u16 m2Thresh; + u16 m2CountThr; + u16 m2CountThrLow; + u16 m1ThreshLowExt; + u16 m2ThreshLowExt; + u16 m1ThreshExt; + u16 m2ThreshExt; + u16 firstep; + u16 firstepLow; + u16 cycpwrThr1; + u16 cycpwrThr1Ext; +}; + struct ar5416AniState { struct ath9k_channel *c; u8 noiseImmunityLevel; + u8 ofdmNoiseImmunityLevel; + u8 cckNoiseImmunityLevel; + bool ofdmsTurn; + u8 mrcCCKOff; u8 spurImmunityLevel; u8 firstepLevel; u8 ofdmWeakSigDetectOff; @@ -85,6 +140,7 @@ struct ar5416AniState { int16_t pktRssi[2]; int16_t ofdmErrRssi[2]; int16_t cckErrRssi[2]; + struct ath9k_ani_default iniDef; }; struct ar5416Stats { @@ -108,15 +164,13 @@ struct ar5416Stats { }; #define ah_mibStats stats.ast_mibstats -void ath9k_ani_reset(struct ath_hw *ah); -void ath9k_hw_ani_monitor(struct ath_hw *ah, - struct ath9k_channel *chan); void ath9k_enable_mib_counters(struct ath_hw *ah); void ath9k_hw_disable_mib_counters(struct ath_hw *ah); u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt, u32 *rxf_pcnt, u32 *txf_pcnt); -void ath9k_hw_procmibevent(struct ath_hw *ah); void ath9k_hw_ani_setup(struct ath_hw *ah); void ath9k_hw_ani_init(struct ath_hw *ah); +int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, + struct ath9k_channel *chan); #endif /* ANI_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h index 025c31ac614697a5f12d1f04188291daaa2c6e31..36f7d0639db333d51e904ab6a2871909d06937d1 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2010 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,729 +14,660 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#ifndef INITVALS_AR5008_H -#define INITVALS_AR5008_H - static const u32 ar5416Modes[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, - { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, - { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, - { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de }, - { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, - { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e }, - { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, - { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, - { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, - { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 }, - { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b }, - { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 }, - { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, - { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, - { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 }, - { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 }, - { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, - { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, - { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000b20c, 
0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, - { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, - { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, - { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, - { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, - { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, - { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, - { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, - { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, - { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, - { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, - { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a}, + {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001}, + {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007}, + {0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0}, + {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de}, + {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e}, + {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e}, + {0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18}, + {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190}, + {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081}, + {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134}, + {0x00009924, 0xd0058a0b, 
0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b}, + {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020}, + {0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80}, + {0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80}, + {0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80}, + {0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120}, + {0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00}, + {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be}, + {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77}, + {0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c}, + {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8}, + {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384}, + {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880}, + {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788}, + {0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa}, + {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000}, + {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402}, + {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06}, + {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b}, + {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b}, + {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a}, + {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf}, + {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f}, + {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f}, + {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f}, + {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, }; static const u32 ar5416Common[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 
0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00007010, 0x00000000 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x40000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x000080c0, 0x2a82301a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c4, 0x00000000 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, 
- { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88000010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x00008300, 0x00000000 }, - { 0x00008304, 0x00000000 }, - { 0x00008308, 0x00000000 }, - { 0x0000830c, 0x00000000 }, - { 0x00008310, 0x00000000 }, - { 0x00008314, 0x00000000 }, - { 0x00008318, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00070000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xad848e19 }, - { 0x00009810, 0x7d14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x00009840, 0x206a002e }, - { 0x0000984c, 0x1284233c }, - { 0x00009854, 0x00000859 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x05100000 }, - { 0x0000a920, 0x05100000 }, - { 0x0000b920, 0x05100000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280b212 }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5d50e188 }, - { 0x00009958, 0x00081fff }, - { 0x0000c95c, 0x004b6a8e }, - { 0x0000c968, 0x000003ce }, - { 0x00009970, 0x190fb515 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x001fff00 }, - { 0x000099ac, 0x00000000 }, - { 0x000099b0, 0x03051000 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000200 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x000000aa }, - { 0x000099fc, 0x00001042 }, - { 0x00009b00, 0x00000000 }, - { 0x00009b04, 0x00000001 }, - { 0x00009b08, 0x00000002 }, - { 0x00009b0c, 0x00000003 }, - { 0x00009b10, 0x00000004 }, - { 0x00009b14, 0x00000005 }, - { 0x00009b18, 0x00000008 }, - { 0x00009b1c, 0x00000009 }, - { 0x00009b20, 0x0000000a }, - { 0x00009b24, 0x0000000b }, - { 0x00009b28, 0x0000000c }, - { 0x00009b2c, 0x0000000d }, - { 0x00009b30, 0x00000010 }, - { 0x00009b34, 0x00000011 }, - { 0x00009b38, 0x00000012 }, - { 0x00009b3c, 0x00000013 }, - { 0x00009b40, 0x00000014 }, - { 0x00009b44, 0x00000015 }, - { 0x00009b48, 0x00000018 }, - { 0x00009b4c, 0x00000019 }, - { 0x00009b50, 0x0000001a }, - { 
0x00009b54, 0x0000001b }, - { 0x00009b58, 0x0000001c }, - { 0x00009b5c, 0x0000001d }, - { 0x00009b60, 0x00000020 }, - { 0x00009b64, 0x00000021 }, - { 0x00009b68, 0x00000022 }, - { 0x00009b6c, 0x00000023 }, - { 0x00009b70, 0x00000024 }, - { 0x00009b74, 0x00000025 }, - { 0x00009b78, 0x00000028 }, - { 0x00009b7c, 0x00000029 }, - { 0x00009b80, 0x0000002a }, - { 0x00009b84, 0x0000002b }, - { 0x00009b88, 0x0000002c }, - { 0x00009b8c, 0x0000002d }, - { 0x00009b90, 0x00000030 }, - { 0x00009b94, 0x00000031 }, - { 0x00009b98, 0x00000032 }, - { 0x00009b9c, 0x00000033 }, - { 0x00009ba0, 0x00000034 }, - { 0x00009ba4, 0x00000035 }, - { 0x00009ba8, 0x00000035 }, - { 0x00009bac, 0x00000035 }, - { 0x00009bb0, 0x00000035 }, - { 0x00009bb4, 0x00000035 }, - { 0x00009bb8, 0x00000035 }, - { 0x00009bbc, 0x00000035 }, - { 0x00009bc0, 0x00000035 }, - { 0x00009bc4, 0x00000035 }, - { 0x00009bc8, 0x00000035 }, - { 0x00009bcc, 0x00000035 }, - { 0x00009bd0, 0x00000035 }, - { 0x00009bd4, 0x00000035 }, - { 0x00009bd8, 0x00000035 }, - { 0x00009bdc, 0x00000035 }, - { 0x00009be0, 0x00000035 }, - { 0x00009be4, 0x00000035 }, - { 0x00009be8, 0x00000035 }, - { 0x00009bec, 0x00000035 }, - { 0x00009bf0, 0x00000035 }, - { 0x00009bf4, 0x00000035 }, - { 0x00009bf8, 0x00000010 }, - { 0x00009bfc, 0x0000001a }, - { 0x0000a210, 0x40806333 }, - { 0x0000a214, 0x00106c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x018830c6 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x00000bb5 }, - { 0x0000a22c, 0x00000011 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a23c, 0x13c889af }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00007bb6 }, - { 0x0000a248, 0x0fff3ffc }, - { 0x0000a24c, 0x00000001 }, - { 0x0000a250, 0x0000a000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cc75380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0e79e5c6 }, - { 0x0000b26c, 0x0e79e5c6 }, - { 0x0000c26c, 0x0e79e5c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000a27c, 0x051701ce }, - { 0x0000a338, 0x00000000 }, - { 0x0000a33c, 0x00000000 }, - { 0x0000a340, 0x00000000 }, - { 0x0000a344, 0x00000000 }, - { 0x0000a348, 0x3fffffff }, - { 0x0000a34c, 0x3fffffff }, - { 0x0000a350, 0x3fffffff }, - { 0x0000a354, 0x0003ffff }, - { 0x0000a358, 0x79a8aa1f }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x08000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, - { 0x0000a3e0, 0x000001ce }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020015}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + 
{0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + {0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00004030, 0x00000002}, + {0x0000403c, 0x00000002}, + {0x00007010, 0x00000000}, + {0x00007038, 0x000004c2}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x40000000}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x000080c0, 0x2a82301a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x32143320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, 
+ {0x000081c4, 0x00000000}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, + {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008264, 0x88000010}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x00000000}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x00008300, 0x00000000}, + {0x00008304, 0x00000000}, + {0x00008308, 0x00000000}, + {0x0000830c, 0x00000000}, + {0x00008310, 0x00000000}, + {0x00008314, 0x00000000}, + {0x00008318, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00070000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x000107ff}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xad848e19}, + {0x00009810, 0x7d14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x00009840, 0x206a002e}, + {0x0000984c, 0x1284233c}, + {0x00009854, 0x00000859}, + {0x00009900, 0x00000000}, + {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x05100000}, + {0x0000a920, 0x05100000}, + {0x0000b920, 0x05100000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009934, 0x1e1f2022}, + {0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009948, 0x9280b212}, + {0x0000994c, 0x00020028}, + {0x00009954, 0x5d50e188}, + {0x00009958, 0x00081fff}, + {0x0000c95c, 0x004b6a8e}, + {0x0000c968, 0x000003ce}, + {0x00009970, 0x190fb515}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x00009980, 0x00000000}, + {0x00009984, 0x00000000}, + {0x00009988, 0x00000000}, + {0x0000998c, 0x00000000}, + {0x00009990, 0x00000000}, + {0x00009994, 0x00000000}, + {0x00009998, 0x00000000}, + {0x0000999c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x001fff00}, + {0x000099ac, 0x00000000}, + {0x000099b0, 0x03051000}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000200}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x000000aa}, + {0x000099fc, 0x00001042}, + {0x00009b00, 0x00000000}, + {0x00009b04, 0x00000001}, + {0x00009b08, 0x00000002}, + {0x00009b0c, 0x00000003}, + {0x00009b10, 0x00000004}, + {0x00009b14, 0x00000005}, + {0x00009b18, 0x00000008}, + {0x00009b1c, 0x00000009}, + {0x00009b20, 0x0000000a}, + {0x00009b24, 0x0000000b}, + {0x00009b28, 0x0000000c}, + {0x00009b2c, 0x0000000d}, + {0x00009b30, 0x00000010}, + {0x00009b34, 0x00000011}, + {0x00009b38, 0x00000012}, + {0x00009b3c, 0x00000013}, + {0x00009b40, 0x00000014}, + {0x00009b44, 0x00000015}, + {0x00009b48, 
0x00000018}, + {0x00009b4c, 0x00000019}, + {0x00009b50, 0x0000001a}, + {0x00009b54, 0x0000001b}, + {0x00009b58, 0x0000001c}, + {0x00009b5c, 0x0000001d}, + {0x00009b60, 0x00000020}, + {0x00009b64, 0x00000021}, + {0x00009b68, 0x00000022}, + {0x00009b6c, 0x00000023}, + {0x00009b70, 0x00000024}, + {0x00009b74, 0x00000025}, + {0x00009b78, 0x00000028}, + {0x00009b7c, 0x00000029}, + {0x00009b80, 0x0000002a}, + {0x00009b84, 0x0000002b}, + {0x00009b88, 0x0000002c}, + {0x00009b8c, 0x0000002d}, + {0x00009b90, 0x00000030}, + {0x00009b94, 0x00000031}, + {0x00009b98, 0x00000032}, + {0x00009b9c, 0x00000033}, + {0x00009ba0, 0x00000034}, + {0x00009ba4, 0x00000035}, + {0x00009ba8, 0x00000035}, + {0x00009bac, 0x00000035}, + {0x00009bb0, 0x00000035}, + {0x00009bb4, 0x00000035}, + {0x00009bb8, 0x00000035}, + {0x00009bbc, 0x00000035}, + {0x00009bc0, 0x00000035}, + {0x00009bc4, 0x00000035}, + {0x00009bc8, 0x00000035}, + {0x00009bcc, 0x00000035}, + {0x00009bd0, 0x00000035}, + {0x00009bd4, 0x00000035}, + {0x00009bd8, 0x00000035}, + {0x00009bdc, 0x00000035}, + {0x00009be0, 0x00000035}, + {0x00009be4, 0x00000035}, + {0x00009be8, 0x00000035}, + {0x00009bec, 0x00000035}, + {0x00009bf0, 0x00000035}, + {0x00009bf4, 0x00000035}, + {0x00009bf8, 0x00000010}, + {0x00009bfc, 0x0000001a}, + {0x0000a210, 0x40806333}, + {0x0000a214, 0x00106c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 0x018830c6}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x00000bb5}, + {0x0000a22c, 0x00000011}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a23c, 0x13c889af}, + {0x0000a240, 0x38490a20}, + {0x0000a244, 0x00007bb6}, + {0x0000a248, 0x0fff3ffc}, + {0x0000a24c, 0x00000001}, + {0x0000a250, 0x0000a000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0cc75380}, + {0x0000a25c, 0x0f0f0f01}, + {0x0000a260, 0xdfa91f01}, + {0x0000a268, 0x00000000}, + {0x0000a26c, 0x0e79e5c6}, + {0x0000b26c, 0x0e79e5c6}, + {0x0000c26c, 0x0e79e5c6}, + {0x0000d270, 0x00820820}, + {0x0000a278, 0x1ce739ce}, + {0x0000a27c, 0x051701ce}, + {0x0000a338, 0x00000000}, + {0x0000a33c, 0x00000000}, + {0x0000a340, 0x00000000}, + {0x0000a344, 0x00000000}, + {0x0000a348, 0x3fffffff}, + {0x0000a34c, 0x3fffffff}, + {0x0000a350, 0x3fffffff}, + {0x0000a354, 0x0003ffff}, + {0x0000a358, 0x79a8aa1f}, + {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, + {0x0000a388, 0x08000000}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a394, 0x1ce739ce}, + {0x0000a398, 0x000001ce}, + {0x0000a39c, 0x00000001}, + {0x0000a3a0, 0x00000000}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0x00000000}, + {0x0000a3ac, 0x00000000}, + {0x0000a3b0, 0x00000000}, + {0x0000a3b4, 0x00000000}, + {0x0000a3b8, 0x00000000}, + {0x0000a3bc, 0x00000000}, + {0x0000a3c0, 0x00000000}, + {0x0000a3c4, 0x00000000}, + {0x0000a3c8, 0x00000246}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3dc, 0x1ce739ce}, + {0x0000a3e0, 0x000001ce}, }; static const u32 ar5416Bank0[][2] = { - { 0x000098b0, 0x1e5795e5 }, - { 0x000098e0, 0x02008020 }, + /* Addr allmodes */ + {0x000098b0, 0x1e5795e5}, + {0x000098e0, 0x02008020}, }; static const u32 ar5416BB_RfGain[][3] = { - { 0x00009a00, 0x00000000, 0x00000000 }, - { 0x00009a04, 0x00000040, 0x00000040 }, - { 0x00009a08, 0x00000080, 0x00000080 }, - { 0x00009a0c, 0x000001a1, 
0x00000141 }, - { 0x00009a10, 0x000001e1, 0x00000181 }, - { 0x00009a14, 0x00000021, 0x000001c1 }, - { 0x00009a18, 0x00000061, 0x00000001 }, - { 0x00009a1c, 0x00000168, 0x00000041 }, - { 0x00009a20, 0x000001a8, 0x000001a8 }, - { 0x00009a24, 0x000001e8, 0x000001e8 }, - { 0x00009a28, 0x00000028, 0x00000028 }, - { 0x00009a2c, 0x00000068, 0x00000068 }, - { 0x00009a30, 0x00000189, 0x000000a8 }, - { 0x00009a34, 0x000001c9, 0x00000169 }, - { 0x00009a38, 0x00000009, 0x000001a9 }, - { 0x00009a3c, 0x00000049, 0x000001e9 }, - { 0x00009a40, 0x00000089, 0x00000029 }, - { 0x00009a44, 0x00000170, 0x00000069 }, - { 0x00009a48, 0x000001b0, 0x00000190 }, - { 0x00009a4c, 0x000001f0, 0x000001d0 }, - { 0x00009a50, 0x00000030, 0x00000010 }, - { 0x00009a54, 0x00000070, 0x00000050 }, - { 0x00009a58, 0x00000191, 0x00000090 }, - { 0x00009a5c, 0x000001d1, 0x00000151 }, - { 0x00009a60, 0x00000011, 0x00000191 }, - { 0x00009a64, 0x00000051, 0x000001d1 }, - { 0x00009a68, 0x00000091, 0x00000011 }, - { 0x00009a6c, 0x000001b8, 0x00000051 }, - { 0x00009a70, 0x000001f8, 0x00000198 }, - { 0x00009a74, 0x00000038, 0x000001d8 }, - { 0x00009a78, 0x00000078, 0x00000018 }, - { 0x00009a7c, 0x00000199, 0x00000058 }, - { 0x00009a80, 0x000001d9, 0x00000098 }, - { 0x00009a84, 0x00000019, 0x00000159 }, - { 0x00009a88, 0x00000059, 0x00000199 }, - { 0x00009a8c, 0x00000099, 0x000001d9 }, - { 0x00009a90, 0x000000d9, 0x00000019 }, - { 0x00009a94, 0x000000f9, 0x00000059 }, - { 0x00009a98, 0x000000f9, 0x00000099 }, - { 0x00009a9c, 0x000000f9, 0x000000d9 }, - { 0x00009aa0, 0x000000f9, 0x000000f9 }, - { 0x00009aa4, 0x000000f9, 0x000000f9 }, - { 0x00009aa8, 0x000000f9, 0x000000f9 }, - { 0x00009aac, 0x000000f9, 0x000000f9 }, - { 0x00009ab0, 0x000000f9, 0x000000f9 }, - { 0x00009ab4, 0x000000f9, 0x000000f9 }, - { 0x00009ab8, 0x000000f9, 0x000000f9 }, - { 0x00009abc, 0x000000f9, 0x000000f9 }, - { 0x00009ac0, 0x000000f9, 0x000000f9 }, - { 0x00009ac4, 0x000000f9, 0x000000f9 }, - { 0x00009ac8, 0x000000f9, 0x000000f9 }, - { 0x00009acc, 0x000000f9, 0x000000f9 }, - { 0x00009ad0, 0x000000f9, 0x000000f9 }, - { 0x00009ad4, 0x000000f9, 0x000000f9 }, - { 0x00009ad8, 0x000000f9, 0x000000f9 }, - { 0x00009adc, 0x000000f9, 0x000000f9 }, - { 0x00009ae0, 0x000000f9, 0x000000f9 }, - { 0x00009ae4, 0x000000f9, 0x000000f9 }, - { 0x00009ae8, 0x000000f9, 0x000000f9 }, - { 0x00009aec, 0x000000f9, 0x000000f9 }, - { 0x00009af0, 0x000000f9, 0x000000f9 }, - { 0x00009af4, 0x000000f9, 0x000000f9 }, - { 0x00009af8, 0x000000f9, 0x000000f9 }, - { 0x00009afc, 0x000000f9, 0x000000f9 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x00009a00, 0x00000000, 0x00000000}, + {0x00009a04, 0x00000040, 0x00000040}, + {0x00009a08, 0x00000080, 0x00000080}, + {0x00009a0c, 0x000001a1, 0x00000141}, + {0x00009a10, 0x000001e1, 0x00000181}, + {0x00009a14, 0x00000021, 0x000001c1}, + {0x00009a18, 0x00000061, 0x00000001}, + {0x00009a1c, 0x00000168, 0x00000041}, + {0x00009a20, 0x000001a8, 0x000001a8}, + {0x00009a24, 0x000001e8, 0x000001e8}, + {0x00009a28, 0x00000028, 0x00000028}, + {0x00009a2c, 0x00000068, 0x00000068}, + {0x00009a30, 0x00000189, 0x000000a8}, + {0x00009a34, 0x000001c9, 0x00000169}, + {0x00009a38, 0x00000009, 0x000001a9}, + {0x00009a3c, 0x00000049, 0x000001e9}, + {0x00009a40, 0x00000089, 0x00000029}, + {0x00009a44, 0x00000170, 0x00000069}, + {0x00009a48, 0x000001b0, 0x00000190}, + {0x00009a4c, 0x000001f0, 0x000001d0}, + {0x00009a50, 0x00000030, 0x00000010}, + {0x00009a54, 0x00000070, 0x00000050}, + {0x00009a58, 0x00000191, 0x00000090}, + {0x00009a5c, 0x000001d1, 0x00000151}, + {0x00009a60, 0x00000011, 
0x00000191}, + {0x00009a64, 0x00000051, 0x000001d1}, + {0x00009a68, 0x00000091, 0x00000011}, + {0x00009a6c, 0x000001b8, 0x00000051}, + {0x00009a70, 0x000001f8, 0x00000198}, + {0x00009a74, 0x00000038, 0x000001d8}, + {0x00009a78, 0x00000078, 0x00000018}, + {0x00009a7c, 0x00000199, 0x00000058}, + {0x00009a80, 0x000001d9, 0x00000098}, + {0x00009a84, 0x00000019, 0x00000159}, + {0x00009a88, 0x00000059, 0x00000199}, + {0x00009a8c, 0x00000099, 0x000001d9}, + {0x00009a90, 0x000000d9, 0x00000019}, + {0x00009a94, 0x000000f9, 0x00000059}, + {0x00009a98, 0x000000f9, 0x00000099}, + {0x00009a9c, 0x000000f9, 0x000000d9}, + {0x00009aa0, 0x000000f9, 0x000000f9}, + {0x00009aa4, 0x000000f9, 0x000000f9}, + {0x00009aa8, 0x000000f9, 0x000000f9}, + {0x00009aac, 0x000000f9, 0x000000f9}, + {0x00009ab0, 0x000000f9, 0x000000f9}, + {0x00009ab4, 0x000000f9, 0x000000f9}, + {0x00009ab8, 0x000000f9, 0x000000f9}, + {0x00009abc, 0x000000f9, 0x000000f9}, + {0x00009ac0, 0x000000f9, 0x000000f9}, + {0x00009ac4, 0x000000f9, 0x000000f9}, + {0x00009ac8, 0x000000f9, 0x000000f9}, + {0x00009acc, 0x000000f9, 0x000000f9}, + {0x00009ad0, 0x000000f9, 0x000000f9}, + {0x00009ad4, 0x000000f9, 0x000000f9}, + {0x00009ad8, 0x000000f9, 0x000000f9}, + {0x00009adc, 0x000000f9, 0x000000f9}, + {0x00009ae0, 0x000000f9, 0x000000f9}, + {0x00009ae4, 0x000000f9, 0x000000f9}, + {0x00009ae8, 0x000000f9, 0x000000f9}, + {0x00009aec, 0x000000f9, 0x000000f9}, + {0x00009af0, 0x000000f9, 0x000000f9}, + {0x00009af4, 0x000000f9, 0x000000f9}, + {0x00009af8, 0x000000f9, 0x000000f9}, + {0x00009afc, 0x000000f9, 0x000000f9}, }; static const u32 ar5416Bank1[][2] = { - { 0x000098b0, 0x02108421 }, - { 0x000098ec, 0x00000008 }, + /* Addr allmodes */ + {0x000098b0, 0x02108421}, + {0x000098ec, 0x00000008}, }; static const u32 ar5416Bank2[][2] = { - { 0x000098b0, 0x0e73ff17 }, - { 0x000098e0, 0x00000420 }, + /* Addr allmodes */ + {0x000098b0, 0x0e73ff17}, + {0x000098e0, 0x00000420}, }; static const u32 ar5416Bank3[][3] = { - { 0x000098f0, 0x01400018, 0x01c00018 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x000098f0, 0x01400018, 0x01c00018}, }; static const u32 ar5416Bank6[][3] = { - - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00e00000, 0x00e00000 }, - { 0x0000989c, 0x005e0000, 0x005e0000 }, - { 0x0000989c, 0x00120000, 0x00120000 }, - { 0x0000989c, 0x00620000, 0x00620000 }, - { 0x0000989c, 0x00020000, 0x00020000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x40ff0000, 0x40ff0000 }, - { 0x0000989c, 0x005f0000, 0x005f0000 }, - { 0x0000989c, 0x00870000, 0x00870000 }, - { 0x0000989c, 0x00f90000, 0x00f90000 }, - { 0x0000989c, 0x007b0000, 0x007b0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00f50000, 0x00f50000 }, - { 0x0000989c, 0x00dc0000, 0x00dc0000 }, - { 0x0000989c, 0x00110000, 0x00110000 }, - { 0x0000989c, 0x006100a8, 0x006100a8 }, - { 0x0000989c, 0x004210a2, 0x004210a2 }, - { 0x0000989c, 0x0014008f, 0x0014008f }, - { 0x0000989c, 0x00c40003, 0x00c40003 }, - { 0x0000989c, 0x003000f2, 0x003000f2 }, - { 0x0000989c, 0x00440016, 0x00440016 }, - { 0x0000989c, 0x00410040, 0x00410040 }, - { 0x0000989c, 0x0001805e, 0x0001805e }, - { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, - { 0x0000989c, 0x000000f1, 0x000000f1 }, - { 0x0000989c, 0x00002081, 0x00002081 }, - { 0x0000989c, 0x000000d4, 0x000000d4 }, - { 0x000098d0, 0x0000000f, 0x0010000f }, + /* Addr 5G_HT20 5G_HT40 */ + 
{0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00e00000, 0x00e00000}, + {0x0000989c, 0x005e0000, 0x005e0000}, + {0x0000989c, 0x00120000, 0x00120000}, + {0x0000989c, 0x00620000, 0x00620000}, + {0x0000989c, 0x00020000, 0x00020000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x40ff0000, 0x40ff0000}, + {0x0000989c, 0x005f0000, 0x005f0000}, + {0x0000989c, 0x00870000, 0x00870000}, + {0x0000989c, 0x00f90000, 0x00f90000}, + {0x0000989c, 0x007b0000, 0x007b0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00f50000, 0x00f50000}, + {0x0000989c, 0x00dc0000, 0x00dc0000}, + {0x0000989c, 0x00110000, 0x00110000}, + {0x0000989c, 0x006100a8, 0x006100a8}, + {0x0000989c, 0x004210a2, 0x004210a2}, + {0x0000989c, 0x0014008f, 0x0014008f}, + {0x0000989c, 0x00c40003, 0x00c40003}, + {0x0000989c, 0x003000f2, 0x003000f2}, + {0x0000989c, 0x00440016, 0x00440016}, + {0x0000989c, 0x00410040, 0x00410040}, + {0x0000989c, 0x0001805e, 0x0001805e}, + {0x0000989c, 0x0000c0ab, 0x0000c0ab}, + {0x0000989c, 0x000000f1, 0x000000f1}, + {0x0000989c, 0x00002081, 0x00002081}, + {0x0000989c, 0x000000d4, 0x000000d4}, + {0x000098d0, 0x0000000f, 0x0010000f}, }; static const u32 ar5416Bank6TPC[][3] = { - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00e00000, 0x00e00000 }, - { 0x0000989c, 0x005e0000, 0x005e0000 }, - { 0x0000989c, 0x00120000, 0x00120000 }, - { 0x0000989c, 0x00620000, 0x00620000 }, - { 0x0000989c, 0x00020000, 0x00020000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x40ff0000, 0x40ff0000 }, - { 0x0000989c, 0x005f0000, 0x005f0000 }, - { 0x0000989c, 0x00870000, 0x00870000 }, - { 0x0000989c, 0x00f90000, 0x00f90000 }, - { 0x0000989c, 0x007b0000, 0x007b0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00f50000, 0x00f50000 }, - { 0x0000989c, 0x00dc0000, 0x00dc0000 }, - { 0x0000989c, 0x00110000, 0x00110000 }, - { 0x0000989c, 0x006100a8, 0x006100a8 }, - { 0x0000989c, 0x00423022, 0x00423022 }, - { 0x0000989c, 0x201400df, 0x201400df }, - { 0x0000989c, 0x00c40002, 0x00c40002 }, - { 0x0000989c, 0x003000f2, 0x003000f2 }, - { 0x0000989c, 0x00440016, 0x00440016 }, - { 0x0000989c, 0x00410040, 0x00410040 }, - { 0x0000989c, 0x0001805e, 0x0001805e }, - { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, - { 0x0000989c, 0x000000e1, 0x000000e1 }, - { 0x0000989c, 0x00007081, 0x00007081 }, - { 0x0000989c, 0x000000d4, 0x000000d4 }, - { 0x000098d0, 0x0000000f, 0x0010000f }, + /* Addr 5G_HT20 5G_HT40 */ + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00e00000, 0x00e00000}, + {0x0000989c, 0x005e0000, 0x005e0000}, + {0x0000989c, 0x00120000, 0x00120000}, + {0x0000989c, 0x00620000, 0x00620000}, + {0x0000989c, 0x00020000, 0x00020000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x40ff0000, 0x40ff0000}, + {0x0000989c, 0x005f0000, 0x005f0000}, + {0x0000989c, 0x00870000, 0x00870000}, + {0x0000989c, 0x00f90000, 0x00f90000}, + {0x0000989c, 0x007b0000, 0x007b0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00f50000, 0x00f50000}, + {0x0000989c, 0x00dc0000, 0x00dc0000}, + {0x0000989c, 
0x00110000, 0x00110000}, + {0x0000989c, 0x006100a8, 0x006100a8}, + {0x0000989c, 0x00423022, 0x00423022}, + {0x0000989c, 0x201400df, 0x201400df}, + {0x0000989c, 0x00c40002, 0x00c40002}, + {0x0000989c, 0x003000f2, 0x003000f2}, + {0x0000989c, 0x00440016, 0x00440016}, + {0x0000989c, 0x00410040, 0x00410040}, + {0x0000989c, 0x0001805e, 0x0001805e}, + {0x0000989c, 0x0000c0ab, 0x0000c0ab}, + {0x0000989c, 0x000000e1, 0x000000e1}, + {0x0000989c, 0x00007081, 0x00007081}, + {0x0000989c, 0x000000d4, 0x000000d4}, + {0x000098d0, 0x0000000f, 0x0010000f}, }; static const u32 ar5416Bank7[][2] = { - { 0x0000989c, 0x00000500 }, - { 0x0000989c, 0x00000800 }, - { 0x000098cc, 0x0000000e }, + /* Addr allmodes */ + {0x0000989c, 0x00000500}, + {0x0000989c, 0x00000800}, + {0x000098cc, 0x0000000e}, }; static const u32 ar5416Addac[][2] = { - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000003 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x0000000c }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000030 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000060 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000058 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x000098cc, 0x00000000 }, -}; - -static const u32 ar5416Modes_9100[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, - { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 }, - { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e }, - { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, - { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, - { 0x0000c864, 0x0001ce00, 0x0001ce00, 
0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, - { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, - { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d }, - { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 }, - { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, - { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e }, - { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff }, -#ifdef TB243 - { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, - { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, - { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 }, -#else - { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, - { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, - { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, - { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, -#endif - { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 }, - { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, - { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, - { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, - { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, - { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, - { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, - { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, - { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, - { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, - { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, - { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, - { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, - { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, - { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + /* Addr allmodes */ + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000003}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x0000000c}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000030}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000060}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000058}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x000098cc, 0x00000000}, }; -#endif /* INITVALS_AR5008_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index b2c17c98bb386ca4c1cbe62a0b64d705ea496511..3d2c8679bc85301f83f3499a161d3cbe91fbd1e3 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -19,7 +19,30 @@ #include "../regd.h" #include "ar9002_phy.h" -/* All code below is for non single-chip solutions */ +/* All code below is for AR5008, AR9001, AR9002 */ + +static const int firstep_table[] = +/* level: 0 1 2 3 4 5 6 7 8 */ + { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ + +static const int cycpwrThr1_table[] = +/* level: 0 1 2 3 4 5 6 7 8 */ + { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */ + +/* + * register values to turn OFDM weak signal detection OFF + */ +static const int m1ThreshLow_off = 127; +static const int m2ThreshLow_off = 127; +static const int m1Thresh_off = 127; +static const int m2Thresh_off = 127; +static const int m2CountThr_off = 31; +static const int m2CountThrLow_off = 63; +static const int m1ThreshLowExt_off = 127; +static const int m2ThreshLowExt_off = 127; +static const int m1ThreshExt_off = 127; +static const int m2ThreshExt_off = 127; + /** * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters @@ -742,17 +765,6 @@ static int ar5008_hw_process_ini(struct ath_hw *ah, return -EINVAL; } - if (AR_SREV_9287_12_OR_LATER(ah)) { - /* Enable ASYNC FIFO */ - REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, - AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL); - REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO); - REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, - AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); - REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, - AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); - } - /* * Set correct baseband to analog shift setting to * access analog chips. 
@@ -1037,8 +1049,9 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah, return pll; } -static bool ar5008_hw_ani_control(struct ath_hw *ah, - enum ath9k_ani_cmd cmd, int param) +static bool ar5008_hw_ani_control_old(struct ath_hw *ah, + enum ath9k_ani_cmd cmd, + int param) { struct ar5416AniState *aniState = ah->curani; struct ath_common *common = ath9k_hw_common(ah); @@ -1220,129 +1233,377 @@ static bool ar5008_hw_ani_control(struct ath_hw *ah, return true; } +static bool ar5008_hw_ani_control_new(struct ath_hw *ah, + enum ath9k_ani_cmd cmd, + int param) +{ + struct ar5416AniState *aniState = ah->curani; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_channel *chan = ah->curchan; + s32 value, value2; + + switch (cmd & ah->ani_function) { + case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ + /* + * on == 1 means ofdm weak signal detection is ON + * on == 1 is the default, for less noise immunity + * + * on == 0 means ofdm weak signal detection is OFF + * on == 0 means more noise imm + */ + u32 on = param ? 1 : 0; + /* + * make register setting for default + * (weak sig detect ON) come from INI file + */ + int m1ThreshLow = on ? + aniState->iniDef.m1ThreshLow : m1ThreshLow_off; + int m2ThreshLow = on ? + aniState->iniDef.m2ThreshLow : m2ThreshLow_off; + int m1Thresh = on ? + aniState->iniDef.m1Thresh : m1Thresh_off; + int m2Thresh = on ? + aniState->iniDef.m2Thresh : m2Thresh_off; + int m2CountThr = on ? + aniState->iniDef.m2CountThr : m2CountThr_off; + int m2CountThrLow = on ? + aniState->iniDef.m2CountThrLow : m2CountThrLow_off; + int m1ThreshLowExt = on ? + aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off; + int m2ThreshLowExt = on ? + aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off; + int m1ThreshExt = on ? + aniState->iniDef.m1ThreshExt : m1ThreshExt_off; + int m2ThreshExt = on ? + aniState->iniDef.m2ThreshExt : m2ThreshExt_off; + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M1_THRESH_LOW, + m1ThreshLow); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2_THRESH_LOW, + m2ThreshLow); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M1_THRESH, m1Thresh); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2_THRESH, m2Thresh); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2COUNT_THR, m2CountThr); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, + m2CountThrLow); + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt); + + if (on) + REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); + else + REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); + + if (!on != aniState->ofdmWeakSigDetectOff) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: ofdm weak signal: %s=>%s\n", + chan->channel, + !aniState->ofdmWeakSigDetectOff ? + "on" : "off", + on ? 
"on" : "off"); + if (on) + ah->stats.ast_ani_ofdmon++; + else + ah->stats.ast_ani_ofdmoff++; + aniState->ofdmWeakSigDetectOff = !on; + } + break; + } + case ATH9K_ANI_FIRSTEP_LEVEL:{ + u32 level = param; + + if (level >= ARRAY_SIZE(firstep_table)) { + ath_print(common, ATH_DBG_ANI, + "ATH9K_ANI_FIRSTEP_LEVEL: level " + "out of range (%u > %u)\n", + level, + (unsigned) ARRAY_SIZE(firstep_table)); + return false; + } + + /* + * make register setting relative to default + * from INI file & cap value + */ + value = firstep_table[level] - + firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + + aniState->iniDef.firstep; + if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) + value = ATH9K_SIG_FIRSTEP_SETTING_MIN; + if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX) + value = ATH9K_SIG_FIRSTEP_SETTING_MAX; + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRSTEP, + value); + /* + * we need to set first step low register too + * make register setting relative to default + * from INI file & cap value + */ + value2 = firstep_table[level] - + firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + + aniState->iniDef.firstepLow; + if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) + value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; + if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX) + value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX; + + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW, + AR_PHY_FIND_SIG_FIRSTEP_LOW, value2); + + if (level != aniState->firstepLevel) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "firstep[level]=%d ini=%d\n", + chan->channel, + aniState->firstepLevel, + level, + ATH9K_ANI_FIRSTEP_LVL_NEW, + value, + aniState->iniDef.firstep); + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "firstep_low[level]=%d ini=%d\n", + chan->channel, + aniState->firstepLevel, + level, + ATH9K_ANI_FIRSTEP_LVL_NEW, + value2, + aniState->iniDef.firstepLow); + if (level > aniState->firstepLevel) + ah->stats.ast_ani_stepup++; + else if (level < aniState->firstepLevel) + ah->stats.ast_ani_stepdown++; + aniState->firstepLevel = level; + } + break; + } + case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ + u32 level = param; + + if (level >= ARRAY_SIZE(cycpwrThr1_table)) { + ath_print(common, ATH_DBG_ANI, + "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level " + "out of range (%u > %u)\n", + level, + (unsigned) ARRAY_SIZE(cycpwrThr1_table)); + return false; + } + /* + * make register setting relative to default + * from INI file & cap value + */ + value = cycpwrThr1_table[level] - + cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + + aniState->iniDef.cycpwrThr1; + if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) + value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; + if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX) + value = ATH9K_SIG_SPUR_IMM_SETTING_MAX; + REG_RMW_FIELD(ah, AR_PHY_TIMING5, + AR_PHY_TIMING5_CYCPWR_THR1, + value); + + /* + * set AR_PHY_EXT_CCA for extension channel + * make register setting relative to default + * from INI file & cap value + */ + value2 = cycpwrThr1_table[level] - + cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + + aniState->iniDef.cycpwrThr1Ext; + if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) + value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; + if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX) + value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX; + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, + AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2); + + if (level != aniState->spurImmunityLevel) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "cycpwrThr1[level]=%d ini=%d\n", + chan->channel, + aniState->spurImmunityLevel, + level, + ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, + 
value, + aniState->iniDef.cycpwrThr1); + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "cycpwrThr1Ext[level]=%d ini=%d\n", + chan->channel, + aniState->spurImmunityLevel, + level, + ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, + value2, + aniState->iniDef.cycpwrThr1Ext); + if (level > aniState->spurImmunityLevel) + ah->stats.ast_ani_spurup++; + else if (level < aniState->spurImmunityLevel) + ah->stats.ast_ani_spurdown++; + aniState->spurImmunityLevel = level; + } + break; + } + case ATH9K_ANI_MRC_CCK: + /* + * You should not see this as AR5008, AR9001, AR9002 + * does not have hardware support for MRC CCK. + */ + WARN_ON(1); + break; + case ATH9K_ANI_PRESENT: + break; + default: + ath_print(common, ATH_DBG_ANI, + "invalid cmd %u\n", cmd); + return false; + } + + ath_print(common, ATH_DBG_ANI, + "ANI parameters: SI=%d, ofdmWS=%s FS=%d " + "MRCcck=%s listenTime=%d CC=%d listen=%d " + "ofdmErrs=%d cckErrs=%d\n", + aniState->spurImmunityLevel, + !aniState->ofdmWeakSigDetectOff ? "on" : "off", + aniState->firstepLevel, + !aniState->mrcCCKOff ? "on" : "off", + aniState->listenTime, + aniState->cycleCount, + aniState->listenTime, + aniState->ofdmPhyErrCount, + aniState->cckPhyErrCount); + return true; +} + static void ar5008_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { - struct ath_common *common = ath9k_hw_common(ah); int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 0] is %d\n", nf); - nfarray[0] = nf; + nfarray[0] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 1] is %d\n", nf); - nfarray[1] = nf; + nfarray[1] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 2] is %d\n", nf); - nfarray[2] = nf; + nfarray[2] = sign_extend(nf, 9); + + if (!IS_CHAN_HT40(ah->curchan)) + return; nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 0] is %d\n", nf); - nfarray[3] = nf; + nfarray[3] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 1] is %d\n", nf); - nfarray[4] = nf; + nfarray[4] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 2] is %d\n", nf); - nfarray[5] = nf; + nfarray[5] = sign_extend(nf, 9); } -static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) +/* + * Initialize the ANI register values with default (ini) values. + * This routine is called during a (full) hardware reset after + * all the registers are initialised from the INI. 
+ */ +static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) { - struct ath9k_nfcal_hist *h; - int i, j; - int32_t val; - const u32 ar5416_cca_regs[6] = { - AR_PHY_CCA, - AR_PHY_CH1_CCA, - AR_PHY_CH2_CCA, - AR_PHY_EXT_CCA, - AR_PHY_CH1_EXT_CCA, - AR_PHY_CH2_EXT_CCA - }; - u8 chainmask, rx_chain_status; - - rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK); - if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) - chainmask = 0x9; - else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) { - if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4)) - chainmask = 0x1B; - else - chainmask = 0x09; - } else { - if (rx_chain_status & 0x4) - chainmask = 0x3F; - else if (rx_chain_status & 0x2) - chainmask = 0x1B; - else - chainmask = 0x09; - } - - h = ah->nfCalHist; - - for (i = 0; i < NUM_NF_READINGS; i++) { - if (chainmask & (1 << i)) { - val = REG_READ(ah, ar5416_cca_regs[i]); - val &= 0xFFFFFE00; - val |= (((u32) (h[i].privNF) << 1) & 0x1ff); - REG_WRITE(ah, ar5416_cca_regs[i], val); - } - } - - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, - AR_PHY_AGC_CONTROL_ENABLE_NF); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, - AR_PHY_AGC_CONTROL_NO_UPDATE_NF); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); - - for (j = 0; j < 5; j++) { - if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & - AR_PHY_AGC_CONTROL_NF) == 0) - break; - udelay(50); - } + struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_channel *chan = ah->curchan; + struct ath9k_ani_default *iniDef; + int index; + u32 val; - ENABLE_REGWRITE_BUFFER(ah); + index = ath9k_hw_get_ani_channel_idx(ah, chan); + aniState = &ah->ani[index]; + ah->curani = aniState; + iniDef = &aniState->iniDef; - for (i = 0; i < NUM_NF_READINGS; i++) { - if (chainmask & (1 << i)) { - val = REG_READ(ah, ar5416_cca_regs[i]); - val &= 0xFFFFFE00; - val |= (((u32) (-50) << 1) & 0x1ff); - REG_WRITE(ah, ar5416_cca_regs[i], val); - } - } + ath_print(common, ATH_DBG_ANI, + "ver %d.%d opmode %u chan %d Mhz/0x%x\n", + ah->hw_version.macVersion, + ah->hw_version.macRev, + ah->opmode, + chan->channel, + chan->channelFlags); + + val = REG_READ(ah, AR_PHY_SFCORR); + iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH); + iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH); + iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR); + + val = REG_READ(ah, AR_PHY_SFCORR_LOW); + iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW); + iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW); + iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW); + + val = REG_READ(ah, AR_PHY_SFCORR_EXT); + iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH); + iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH); + iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW); + iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW); + iniDef->firstep = REG_READ_FIELD(ah, + AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRSTEP); + iniDef->firstepLow = REG_READ_FIELD(ah, + AR_PHY_FIND_SIG_LOW, + AR_PHY_FIND_SIG_FIRSTEP_LOW); + iniDef->cycpwrThr1 = REG_READ_FIELD(ah, + AR_PHY_TIMING5, + AR_PHY_TIMING5_CYCPWR_THR1); + iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah, + AR_PHY_EXT_CCA, + AR_PHY_EXT_TIMING5_CYCPWR_THR1); + + /* these levels just got reset to defaults by the INI */ + aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; + aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; + aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; + aniState->mrcCCKOff = true; /* not available on pre AR9003 */ + + aniState->cycleCount = 
0; +} - REGWRITE_BUFFER_FLUSH(ah); - DISABLE_REGWRITE_BUFFER(ah); +static void ar5008_hw_set_nf_limits(struct ath_hw *ah) +{ + ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ; + ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ; + ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_5416_2GHZ; + ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ; + ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ; + ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ; } void ar5008_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); + const u32 ar5416_cca_regs[6] = { + AR_PHY_CCA, + AR_PHY_CH1_CCA, + AR_PHY_CH2_CCA, + AR_PHY_EXT_CCA, + AR_PHY_CH1_EXT_CCA, + AR_PHY_CH2_EXT_CCA + }; priv_ops->rf_set_freq = ar5008_hw_set_channel; priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate; @@ -1361,9 +1622,13 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah) priv_ops->enable_rfkill = ar5008_hw_enable_rfkill; priv_ops->restore_chainmask = ar5008_restore_chainmask; priv_ops->set_diversity = ar5008_set_diversity; - priv_ops->ani_control = ar5008_hw_ani_control; priv_ops->do_getnf = ar5008_hw_do_getnf; - priv_ops->loadnf = ar5008_hw_loadnf; + + if (modparam_force_new_ani) { + priv_ops->ani_control = ar5008_hw_ani_control_new; + priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs; + } else + priv_ops->ani_control = ar5008_hw_ani_control_old; if (AR_SREV_9100(ah)) priv_ops->compute_pll_control = ar9100_hw_compute_pll_control; @@ -1371,4 +1636,7 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah) priv_ops->compute_pll_control = ar9160_hw_compute_pll_control; else priv_ops->compute_pll_control = ar5008_hw_compute_pll_control; + + ar5008_hw_set_nf_limits(ah); + memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs)); } diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h index 0b94bd385b0ab01540febe9f12f5d47c7a455576..69a94c7e45cb3df2b9b4ce79d5f89f0bdbe09b9d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h @@ -1,1254 +1,1357 @@ +/* + * Copyright (c) 2010 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +static const u32 ar5416Modes_9100[][6] = { + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a}, + {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001}, + {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007}, + {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0}, + {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2, 0x6c48b0e2}, + {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e}, + {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e}, + {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18}, + {0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0}, + {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081}, + {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016}, + {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d}, + {0x00009940, 0x00750604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204}, + {0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020}, + {0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e}, + {0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff}, + {0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0}, + {0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0}, + {0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0}, + {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120}, + {0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00}, + {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be}, + {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77}, + {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329}, + {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8}, + {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384}, + {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880}, + {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788}, + 
{0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa}, + {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000}, + {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402}, + {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06}, + {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b}, + {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b}, + {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a}, + {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf}, + {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f}, + {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f}, + {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f}, + {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, +}; static const u32 ar5416Common_9100[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 
0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00020010, 0x00000003 }, - { 0x00020038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x40000000 }, - { 0x00008054, 0x00004000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x000080c0, 0x2a82301a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008120, 0x08f04800 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0x00000000 }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c4, 0x00000000 }, - { 0x000081d0, 0x00003210 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x00008300, 0x00000000 }, - { 0x00008304, 0x00000000 }, - { 0x00008308, 0x00000000 }, - { 0x0000830c, 0x00000000 }, - { 0x00008310, 0x00000000 }, - { 0x00008314, 0x00000000 }, - { 0x00008318, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 
0x00000e00 }, - { 0x00008338, 0x00000000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xad848e19 }, - { 0x00009810, 0x7d14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x00009840, 0x206a01ae }, - { 0x0000984c, 0x1284233c }, - { 0x00009854, 0x00000859 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x05100000 }, - { 0x0000a920, 0x05100000 }, - { 0x0000b920, 0x05100000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280b212 }, - { 0x0000994c, 0x00020028 }, - { 0x0000c95c, 0x004b6a8e }, - { 0x0000c968, 0x000003ce }, - { 0x00009970, 0x190fb515 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x006f0000 }, - { 0x000099b0, 0x03051000 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000200 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099fc, 0x00001042 }, - { 0x00009b00, 0x00000000 }, - { 0x00009b04, 0x00000001 }, - { 0x00009b08, 0x00000002 }, - { 0x00009b0c, 0x00000003 }, - { 0x00009b10, 0x00000004 }, - { 0x00009b14, 0x00000005 }, - { 0x00009b18, 0x00000008 }, - { 0x00009b1c, 0x00000009 }, - { 0x00009b20, 0x0000000a }, - { 0x00009b24, 0x0000000b }, - { 0x00009b28, 0x0000000c }, - { 0x00009b2c, 0x0000000d }, - { 0x00009b30, 0x00000010 }, - { 0x00009b34, 0x00000011 }, - { 0x00009b38, 0x00000012 }, - { 0x00009b3c, 0x00000013 }, - { 0x00009b40, 0x00000014 }, - { 0x00009b44, 0x00000015 }, - { 0x00009b48, 0x00000018 }, - { 0x00009b4c, 0x00000019 }, - { 0x00009b50, 0x0000001a }, - { 0x00009b54, 0x0000001b }, - { 0x00009b58, 0x0000001c }, - { 0x00009b5c, 0x0000001d }, - { 0x00009b60, 0x00000020 }, - { 0x00009b64, 0x00000021 }, - { 0x00009b68, 0x00000022 }, - { 0x00009b6c, 0x00000023 }, - { 0x00009b70, 0x00000024 }, - { 0x00009b74, 0x00000025 }, - { 0x00009b78, 0x00000028 }, - { 0x00009b7c, 0x00000029 }, - { 0x00009b80, 0x0000002a }, - { 0x00009b84, 0x0000002b }, - { 0x00009b88, 0x0000002c }, - { 0x00009b8c, 0x0000002d }, - { 0x00009b90, 0x00000030 }, - { 0x00009b94, 0x00000031 }, - { 0x00009b98, 0x00000032 }, - { 0x00009b9c, 0x00000033 }, - { 0x00009ba0, 0x00000034 }, - { 0x00009ba4, 0x00000035 }, - { 0x00009ba8, 0x00000035 }, - { 0x00009bac, 0x00000035 }, - { 0x00009bb0, 0x00000035 }, - { 0x00009bb4, 0x00000035 }, - { 0x00009bb8, 0x00000035 }, - { 0x00009bbc, 0x00000035 }, - { 0x00009bc0, 0x00000035 }, - { 0x00009bc4, 0x00000035 }, - { 0x00009bc8, 0x00000035 }, - { 0x00009bcc, 0x00000035 }, - { 0x00009bd0, 0x00000035 }, - { 0x00009bd4, 0x00000035 }, - { 0x00009bd8, 0x00000035 }, - { 0x00009bdc, 0x00000035 }, - { 0x00009be0, 0x00000035 }, - { 0x00009be4, 0x00000035 }, - { 0x00009be8, 0x00000035 }, - { 0x00009bec, 0x00000035 }, - { 0x00009bf0, 0x00000035 }, - { 0x00009bf4, 0x00000035 }, - { 0x00009bf8, 0x00000010 }, - { 0x00009bfc, 0x0000001a }, 
- { 0x0000a210, 0x40806333 }, - { 0x0000a214, 0x00106c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x018830c6 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x001a0bb5 }, - { 0x0000a22c, 0x00000000 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a23c, 0x13c889ae }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00007bb6 }, - { 0x0000a248, 0x0fff3ffc }, - { 0x0000a24c, 0x00000001 }, - { 0x0000a250, 0x0000a000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cc75380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a268, 0x00000001 }, - { 0x0000a26c, 0x0ebae9c6 }, - { 0x0000b26c, 0x0ebae9c6 }, - { 0x0000c26c, 0x0ebae9c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000a27c, 0x050701ce }, - { 0x0000a338, 0x00000000 }, - { 0x0000a33c, 0x00000000 }, - { 0x0000a340, 0x00000000 }, - { 0x0000a344, 0x00000000 }, - { 0x0000a348, 0x3fffffff }, - { 0x0000a34c, 0x3fffffff }, - { 0x0000a350, 0x3fffffff }, - { 0x0000a354, 0x0003ffff }, - { 0x0000a358, 0x79a8aa33 }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, - { 0x0000a3e0, 0x000001ce }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020015}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + 
{0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00020010, 0x00000003}, + {0x00020038, 0x000004c2}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x40000000}, + {0x00008054, 0x00004000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x000080c0, 0x2a82301a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008120, 0x08f04800}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0x00000000}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x32143320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, + {0x000081c4, 0x00000000}, + {0x000081d0, 0x00003210}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, + {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x00000000}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x00008300, 0x00000000}, + {0x00008304, 0x00000000}, + {0x00008308, 0x00000000}, + {0x0000830c, 0x00000000}, + {0x00008310, 0x00000000}, + {0x00008314, 0x00000000}, 
+ {0x00008318, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00000000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x000107ff}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xad848e19}, + {0x00009810, 0x7d14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x00009840, 0x206a01ae}, + {0x0000984c, 0x1284233c}, + {0x00009854, 0x00000859}, + {0x00009900, 0x00000000}, + {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x05100000}, + {0x0000a920, 0x05100000}, + {0x0000b920, 0x05100000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009934, 0x1e1f2022}, + {0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009948, 0x9280b212}, + {0x0000994c, 0x00020028}, + {0x0000c95c, 0x004b6a8e}, + {0x0000c968, 0x000003ce}, + {0x00009970, 0x190fb515}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x00009980, 0x00000000}, + {0x00009984, 0x00000000}, + {0x00009988, 0x00000000}, + {0x0000998c, 0x00000000}, + {0x00009990, 0x00000000}, + {0x00009994, 0x00000000}, + {0x00009998, 0x00000000}, + {0x0000999c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x201fff00}, + {0x000099ac, 0x006f0000}, + {0x000099b0, 0x03051000}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000200}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x0cc80caa}, + {0x000099fc, 0x00001042}, + {0x00009b00, 0x00000000}, + {0x00009b04, 0x00000001}, + {0x00009b08, 0x00000002}, + {0x00009b0c, 0x00000003}, + {0x00009b10, 0x00000004}, + {0x00009b14, 0x00000005}, + {0x00009b18, 0x00000008}, + {0x00009b1c, 0x00000009}, + {0x00009b20, 0x0000000a}, + {0x00009b24, 0x0000000b}, + {0x00009b28, 0x0000000c}, + {0x00009b2c, 0x0000000d}, + {0x00009b30, 0x00000010}, + {0x00009b34, 0x00000011}, + {0x00009b38, 0x00000012}, + {0x00009b3c, 0x00000013}, + {0x00009b40, 0x00000014}, + {0x00009b44, 0x00000015}, + {0x00009b48, 0x00000018}, + {0x00009b4c, 0x00000019}, + {0x00009b50, 0x0000001a}, + {0x00009b54, 0x0000001b}, + {0x00009b58, 0x0000001c}, + {0x00009b5c, 0x0000001d}, + {0x00009b60, 0x00000020}, + {0x00009b64, 0x00000021}, + {0x00009b68, 0x00000022}, + {0x00009b6c, 0x00000023}, + {0x00009b70, 0x00000024}, + {0x00009b74, 0x00000025}, + {0x00009b78, 0x00000028}, + {0x00009b7c, 0x00000029}, + {0x00009b80, 0x0000002a}, + {0x00009b84, 0x0000002b}, + {0x00009b88, 0x0000002c}, + {0x00009b8c, 0x0000002d}, + {0x00009b90, 0x00000030}, + {0x00009b94, 0x00000031}, + {0x00009b98, 0x00000032}, + {0x00009b9c, 0x00000033}, + {0x00009ba0, 0x00000034}, + {0x00009ba4, 0x00000035}, + {0x00009ba8, 0x00000035}, + {0x00009bac, 0x00000035}, + {0x00009bb0, 0x00000035}, + {0x00009bb4, 0x00000035}, + {0x00009bb8, 0x00000035}, + {0x00009bbc, 0x00000035}, + {0x00009bc0, 0x00000035}, + {0x00009bc4, 0x00000035}, + {0x00009bc8, 0x00000035}, + {0x00009bcc, 0x00000035}, + {0x00009bd0, 0x00000035}, + {0x00009bd4, 0x00000035}, + {0x00009bd8, 0x00000035}, + {0x00009bdc, 0x00000035}, + {0x00009be0, 0x00000035}, + {0x00009be4, 0x00000035}, + {0x00009be8, 0x00000035}, + {0x00009bec, 0x00000035}, + {0x00009bf0, 0x00000035}, + {0x00009bf4, 0x00000035}, + {0x00009bf8, 0x00000010}, + {0x00009bfc, 0x0000001a}, + {0x0000a210, 0x40806333}, + {0x0000a214, 0x00106c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 
0x018830c6}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x001a0bb5}, + {0x0000a22c, 0x00000000}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a23c, 0x13c889af}, + {0x0000a240, 0x38490a20}, + {0x0000a244, 0x00007bb6}, + {0x0000a248, 0x0fff3ffc}, + {0x0000a24c, 0x00000001}, + {0x0000a250, 0x0000e000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0cc75380}, + {0x0000a25c, 0x0f0f0f01}, + {0x0000a260, 0xdfa91f01}, + {0x0000a268, 0x00000001}, + {0x0000a26c, 0x0ebae9c6}, + {0x0000b26c, 0x0ebae9c6}, + {0x0000c26c, 0x0ebae9c6}, + {0x0000d270, 0x00820820}, + {0x0000a278, 0x1ce739ce}, + {0x0000a27c, 0x050701ce}, + {0x0000a338, 0x00000000}, + {0x0000a33c, 0x00000000}, + {0x0000a340, 0x00000000}, + {0x0000a344, 0x00000000}, + {0x0000a348, 0x3fffffff}, + {0x0000a34c, 0x3fffffff}, + {0x0000a350, 0x3fffffff}, + {0x0000a354, 0x0003ffff}, + {0x0000a358, 0x79a8aa33}, + {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, + {0x0000a388, 0x0c000000}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a394, 0x1ce739ce}, + {0x0000a398, 0x000001ce}, + {0x0000a39c, 0x00000001}, + {0x0000a3a0, 0x00000000}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0x00000000}, + {0x0000a3ac, 0x00000000}, + {0x0000a3b0, 0x00000000}, + {0x0000a3b4, 0x00000000}, + {0x0000a3b8, 0x00000000}, + {0x0000a3bc, 0x00000000}, + {0x0000a3c0, 0x00000000}, + {0x0000a3c4, 0x00000000}, + {0x0000a3c8, 0x00000246}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3dc, 0x1ce739ce}, + {0x0000a3e0, 0x000001ce}, }; static const u32 ar5416Bank0_9100[][2] = { - { 0x000098b0, 0x1e5795e5 }, - { 0x000098e0, 0x02008020 }, + /* Addr allmodes */ + {0x000098b0, 0x1e5795e5}, + {0x000098e0, 0x02008020}, }; static const u32 ar5416BB_RfGain_9100[][3] = { - { 0x00009a00, 0x00000000, 0x00000000 }, - { 0x00009a04, 0x00000040, 0x00000040 }, - { 0x00009a08, 0x00000080, 0x00000080 }, - { 0x00009a0c, 0x000001a1, 0x00000141 }, - { 0x00009a10, 0x000001e1, 0x00000181 }, - { 0x00009a14, 0x00000021, 0x000001c1 }, - { 0x00009a18, 0x00000061, 0x00000001 }, - { 0x00009a1c, 0x00000168, 0x00000041 }, - { 0x00009a20, 0x000001a8, 0x000001a8 }, - { 0x00009a24, 0x000001e8, 0x000001e8 }, - { 0x00009a28, 0x00000028, 0x00000028 }, - { 0x00009a2c, 0x00000068, 0x00000068 }, - { 0x00009a30, 0x00000189, 0x000000a8 }, - { 0x00009a34, 0x000001c9, 0x00000169 }, - { 0x00009a38, 0x00000009, 0x000001a9 }, - { 0x00009a3c, 0x00000049, 0x000001e9 }, - { 0x00009a40, 0x00000089, 0x00000029 }, - { 0x00009a44, 0x00000170, 0x00000069 }, - { 0x00009a48, 0x000001b0, 0x00000190 }, - { 0x00009a4c, 0x000001f0, 0x000001d0 }, - { 0x00009a50, 0x00000030, 0x00000010 }, - { 0x00009a54, 0x00000070, 0x00000050 }, - { 0x00009a58, 0x00000191, 0x00000090 }, - { 0x00009a5c, 0x000001d1, 0x00000151 }, - { 0x00009a60, 0x00000011, 0x00000191 }, - { 0x00009a64, 0x00000051, 0x000001d1 }, - { 0x00009a68, 0x00000091, 0x00000011 }, - { 0x00009a6c, 0x000001b8, 0x00000051 }, - { 0x00009a70, 0x000001f8, 0x00000198 }, - { 0x00009a74, 0x00000038, 0x000001d8 }, - { 0x00009a78, 0x00000078, 0x00000018 }, - { 0x00009a7c, 0x00000199, 0x00000058 }, - { 0x00009a80, 0x000001d9, 0x00000098 }, - { 0x00009a84, 0x00000019, 0x00000159 }, - { 0x00009a88, 0x00000059, 0x00000199 }, - { 0x00009a8c, 0x00000099, 0x000001d9 }, - { 
0x00009a90, 0x000000d9, 0x00000019 }, - { 0x00009a94, 0x000000f9, 0x00000059 }, - { 0x00009a98, 0x000000f9, 0x00000099 }, - { 0x00009a9c, 0x000000f9, 0x000000d9 }, - { 0x00009aa0, 0x000000f9, 0x000000f9 }, - { 0x00009aa4, 0x000000f9, 0x000000f9 }, - { 0x00009aa8, 0x000000f9, 0x000000f9 }, - { 0x00009aac, 0x000000f9, 0x000000f9 }, - { 0x00009ab0, 0x000000f9, 0x000000f9 }, - { 0x00009ab4, 0x000000f9, 0x000000f9 }, - { 0x00009ab8, 0x000000f9, 0x000000f9 }, - { 0x00009abc, 0x000000f9, 0x000000f9 }, - { 0x00009ac0, 0x000000f9, 0x000000f9 }, - { 0x00009ac4, 0x000000f9, 0x000000f9 }, - { 0x00009ac8, 0x000000f9, 0x000000f9 }, - { 0x00009acc, 0x000000f9, 0x000000f9 }, - { 0x00009ad0, 0x000000f9, 0x000000f9 }, - { 0x00009ad4, 0x000000f9, 0x000000f9 }, - { 0x00009ad8, 0x000000f9, 0x000000f9 }, - { 0x00009adc, 0x000000f9, 0x000000f9 }, - { 0x00009ae0, 0x000000f9, 0x000000f9 }, - { 0x00009ae4, 0x000000f9, 0x000000f9 }, - { 0x00009ae8, 0x000000f9, 0x000000f9 }, - { 0x00009aec, 0x000000f9, 0x000000f9 }, - { 0x00009af0, 0x000000f9, 0x000000f9 }, - { 0x00009af4, 0x000000f9, 0x000000f9 }, - { 0x00009af8, 0x000000f9, 0x000000f9 }, - { 0x00009afc, 0x000000f9, 0x000000f9 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x00009a00, 0x00000000, 0x00000000}, + {0x00009a04, 0x00000040, 0x00000040}, + {0x00009a08, 0x00000080, 0x00000080}, + {0x00009a0c, 0x000001a1, 0x00000141}, + {0x00009a10, 0x000001e1, 0x00000181}, + {0x00009a14, 0x00000021, 0x000001c1}, + {0x00009a18, 0x00000061, 0x00000001}, + {0x00009a1c, 0x00000168, 0x00000041}, + {0x00009a20, 0x000001a8, 0x000001a8}, + {0x00009a24, 0x000001e8, 0x000001e8}, + {0x00009a28, 0x00000028, 0x00000028}, + {0x00009a2c, 0x00000068, 0x00000068}, + {0x00009a30, 0x00000189, 0x000000a8}, + {0x00009a34, 0x000001c9, 0x00000169}, + {0x00009a38, 0x00000009, 0x000001a9}, + {0x00009a3c, 0x00000049, 0x000001e9}, + {0x00009a40, 0x00000089, 0x00000029}, + {0x00009a44, 0x00000170, 0x00000069}, + {0x00009a48, 0x000001b0, 0x00000190}, + {0x00009a4c, 0x000001f0, 0x000001d0}, + {0x00009a50, 0x00000030, 0x00000010}, + {0x00009a54, 0x00000070, 0x00000050}, + {0x00009a58, 0x00000191, 0x00000090}, + {0x00009a5c, 0x000001d1, 0x00000151}, + {0x00009a60, 0x00000011, 0x00000191}, + {0x00009a64, 0x00000051, 0x000001d1}, + {0x00009a68, 0x00000091, 0x00000011}, + {0x00009a6c, 0x000001b8, 0x00000051}, + {0x00009a70, 0x000001f8, 0x00000198}, + {0x00009a74, 0x00000038, 0x000001d8}, + {0x00009a78, 0x00000078, 0x00000018}, + {0x00009a7c, 0x00000199, 0x00000058}, + {0x00009a80, 0x000001d9, 0x00000098}, + {0x00009a84, 0x00000019, 0x00000159}, + {0x00009a88, 0x00000059, 0x00000199}, + {0x00009a8c, 0x00000099, 0x000001d9}, + {0x00009a90, 0x000000d9, 0x00000019}, + {0x00009a94, 0x000000f9, 0x00000059}, + {0x00009a98, 0x000000f9, 0x00000099}, + {0x00009a9c, 0x000000f9, 0x000000d9}, + {0x00009aa0, 0x000000f9, 0x000000f9}, + {0x00009aa4, 0x000000f9, 0x000000f9}, + {0x00009aa8, 0x000000f9, 0x000000f9}, + {0x00009aac, 0x000000f9, 0x000000f9}, + {0x00009ab0, 0x000000f9, 0x000000f9}, + {0x00009ab4, 0x000000f9, 0x000000f9}, + {0x00009ab8, 0x000000f9, 0x000000f9}, + {0x00009abc, 0x000000f9, 0x000000f9}, + {0x00009ac0, 0x000000f9, 0x000000f9}, + {0x00009ac4, 0x000000f9, 0x000000f9}, + {0x00009ac8, 0x000000f9, 0x000000f9}, + {0x00009acc, 0x000000f9, 0x000000f9}, + {0x00009ad0, 0x000000f9, 0x000000f9}, + {0x00009ad4, 0x000000f9, 0x000000f9}, + {0x00009ad8, 0x000000f9, 0x000000f9}, + {0x00009adc, 0x000000f9, 0x000000f9}, + {0x00009ae0, 0x000000f9, 0x000000f9}, + {0x00009ae4, 0x000000f9, 0x000000f9}, + {0x00009ae8, 0x000000f9, 
0x000000f9}, + {0x00009aec, 0x000000f9, 0x000000f9}, + {0x00009af0, 0x000000f9, 0x000000f9}, + {0x00009af4, 0x000000f9, 0x000000f9}, + {0x00009af8, 0x000000f9, 0x000000f9}, + {0x00009afc, 0x000000f9, 0x000000f9}, }; static const u32 ar5416Bank1_9100[][2] = { - { 0x000098b0, 0x02108421}, - { 0x000098ec, 0x00000008}, + /* Addr allmodes */ + {0x000098b0, 0x02108421}, + {0x000098ec, 0x00000008}, }; static const u32 ar5416Bank2_9100[][2] = { - { 0x000098b0, 0x0e73ff17}, - { 0x000098e0, 0x00000420}, + /* Addr allmodes */ + {0x000098b0, 0x0e73ff17}, + {0x000098e0, 0x00000420}, }; static const u32 ar5416Bank3_9100[][3] = { - { 0x000098f0, 0x01400018, 0x01c00018 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x000098f0, 0x01400018, 0x01c00018}, }; static const u32 ar5416Bank6_9100[][3] = { - - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00e00000, 0x00e00000 }, - { 0x0000989c, 0x005e0000, 0x005e0000 }, - { 0x0000989c, 0x00120000, 0x00120000 }, - { 0x0000989c, 0x00620000, 0x00620000 }, - { 0x0000989c, 0x00020000, 0x00020000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x005f0000, 0x005f0000 }, - { 0x0000989c, 0x00870000, 0x00870000 }, - { 0x0000989c, 0x00f90000, 0x00f90000 }, - { 0x0000989c, 0x007b0000, 0x007b0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00f50000, 0x00f50000 }, - { 0x0000989c, 0x00dc0000, 0x00dc0000 }, - { 0x0000989c, 0x00110000, 0x00110000 }, - { 0x0000989c, 0x006100a8, 0x006100a8 }, - { 0x0000989c, 0x004210a2, 0x004210a2 }, - { 0x0000989c, 0x0014000f, 0x0014000f }, - { 0x0000989c, 0x00c40002, 0x00c40002 }, - { 0x0000989c, 0x003000f2, 0x003000f2 }, - { 0x0000989c, 0x00440016, 0x00440016 }, - { 0x0000989c, 0x00410040, 0x00410040 }, - { 0x0000989c, 0x000180d6, 0x000180d6 }, - { 0x0000989c, 0x0000c0aa, 0x0000c0aa }, - { 0x0000989c, 0x000000b1, 0x000000b1 }, - { 0x0000989c, 0x00002000, 0x00002000 }, - { 0x0000989c, 0x000000d4, 0x000000d4 }, - { 0x000098d0, 0x0000000f, 0x0010000f }, + /* Addr 5G_HT20 5G_HT40 */ + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00e00000, 0x00e00000}, + {0x0000989c, 0x005e0000, 0x005e0000}, + {0x0000989c, 0x00120000, 0x00120000}, + {0x0000989c, 0x00620000, 0x00620000}, + {0x0000989c, 0x00020000, 0x00020000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x005f0000, 0x005f0000}, + {0x0000989c, 0x00870000, 0x00870000}, + {0x0000989c, 0x00f90000, 0x00f90000}, + {0x0000989c, 0x007b0000, 0x007b0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00f50000, 0x00f50000}, + {0x0000989c, 0x00dc0000, 0x00dc0000}, + {0x0000989c, 0x00110000, 0x00110000}, + {0x0000989c, 0x006100a8, 0x006100a8}, + {0x0000989c, 0x004210a2, 0x004210a2}, + {0x0000989c, 0x0014000f, 0x0014000f}, + {0x0000989c, 0x00c40002, 0x00c40002}, + {0x0000989c, 0x003000f2, 0x003000f2}, + {0x0000989c, 0x00440016, 0x00440016}, + {0x0000989c, 0x00410040, 0x00410040}, + {0x0000989c, 0x000180d6, 0x000180d6}, + {0x0000989c, 0x0000c0aa, 0x0000c0aa}, + {0x0000989c, 0x000000b1, 0x000000b1}, + {0x0000989c, 0x00002000, 0x00002000}, + {0x0000989c, 0x000000d4, 0x000000d4}, + {0x000098d0, 0x0000000f, 0x0010000f}, }; - static const u32 
ar5416Bank6TPC_9100[][3] = { - - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00e00000, 0x00e00000 }, - { 0x0000989c, 0x005e0000, 0x005e0000 }, - { 0x0000989c, 0x00120000, 0x00120000 }, - { 0x0000989c, 0x00620000, 0x00620000 }, - { 0x0000989c, 0x00020000, 0x00020000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x40ff0000, 0x40ff0000 }, - { 0x0000989c, 0x005f0000, 0x005f0000 }, - { 0x0000989c, 0x00870000, 0x00870000 }, - { 0x0000989c, 0x00f90000, 0x00f90000 }, - { 0x0000989c, 0x007b0000, 0x007b0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00f50000, 0x00f50000 }, - { 0x0000989c, 0x00dc0000, 0x00dc0000 }, - { 0x0000989c, 0x00110000, 0x00110000 }, - { 0x0000989c, 0x006100a8, 0x006100a8 }, - { 0x0000989c, 0x00423022, 0x00423022 }, - { 0x0000989c, 0x2014008f, 0x2014008f }, - { 0x0000989c, 0x00c40002, 0x00c40002 }, - { 0x0000989c, 0x003000f2, 0x003000f2 }, - { 0x0000989c, 0x00440016, 0x00440016 }, - { 0x0000989c, 0x00410040, 0x00410040 }, - { 0x0000989c, 0x0001805e, 0x0001805e }, - { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, - { 0x0000989c, 0x000000e1, 0x000000e1 }, - { 0x0000989c, 0x00007080, 0x00007080 }, - { 0x0000989c, 0x000000d4, 0x000000d4 }, - { 0x000098d0, 0x0000000f, 0x0010000f }, + /* Addr 5G_HT20 5G_HT40 */ + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00e00000, 0x00e00000}, + {0x0000989c, 0x005e0000, 0x005e0000}, + {0x0000989c, 0x00120000, 0x00120000}, + {0x0000989c, 0x00620000, 0x00620000}, + {0x0000989c, 0x00020000, 0x00020000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x40ff0000, 0x40ff0000}, + {0x0000989c, 0x005f0000, 0x005f0000}, + {0x0000989c, 0x00870000, 0x00870000}, + {0x0000989c, 0x00f90000, 0x00f90000}, + {0x0000989c, 0x007b0000, 0x007b0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00f50000, 0x00f50000}, + {0x0000989c, 0x00dc0000, 0x00dc0000}, + {0x0000989c, 0x00110000, 0x00110000}, + {0x0000989c, 0x006100a8, 0x006100a8}, + {0x0000989c, 0x00423022, 0x00423022}, + {0x0000989c, 0x2014008f, 0x2014008f}, + {0x0000989c, 0x00c40002, 0x00c40002}, + {0x0000989c, 0x003000f2, 0x003000f2}, + {0x0000989c, 0x00440016, 0x00440016}, + {0x0000989c, 0x00410040, 0x00410040}, + {0x0000989c, 0x0001805e, 0x0001805e}, + {0x0000989c, 0x0000c0ab, 0x0000c0ab}, + {0x0000989c, 0x000000e1, 0x000000e1}, + {0x0000989c, 0x00007080, 0x00007080}, + {0x0000989c, 0x000000d4, 0x000000d4}, + {0x000098d0, 0x0000000f, 0x0010000f}, }; static const u32 ar5416Bank7_9100[][2] = { - { 0x0000989c, 0x00000500 }, - { 0x0000989c, 0x00000800 }, - { 0x000098cc, 0x0000000e }, + /* Addr allmodes */ + {0x0000989c, 0x00000500}, + {0x0000989c, 0x00000800}, + {0x000098cc, 0x0000000e}, }; static const u32 ar5416Addac_9100[][2] = { - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000010 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - 
{0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x000000c0 }, - {0x0000989c, 0x00000015 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x000098cc, 0x00000000 }, + /* Addr allmodes */ + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000010}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x000000c0}, + {0x0000989c, 0x00000015}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x000098cc, 0x00000000}, }; static const u32 ar5416Modes_9160[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, - { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, - { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 }, - { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, - { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e }, - { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, - { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, - { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, - { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, - { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 }, - { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 
0x00009b40, 0x00009b40 }, - { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, - { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, - { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, - { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce }, - { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 }, - { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, - { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, - { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, - { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, - { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, - { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, - { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, - { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, - { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, - { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, - { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, - { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, - { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, - { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a}, + {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009828, 0x0a020001, 0x0a020001, 
0x0a020001, 0x0a020001, 0x0a020001}, + {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007}, + {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0}, + {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68}, + {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2, 0x6c48b0e2}, + {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e}, + {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e}, + {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18}, + {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0}, + {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081}, + {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016}, + {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d}, + {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020}, + {0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40}, + {0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40}, + {0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40}, + {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120}, + {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce}, + {0x000099bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00}, + {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be}, + {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77}, + {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329}, + {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8}, + {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384}, + {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880}, + {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788}, + {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120}, + {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa}, + {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000}, + {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402}, + {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06}, + {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b}, + {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b}, + {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a}, + {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf}, + {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f}, + {0x0000a320, 
0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f}, + {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f}, + {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, }; static const u32 ar5416Common_9160[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00007010, 0x00000020 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x40000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x000080c0, 0x2a82301a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 
0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008120, 0x08f04800 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c4, 0x00000000 }, - { 0x000081d0, 0x00003210 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x00008300, 0x00000000 }, - { 0x00008304, 0x00000000 }, - { 0x00008308, 0x00000000 }, - { 0x0000830c, 0x00000000 }, - { 0x00008310, 0x00000000 }, - { 0x00008314, 0x00000000 }, - { 0x00008318, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00ff0000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xad848e19 }, - { 0x00009810, 0x7d14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x00009840, 0x206a01ae }, - { 0x0000984c, 0x1284233c }, - { 0x00009854, 0x00000859 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x05100000 }, - { 0x0000a920, 0x05100000 }, - { 0x0000b920, 0x05100000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280b212 }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x2108ecff }, - { 0x00009940, 0x00750604 }, - { 0x0000c95c, 0x004b6a8e }, - { 0x00009970, 
0x190fb515 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x006f0000 }, - { 0x000099b0, 0x03051000 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000200 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099fc, 0x00001042 }, - { 0x00009b00, 0x00000000 }, - { 0x00009b04, 0x00000001 }, - { 0x00009b08, 0x00000002 }, - { 0x00009b0c, 0x00000003 }, - { 0x00009b10, 0x00000004 }, - { 0x00009b14, 0x00000005 }, - { 0x00009b18, 0x00000008 }, - { 0x00009b1c, 0x00000009 }, - { 0x00009b20, 0x0000000a }, - { 0x00009b24, 0x0000000b }, - { 0x00009b28, 0x0000000c }, - { 0x00009b2c, 0x0000000d }, - { 0x00009b30, 0x00000010 }, - { 0x00009b34, 0x00000011 }, - { 0x00009b38, 0x00000012 }, - { 0x00009b3c, 0x00000013 }, - { 0x00009b40, 0x00000014 }, - { 0x00009b44, 0x00000015 }, - { 0x00009b48, 0x00000018 }, - { 0x00009b4c, 0x00000019 }, - { 0x00009b50, 0x0000001a }, - { 0x00009b54, 0x0000001b }, - { 0x00009b58, 0x0000001c }, - { 0x00009b5c, 0x0000001d }, - { 0x00009b60, 0x00000020 }, - { 0x00009b64, 0x00000021 }, - { 0x00009b68, 0x00000022 }, - { 0x00009b6c, 0x00000023 }, - { 0x00009b70, 0x00000024 }, - { 0x00009b74, 0x00000025 }, - { 0x00009b78, 0x00000028 }, - { 0x00009b7c, 0x00000029 }, - { 0x00009b80, 0x0000002a }, - { 0x00009b84, 0x0000002b }, - { 0x00009b88, 0x0000002c }, - { 0x00009b8c, 0x0000002d }, - { 0x00009b90, 0x00000030 }, - { 0x00009b94, 0x00000031 }, - { 0x00009b98, 0x00000032 }, - { 0x00009b9c, 0x00000033 }, - { 0x00009ba0, 0x00000034 }, - { 0x00009ba4, 0x00000035 }, - { 0x00009ba8, 0x00000035 }, - { 0x00009bac, 0x00000035 }, - { 0x00009bb0, 0x00000035 }, - { 0x00009bb4, 0x00000035 }, - { 0x00009bb8, 0x00000035 }, - { 0x00009bbc, 0x00000035 }, - { 0x00009bc0, 0x00000035 }, - { 0x00009bc4, 0x00000035 }, - { 0x00009bc8, 0x00000035 }, - { 0x00009bcc, 0x00000035 }, - { 0x00009bd0, 0x00000035 }, - { 0x00009bd4, 0x00000035 }, - { 0x00009bd8, 0x00000035 }, - { 0x00009bdc, 0x00000035 }, - { 0x00009be0, 0x00000035 }, - { 0x00009be4, 0x00000035 }, - { 0x00009be8, 0x00000035 }, - { 0x00009bec, 0x00000035 }, - { 0x00009bf0, 0x00000035 }, - { 0x00009bf4, 0x00000035 }, - { 0x00009bf8, 0x00000010 }, - { 0x00009bfc, 0x0000001a }, - { 0x0000a210, 0x40806333 }, - { 0x0000a214, 0x00106c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x018830c6 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x001a0bb5 }, - { 0x0000a22c, 0x00000000 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a23c, 0x13c889af }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00007bb6 }, - { 0x0000a248, 0x0fff3ffc }, - { 0x0000a24c, 0x00000001 }, - { 0x0000a250, 0x0000e000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cc75380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a268, 0x00000001 }, - { 0x0000a26c, 0x0ebae9c6 }, - { 0x0000b26c, 0x0ebae9c6 }, - { 0x0000c26c, 0x0ebae9c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000a27c, 0x050701ce }, - { 0x0000a338, 0x00000000 }, - { 0x0000a33c, 0x00000000 }, - { 0x0000a340, 0x00000000 }, - { 0x0000a344, 0x00000000 }, - { 0x0000a348, 0x3fffffff }, - { 0x0000a34c, 0x3fffffff }, 
- { 0x0000a350, 0x3fffffff }, - { 0x0000a354, 0x0003ffff }, - { 0x0000a358, 0x79bfaa03 }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, - { 0x0000a3e0, 0x000001ce }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020015}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + {0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00004030, 0x00000002}, + {0x0000403c, 0x00000002}, + {0x00007010, 0x00000020}, + {0x00007038, 0x000004c2}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 
0x00000000}, + {0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x40000000}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x000080c0, 0x2a82301a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x32143320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, + {0x000081c4, 0x00000000}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, + {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008264, 0x88a00010}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x00000000}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x00008300, 0x00000000}, + {0x00008304, 0x00000000}, + {0x00008308, 0x00000000}, + {0x0000830c, 0x00000000}, + {0x00008310, 0x00000000}, + {0x00008314, 0x00000000}, + {0x00008318, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x000107ff}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xad848e19}, + {0x00009810, 0x7d14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x00009840, 0x206a01ae}, + {0x0000984c, 0x1284233c}, + {0x00009854, 0x00000859}, + {0x00009900, 0x00000000}, + {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x05100000}, + {0x0000a920, 0x05100000}, + {0x0000b920, 0x05100000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009934, 0x1e1f2022}, + {0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009948, 0x9280b212}, + 
{0x0000994c, 0x00020028}, + {0x00009954, 0x5f3ca3de}, + {0x00009958, 0x2108ecff}, + {0x00009940, 0x00750604}, + {0x0000c95c, 0x004b6a8e}, + {0x00009970, 0x190fb515}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x00009980, 0x00000000}, + {0x00009984, 0x00000000}, + {0x00009988, 0x00000000}, + {0x0000998c, 0x00000000}, + {0x00009990, 0x00000000}, + {0x00009994, 0x00000000}, + {0x00009998, 0x00000000}, + {0x0000999c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x201fff00}, + {0x000099ac, 0x006f0000}, + {0x000099b0, 0x03051000}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000200}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x0cc80caa}, + {0x000099fc, 0x00001042}, + {0x00009b00, 0x00000000}, + {0x00009b04, 0x00000001}, + {0x00009b08, 0x00000002}, + {0x00009b0c, 0x00000003}, + {0x00009b10, 0x00000004}, + {0x00009b14, 0x00000005}, + {0x00009b18, 0x00000008}, + {0x00009b1c, 0x00000009}, + {0x00009b20, 0x0000000a}, + {0x00009b24, 0x0000000b}, + {0x00009b28, 0x0000000c}, + {0x00009b2c, 0x0000000d}, + {0x00009b30, 0x00000010}, + {0x00009b34, 0x00000011}, + {0x00009b38, 0x00000012}, + {0x00009b3c, 0x00000013}, + {0x00009b40, 0x00000014}, + {0x00009b44, 0x00000015}, + {0x00009b48, 0x00000018}, + {0x00009b4c, 0x00000019}, + {0x00009b50, 0x0000001a}, + {0x00009b54, 0x0000001b}, + {0x00009b58, 0x0000001c}, + {0x00009b5c, 0x0000001d}, + {0x00009b60, 0x00000020}, + {0x00009b64, 0x00000021}, + {0x00009b68, 0x00000022}, + {0x00009b6c, 0x00000023}, + {0x00009b70, 0x00000024}, + {0x00009b74, 0x00000025}, + {0x00009b78, 0x00000028}, + {0x00009b7c, 0x00000029}, + {0x00009b80, 0x0000002a}, + {0x00009b84, 0x0000002b}, + {0x00009b88, 0x0000002c}, + {0x00009b8c, 0x0000002d}, + {0x00009b90, 0x00000030}, + {0x00009b94, 0x00000031}, + {0x00009b98, 0x00000032}, + {0x00009b9c, 0x00000033}, + {0x00009ba0, 0x00000034}, + {0x00009ba4, 0x00000035}, + {0x00009ba8, 0x00000035}, + {0x00009bac, 0x00000035}, + {0x00009bb0, 0x00000035}, + {0x00009bb4, 0x00000035}, + {0x00009bb8, 0x00000035}, + {0x00009bbc, 0x00000035}, + {0x00009bc0, 0x00000035}, + {0x00009bc4, 0x00000035}, + {0x00009bc8, 0x00000035}, + {0x00009bcc, 0x00000035}, + {0x00009bd0, 0x00000035}, + {0x00009bd4, 0x00000035}, + {0x00009bd8, 0x00000035}, + {0x00009bdc, 0x00000035}, + {0x00009be0, 0x00000035}, + {0x00009be4, 0x00000035}, + {0x00009be8, 0x00000035}, + {0x00009bec, 0x00000035}, + {0x00009bf0, 0x00000035}, + {0x00009bf4, 0x00000035}, + {0x00009bf8, 0x00000010}, + {0x00009bfc, 0x0000001a}, + {0x0000a210, 0x40806333}, + {0x0000a214, 0x00106c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 0x018830c6}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x001a0bb5}, + {0x0000a22c, 0x00000000}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a23c, 0x13c889af}, + {0x0000a240, 0x38490a20}, + {0x0000a244, 0x00007bb6}, + {0x0000a248, 0x0fff3ffc}, + {0x0000a24c, 0x00000001}, + {0x0000a250, 0x0000e000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0cc75380}, + {0x0000a25c, 0x0f0f0f01}, + {0x0000a260, 0xdfa91f01}, + {0x0000a268, 0x00000001}, + {0x0000a26c, 0x0e79e5c6}, + {0x0000b26c, 0x0e79e5c6}, + {0x0000c26c, 0x0e79e5c6}, + {0x0000d270, 0x00820820}, + {0x0000a278, 0x1ce739ce}, + {0x0000a27c, 0x050701ce}, + {0x0000a338, 0x00000000}, + {0x0000a33c, 0x00000000}, + {0x0000a340, 0x00000000}, + {0x0000a344, 0x00000000}, + {0x0000a348, 0x3fffffff}, + {0x0000a34c, 0x3fffffff}, + {0x0000a350, 0x3fffffff}, + {0x0000a354, 0x0003ffff}, + {0x0000a358, 0x79bfaa03}, 
+ {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, + {0x0000a388, 0x0c000000}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a394, 0x1ce739ce}, + {0x0000a398, 0x000001ce}, + {0x0000a39c, 0x00000001}, + {0x0000a3a0, 0x00000000}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0x00000000}, + {0x0000a3ac, 0x00000000}, + {0x0000a3b0, 0x00000000}, + {0x0000a3b4, 0x00000000}, + {0x0000a3b8, 0x00000000}, + {0x0000a3bc, 0x00000000}, + {0x0000a3c0, 0x00000000}, + {0x0000a3c4, 0x00000000}, + {0x0000a3c8, 0x00000246}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3dc, 0x1ce739ce}, + {0x0000a3e0, 0x000001ce}, }; static const u32 ar5416Bank0_9160[][2] = { - { 0x000098b0, 0x1e5795e5 }, - { 0x000098e0, 0x02008020 }, + /* Addr allmodes */ + {0x000098b0, 0x1e5795e5}, + {0x000098e0, 0x02008020}, }; static const u32 ar5416BB_RfGain_9160[][3] = { - { 0x00009a00, 0x00000000, 0x00000000 }, - { 0x00009a04, 0x00000040, 0x00000040 }, - { 0x00009a08, 0x00000080, 0x00000080 }, - { 0x00009a0c, 0x000001a1, 0x00000141 }, - { 0x00009a10, 0x000001e1, 0x00000181 }, - { 0x00009a14, 0x00000021, 0x000001c1 }, - { 0x00009a18, 0x00000061, 0x00000001 }, - { 0x00009a1c, 0x00000168, 0x00000041 }, - { 0x00009a20, 0x000001a8, 0x000001a8 }, - { 0x00009a24, 0x000001e8, 0x000001e8 }, - { 0x00009a28, 0x00000028, 0x00000028 }, - { 0x00009a2c, 0x00000068, 0x00000068 }, - { 0x00009a30, 0x00000189, 0x000000a8 }, - { 0x00009a34, 0x000001c9, 0x00000169 }, - { 0x00009a38, 0x00000009, 0x000001a9 }, - { 0x00009a3c, 0x00000049, 0x000001e9 }, - { 0x00009a40, 0x00000089, 0x00000029 }, - { 0x00009a44, 0x00000170, 0x00000069 }, - { 0x00009a48, 0x000001b0, 0x00000190 }, - { 0x00009a4c, 0x000001f0, 0x000001d0 }, - { 0x00009a50, 0x00000030, 0x00000010 }, - { 0x00009a54, 0x00000070, 0x00000050 }, - { 0x00009a58, 0x00000191, 0x00000090 }, - { 0x00009a5c, 0x000001d1, 0x00000151 }, - { 0x00009a60, 0x00000011, 0x00000191 }, - { 0x00009a64, 0x00000051, 0x000001d1 }, - { 0x00009a68, 0x00000091, 0x00000011 }, - { 0x00009a6c, 0x000001b8, 0x00000051 }, - { 0x00009a70, 0x000001f8, 0x00000198 }, - { 0x00009a74, 0x00000038, 0x000001d8 }, - { 0x00009a78, 0x00000078, 0x00000018 }, - { 0x00009a7c, 0x00000199, 0x00000058 }, - { 0x00009a80, 0x000001d9, 0x00000098 }, - { 0x00009a84, 0x00000019, 0x00000159 }, - { 0x00009a88, 0x00000059, 0x00000199 }, - { 0x00009a8c, 0x00000099, 0x000001d9 }, - { 0x00009a90, 0x000000d9, 0x00000019 }, - { 0x00009a94, 0x000000f9, 0x00000059 }, - { 0x00009a98, 0x000000f9, 0x00000099 }, - { 0x00009a9c, 0x000000f9, 0x000000d9 }, - { 0x00009aa0, 0x000000f9, 0x000000f9 }, - { 0x00009aa4, 0x000000f9, 0x000000f9 }, - { 0x00009aa8, 0x000000f9, 0x000000f9 }, - { 0x00009aac, 0x000000f9, 0x000000f9 }, - { 0x00009ab0, 0x000000f9, 0x000000f9 }, - { 0x00009ab4, 0x000000f9, 0x000000f9 }, - { 0x00009ab8, 0x000000f9, 0x000000f9 }, - { 0x00009abc, 0x000000f9, 0x000000f9 }, - { 0x00009ac0, 0x000000f9, 0x000000f9 }, - { 0x00009ac4, 0x000000f9, 0x000000f9 }, - { 0x00009ac8, 0x000000f9, 0x000000f9 }, - { 0x00009acc, 0x000000f9, 0x000000f9 }, - { 0x00009ad0, 0x000000f9, 0x000000f9 }, - { 0x00009ad4, 0x000000f9, 0x000000f9 }, - { 0x00009ad8, 0x000000f9, 0x000000f9 }, - { 0x00009adc, 0x000000f9, 0x000000f9 }, - { 0x00009ae0, 0x000000f9, 0x000000f9 }, - 
{ 0x00009ae4, 0x000000f9, 0x000000f9 }, - { 0x00009ae8, 0x000000f9, 0x000000f9 }, - { 0x00009aec, 0x000000f9, 0x000000f9 }, - { 0x00009af0, 0x000000f9, 0x000000f9 }, - { 0x00009af4, 0x000000f9, 0x000000f9 }, - { 0x00009af8, 0x000000f9, 0x000000f9 }, - { 0x00009afc, 0x000000f9, 0x000000f9 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x00009a00, 0x00000000, 0x00000000}, + {0x00009a04, 0x00000040, 0x00000040}, + {0x00009a08, 0x00000080, 0x00000080}, + {0x00009a0c, 0x000001a1, 0x00000141}, + {0x00009a10, 0x000001e1, 0x00000181}, + {0x00009a14, 0x00000021, 0x000001c1}, + {0x00009a18, 0x00000061, 0x00000001}, + {0x00009a1c, 0x00000168, 0x00000041}, + {0x00009a20, 0x000001a8, 0x000001a8}, + {0x00009a24, 0x000001e8, 0x000001e8}, + {0x00009a28, 0x00000028, 0x00000028}, + {0x00009a2c, 0x00000068, 0x00000068}, + {0x00009a30, 0x00000189, 0x000000a8}, + {0x00009a34, 0x000001c9, 0x00000169}, + {0x00009a38, 0x00000009, 0x000001a9}, + {0x00009a3c, 0x00000049, 0x000001e9}, + {0x00009a40, 0x00000089, 0x00000029}, + {0x00009a44, 0x00000170, 0x00000069}, + {0x00009a48, 0x000001b0, 0x00000190}, + {0x00009a4c, 0x000001f0, 0x000001d0}, + {0x00009a50, 0x00000030, 0x00000010}, + {0x00009a54, 0x00000070, 0x00000050}, + {0x00009a58, 0x00000191, 0x00000090}, + {0x00009a5c, 0x000001d1, 0x00000151}, + {0x00009a60, 0x00000011, 0x00000191}, + {0x00009a64, 0x00000051, 0x000001d1}, + {0x00009a68, 0x00000091, 0x00000011}, + {0x00009a6c, 0x000001b8, 0x00000051}, + {0x00009a70, 0x000001f8, 0x00000198}, + {0x00009a74, 0x00000038, 0x000001d8}, + {0x00009a78, 0x00000078, 0x00000018}, + {0x00009a7c, 0x00000199, 0x00000058}, + {0x00009a80, 0x000001d9, 0x00000098}, + {0x00009a84, 0x00000019, 0x00000159}, + {0x00009a88, 0x00000059, 0x00000199}, + {0x00009a8c, 0x00000099, 0x000001d9}, + {0x00009a90, 0x000000d9, 0x00000019}, + {0x00009a94, 0x000000f9, 0x00000059}, + {0x00009a98, 0x000000f9, 0x00000099}, + {0x00009a9c, 0x000000f9, 0x000000d9}, + {0x00009aa0, 0x000000f9, 0x000000f9}, + {0x00009aa4, 0x000000f9, 0x000000f9}, + {0x00009aa8, 0x000000f9, 0x000000f9}, + {0x00009aac, 0x000000f9, 0x000000f9}, + {0x00009ab0, 0x000000f9, 0x000000f9}, + {0x00009ab4, 0x000000f9, 0x000000f9}, + {0x00009ab8, 0x000000f9, 0x000000f9}, + {0x00009abc, 0x000000f9, 0x000000f9}, + {0x00009ac0, 0x000000f9, 0x000000f9}, + {0x00009ac4, 0x000000f9, 0x000000f9}, + {0x00009ac8, 0x000000f9, 0x000000f9}, + {0x00009acc, 0x000000f9, 0x000000f9}, + {0x00009ad0, 0x000000f9, 0x000000f9}, + {0x00009ad4, 0x000000f9, 0x000000f9}, + {0x00009ad8, 0x000000f9, 0x000000f9}, + {0x00009adc, 0x000000f9, 0x000000f9}, + {0x00009ae0, 0x000000f9, 0x000000f9}, + {0x00009ae4, 0x000000f9, 0x000000f9}, + {0x00009ae8, 0x000000f9, 0x000000f9}, + {0x00009aec, 0x000000f9, 0x000000f9}, + {0x00009af0, 0x000000f9, 0x000000f9}, + {0x00009af4, 0x000000f9, 0x000000f9}, + {0x00009af8, 0x000000f9, 0x000000f9}, + {0x00009afc, 0x000000f9, 0x000000f9}, }; static const u32 ar5416Bank1_9160[][2] = { - { 0x000098b0, 0x02108421 }, - { 0x000098ec, 0x00000008 }, + /* Addr allmodes */ + {0x000098b0, 0x02108421}, + {0x000098ec, 0x00000008}, }; static const u32 ar5416Bank2_9160[][2] = { - { 0x000098b0, 0x0e73ff17 }, - { 0x000098e0, 0x00000420 }, + /* Addr allmodes */ + {0x000098b0, 0x0e73ff17}, + {0x000098e0, 0x00000420}, }; static const u32 ar5416Bank3_9160[][3] = { - { 0x000098f0, 0x01400018, 0x01c00018 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x000098f0, 0x01400018, 0x01c00018}, }; static const u32 ar5416Bank6_9160[][3] = { - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 
0x00000000, 0x00000000 }, - { 0x0000989c, 0x00e00000, 0x00e00000 }, - { 0x0000989c, 0x005e0000, 0x005e0000 }, - { 0x0000989c, 0x00120000, 0x00120000 }, - { 0x0000989c, 0x00620000, 0x00620000 }, - { 0x0000989c, 0x00020000, 0x00020000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x40ff0000, 0x40ff0000 }, - { 0x0000989c, 0x005f0000, 0x005f0000 }, - { 0x0000989c, 0x00870000, 0x00870000 }, - { 0x0000989c, 0x00f90000, 0x00f90000 }, - { 0x0000989c, 0x007b0000, 0x007b0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00f50000, 0x00f50000 }, - { 0x0000989c, 0x00dc0000, 0x00dc0000 }, - { 0x0000989c, 0x00110000, 0x00110000 }, - { 0x0000989c, 0x006100a8, 0x006100a8 }, - { 0x0000989c, 0x004210a2, 0x004210a2 }, - { 0x0000989c, 0x0014008f, 0x0014008f }, - { 0x0000989c, 0x00c40003, 0x00c40003 }, - { 0x0000989c, 0x003000f2, 0x003000f2 }, - { 0x0000989c, 0x00440016, 0x00440016 }, - { 0x0000989c, 0x00410040, 0x00410040 }, - { 0x0000989c, 0x0001805e, 0x0001805e }, - { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, - { 0x0000989c, 0x000000f1, 0x000000f1 }, - { 0x0000989c, 0x00002081, 0x00002081 }, - { 0x0000989c, 0x000000d4, 0x000000d4 }, - { 0x000098d0, 0x0000000f, 0x0010000f }, + /* Addr 5G_HT20 5G_HT40 */ + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00e00000, 0x00e00000}, + {0x0000989c, 0x005e0000, 0x005e0000}, + {0x0000989c, 0x00120000, 0x00120000}, + {0x0000989c, 0x00620000, 0x00620000}, + {0x0000989c, 0x00020000, 0x00020000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x40ff0000, 0x40ff0000}, + {0x0000989c, 0x005f0000, 0x005f0000}, + {0x0000989c, 0x00870000, 0x00870000}, + {0x0000989c, 0x00f90000, 0x00f90000}, + {0x0000989c, 0x007b0000, 0x007b0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00f50000, 0x00f50000}, + {0x0000989c, 0x00dc0000, 0x00dc0000}, + {0x0000989c, 0x00110000, 0x00110000}, + {0x0000989c, 0x006100a8, 0x006100a8}, + {0x0000989c, 0x004210a2, 0x004210a2}, + {0x0000989c, 0x0014008f, 0x0014008f}, + {0x0000989c, 0x00c40003, 0x00c40003}, + {0x0000989c, 0x003000f2, 0x003000f2}, + {0x0000989c, 0x00440016, 0x00440016}, + {0x0000989c, 0x00410040, 0x00410040}, + {0x0000989c, 0x0001805e, 0x0001805e}, + {0x0000989c, 0x0000c0ab, 0x0000c0ab}, + {0x0000989c, 0x000000f1, 0x000000f1}, + {0x0000989c, 0x00002081, 0x00002081}, + {0x0000989c, 0x000000d4, 0x000000d4}, + {0x000098d0, 0x0000000f, 0x0010000f}, }; static const u32 ar5416Bank6TPC_9160[][3] = { - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00000000, 0x00000000 }, - { 0x0000989c, 0x00e00000, 0x00e00000 }, - { 0x0000989c, 0x005e0000, 0x005e0000 }, - { 0x0000989c, 0x00120000, 0x00120000 }, - { 0x0000989c, 0x00620000, 0x00620000 }, - { 0x0000989c, 0x00020000, 0x00020000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x40ff0000, 0x40ff0000 }, - { 0x0000989c, 0x005f0000, 0x005f0000 }, - { 0x0000989c, 0x00870000, 0x00870000 }, - { 0x0000989c, 0x00f90000, 0x00f90000 }, - { 0x0000989c, 0x007b0000, 0x007b0000 }, - { 0x0000989c, 0x00ff0000, 0x00ff0000 }, - { 0x0000989c, 0x00f50000, 0x00f50000 }, - { 0x0000989c, 0x00dc0000, 0x00dc0000 }, - { 0x0000989c, 0x00110000, 0x00110000 }, - { 0x0000989c, 0x006100a8, 
0x006100a8 }, - { 0x0000989c, 0x00423022, 0x00423022 }, - { 0x0000989c, 0x2014008f, 0x2014008f }, - { 0x0000989c, 0x00c40002, 0x00c40002 }, - { 0x0000989c, 0x003000f2, 0x003000f2 }, - { 0x0000989c, 0x00440016, 0x00440016 }, - { 0x0000989c, 0x00410040, 0x00410040 }, - { 0x0000989c, 0x0001805e, 0x0001805e }, - { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, - { 0x0000989c, 0x000000e1, 0x000000e1 }, - { 0x0000989c, 0x00007080, 0x00007080 }, - { 0x0000989c, 0x000000d4, 0x000000d4 }, - { 0x000098d0, 0x0000000f, 0x0010000f }, + /* Addr 5G_HT20 5G_HT40 */ + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00000000, 0x00000000}, + {0x0000989c, 0x00e00000, 0x00e00000}, + {0x0000989c, 0x005e0000, 0x005e0000}, + {0x0000989c, 0x00120000, 0x00120000}, + {0x0000989c, 0x00620000, 0x00620000}, + {0x0000989c, 0x00020000, 0x00020000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x40ff0000, 0x40ff0000}, + {0x0000989c, 0x005f0000, 0x005f0000}, + {0x0000989c, 0x00870000, 0x00870000}, + {0x0000989c, 0x00f90000, 0x00f90000}, + {0x0000989c, 0x007b0000, 0x007b0000}, + {0x0000989c, 0x00ff0000, 0x00ff0000}, + {0x0000989c, 0x00f50000, 0x00f50000}, + {0x0000989c, 0x00dc0000, 0x00dc0000}, + {0x0000989c, 0x00110000, 0x00110000}, + {0x0000989c, 0x006100a8, 0x006100a8}, + {0x0000989c, 0x00423022, 0x00423022}, + {0x0000989c, 0x2014008f, 0x2014008f}, + {0x0000989c, 0x00c40002, 0x00c40002}, + {0x0000989c, 0x003000f2, 0x003000f2}, + {0x0000989c, 0x00440016, 0x00440016}, + {0x0000989c, 0x00410040, 0x00410040}, + {0x0000989c, 0x0001805e, 0x0001805e}, + {0x0000989c, 0x0000c0ab, 0x0000c0ab}, + {0x0000989c, 0x000000e1, 0x000000e1}, + {0x0000989c, 0x00007080, 0x00007080}, + {0x0000989c, 0x000000d4, 0x000000d4}, + {0x000098d0, 0x0000000f, 0x0010000f}, }; static const u32 ar5416Bank7_9160[][2] = { - { 0x0000989c, 0x00000500 }, - { 0x0000989c, 0x00000800 }, - { 0x000098cc, 0x0000000e }, + /* Addr allmodes */ + {0x0000989c, 0x00000500}, + {0x0000989c, 0x00000800}, + {0x000098cc, 0x0000000e}, }; static const u32 ar5416Addac_9160[][2] = { - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x000000c0 }, - {0x0000989c, 0x00000018 }, - {0x0000989c, 0x00000004 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x000000c0 }, - {0x0000989c, 0x00000019 }, - {0x0000989c, 0x00000004 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000004 }, - {0x0000989c, 0x00000003 }, - {0x0000989c, 0x00000008 }, - {0x0000989c, 0x00000000 }, - {0x000098cc, 0x00000000 }, + /* Addr allmodes */ + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x000000c0}, + {0x0000989c, 0x00000018}, + {0x0000989c, 0x00000004}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + 
{0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x000000c0}, + {0x0000989c, 0x00000019}, + {0x0000989c, 0x00000004}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000004}, + {0x0000989c, 0x00000003}, + {0x0000989c, 0x00000008}, + {0x0000989c, 0x00000000}, + {0x000098cc, 0x00000000}, }; -static const u32 ar5416Addac_91601_1[][2] = { - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x000000c0 }, - {0x0000989c, 0x00000018 }, - {0x0000989c, 0x00000004 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x000000c0 }, - {0x0000989c, 0x00000019 }, - {0x0000989c, 0x00000004 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x0000989c, 0x00000000 }, - {0x000098cc, 0x00000000 }, +static const u32 ar5416Addac_9160_1_1[][2] = { + /* Addr allmodes */ + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x000000c0}, + {0x0000989c, 0x00000018}, + {0x0000989c, 0x00000004}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x000000c0}, + {0x0000989c, 0x00000019}, + {0x0000989c, 0x00000004}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x0000989c, 0x00000000}, + {0x000098cc, 0x00000000}, }; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 5fdbb53b47e06bd7fc5031a5aaf5b0a01ad27047..dabafb874c36ad7da8f62a99fd69b64d45af8dc1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -239,7 +239,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) if (qCoff > 15) qCoff = 15; else if (qCoff <= -16) - qCoff = 16; + qCoff = -16; ath_print(common, ATH_DBG_CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index a8a8cdc04afa1f445f499ed8501d3e90fc1ddf56..303c63da5ea384b56f4eefbdb21dfb770fddda27 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -18,6 +18,11 @@ #include "ar5008_initvals.h" #include "ar9001_initvals.h" #include "ar9002_initvals.h" +#include "ar9002_phy.h" + +int modparam_force_new_ani; +module_param_named(force_new_ani, modparam_force_new_ani, int, 0444); +MODULE_PARM_DESC(nohwcrypt, "Force new ANI for AR5008, AR9001, AR9002"); /* 
General hardware code for the A5008/AR9001/AR9002 hadware families */ @@ -80,21 +85,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) ar9287PciePhy_clkreq_always_on_L1_9287_1_1, ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1), 2); - } else if (AR_SREV_9287_10_OR_LATER(ah)) { - INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0, - ARRAY_SIZE(ar9287Modes_9287_1_0), 6); - INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0, - ARRAY_SIZE(ar9287Common_9287_1_0), 2); - - if (ah->config.pcie_clock_req) - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9287PciePhy_clkreq_off_L1_9287_1_0, - ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2); - else - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9287PciePhy_clkreq_always_on_L1_9287_1_0, - ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0), - 2); } else if (AR_SREV_9285_12_OR_LATER(ah)) { @@ -113,21 +103,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2), 2); } - } else if (AR_SREV_9285_10_OR_LATER(ah)) { - INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285, - ARRAY_SIZE(ar9285Modes_9285), 6); - INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285, - ARRAY_SIZE(ar9285Common_9285), 2); - - if (ah->config.pcie_clock_req) { - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9285PciePhy_clkreq_off_L1_9285, - ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2); - } else { - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9285PciePhy_clkreq_always_on_L1_9285, - ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2); - } } else if (AR_SREV_9280_20_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2, ARRAY_SIZE(ar9280Modes_9280_2), 6); @@ -146,11 +121,6 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniModesAdditional, ar9280Modes_fast_clock_9280_2, ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3); - } else if (AR_SREV_9280_10_OR_LATER(ah)) { - INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280, - ARRAY_SIZE(ar9280Modes_9280), 6); - INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280, - ARRAY_SIZE(ar9280Common_9280), 2); } else if (AR_SREV_9160_10_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160, ARRAY_SIZE(ar5416Modes_9160), 6); @@ -174,8 +144,8 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) ARRAY_SIZE(ar5416Bank7_9160), 2); if (AR_SREV_9160_11(ah)) { INIT_INI_ARRAY(&ah->iniAddac, - ar5416Addac_91601_1, - ARRAY_SIZE(ar5416Addac_91601_1), 2); + ar5416Addac_9160_1_1, + ARRAY_SIZE(ar5416Addac_9160_1_1), 2); } else { INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160, ARRAY_SIZE(ar5416Addac_9160), 2); @@ -234,12 +204,12 @@ void ar9002_hw_cck_chan14_spread(struct ath_hw *ah) { if (AR_SREV_9287_11_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniCckfirNormal, - ar9287Common_normal_cck_fir_coeff_92871_1, - ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), + ar9287Common_normal_cck_fir_coeff_9287_1_1, + ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_9287_1_1), 2); INIT_INI_ARRAY(&ah->iniCckfirJapan2484, - ar9287Common_japan_2484_cck_fir_coeff_92871_1, - ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), + ar9287Common_japan_2484_cck_fir_coeff_9287_1_1, + ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_9287_1_1), 2); } } @@ -300,10 +270,6 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9287Modes_rx_gain_9287_1_1, ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6); - else if (AR_SREV_9287_10(ah)) - INIT_INI_ARRAY(&ah->iniModesRxGain, - ar9287Modes_rx_gain_9287_1_0, - ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6); else 
if (AR_SREV_9280_20(ah)) ar9280_20_hw_init_rxgain_ini(ah); @@ -311,10 +277,6 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9287Modes_tx_gain_9287_1_1, ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6); - } else if (AR_SREV_9287_10(ah)) { - INIT_INI_ARRAY(&ah->iniModesTxGain, - ar9287Modes_tx_gain_9287_1_0, - ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6); } else if (AR_SREV_9280_20(ah)) { ar9280_20_hw_init_txgain_ini(ah); } else if (AR_SREV_9285_12_OR_LATER(ah)) { @@ -384,29 +346,6 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0), INI_RA(&ah->iniPcieSerdes, i, 1)); } - } else if (AR_SREV_9280(ah) && - (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) { - REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00); - REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); - - /* RX shut off when elecidle is asserted */ - REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); - REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820); - REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560); - - /* Shut off CLKREQ active in L1 */ - if (ah->config.pcie_clock_req) - REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); - else - REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd); - - REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); - REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); - REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007); - - /* Load the new settings */ - REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); - } else { ENABLE_REGWRITE_BUFFER(ah); @@ -436,55 +375,84 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, } udelay(1000); + } - /* set bit 19 to allow forcing of pcie core into L1 state */ - REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + if (power_off) { + /* clear bit 19 to disable L1 */ + REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + + val = REG_READ(ah, AR_WA); - /* Several PCIe massages to ensure proper behaviour */ + /* + * Set PCIe workaround bits + * In AR9280 and AR9285, bit 14 in WA register (disable L1) + * should only be set when device enters D3 and be + * cleared when device comes back to D0. + */ + if (ah->config.pcie_waen) { + if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) + val |= AR_WA_D3_L1_DISABLE; + } else { + if (((AR_SREV_9285(ah) || + AR_SREV_9271(ah) || + AR_SREV_9287(ah)) && + (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || + (AR_SREV_9280(ah) && + (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { + val |= AR_WA_D3_L1_DISABLE; + } + } + + if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { + /* + * Disable bit 6 and 7 before entering D3 to + * prevent system hang. + */ + val &= ~(AR_WA_BIT6 | AR_WA_BIT7); + } + + if (AR_SREV_9285E_20(ah)) + val |= AR_WA_BIT23; + + REG_WRITE(ah, AR_WA, val); + } else { if (ah->config.pcie_waen) { val = ah->config.pcie_waen; if (!power_off) val &= (~AR_WA_D3_L1_DISABLE); } else { - if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || + if (AR_SREV_9285(ah) || + AR_SREV_9271(ah) || AR_SREV_9287(ah)) { val = AR9285_WA_DEFAULT; if (!power_off) val &= (~AR_WA_D3_L1_DISABLE); - } else if (AR_SREV_9280(ah)) { + } + else if (AR_SREV_9280(ah)) { /* - * On AR9280 chips bit 22 of 0x4004 needs to be - * set otherwise card may disappear. + * For AR9280 chips, bit 22 of 0x4004 + * needs to be set. 
*/ val = AR9280_WA_DEFAULT; if (!power_off) val &= (~AR_WA_D3_L1_DISABLE); - } else + } else { val = AR_WA_DEFAULT; + } + } + + /* WAR for ASPM system hang */ + if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { + val |= (AR_WA_BIT6 | AR_WA_BIT7); } + if (AR_SREV_9285E_20(ah)) + val |= AR_WA_BIT23; + REG_WRITE(ah, AR_WA, val); - } - if (power_off) { - /* - * Set PCIe workaround bits - * bit 14 in WA register (disable L1) should only - * be set when device enters D3 and be cleared - * when device comes back to D0. - */ - if (ah->config.pcie_waen) { - if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) - REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE); - } else { - if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) || - AR_SREV_9287(ah)) && - (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || - (AR_SREV_9280(ah) && - (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { - REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE); - } - } + /* set bit 19 to allow forcing of pcie core into L1 state */ + REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); } } @@ -536,18 +504,29 @@ int ar9002_hw_rf_claim(struct ath_hw *ah) return 0; } +void ar9002_hw_enable_async_fifo(struct ath_hw *ah) +{ + if (AR_SREV_9287_13_OR_LATER(ah)) { + REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL); + REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO); + REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); + REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); + } +} + /* - * Enable ASYNC FIFO - * * If Async FIFO is enabled, the following counters change as MAC now runs * at 117 Mhz instead of 88/44MHz when async FIFO is disabled. * * The values below tested for ht40 2 chain. * Overwrite the delay/timeouts initialized in process ini. */ -void ar9002_hw_enable_async_fifo(struct ath_hw *ah) +void ar9002_hw_update_async_fifo(struct ath_hw *ah) { - if (AR_SREV_9287_12_OR_LATER(ah)) { + if (AR_SREV_9287_13_OR_LATER(ah)) { REG_WRITE(ah, AR_D_GBL_IFS_SIFS, AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); REG_WRITE(ah, AR_D_GBL_IFS_SLOT, @@ -571,9 +550,9 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah) */ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah) { - if (AR_SREV_9287_12_OR_LATER(ah)) { + if (AR_SREV_9287_13_OR_LATER(ah)) { REG_SET_BIT(ah, AR_PCU_MISC_MODE2, - AR_PCU_MISC_MODE2_ENABLE_AGGWEP); + AR_PCU_MISC_MODE2_ENABLE_AGGWEP); } } @@ -595,4 +574,9 @@ void ar9002_hw_attach_ops(struct ath_hw *ah) ar9002_hw_attach_calib_ops(ah); ar9002_hw_attach_mac_ops(ah); + + if (modparam_force_new_ani) + ath9k_hw_attach_ani_ops_new(ah); + else + ath9k_hw_attach_ani_ops_old(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h index dae7f3304eb877610e2dca763aa904e491771756..6203eed860ddc1c31fd2ba0ed10d4151a7e843b5 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h @@ -14,5217 +14,3252 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#ifndef INITVALS_9002_10_H -#define INITVALS_9002_10_H - -static const u32 ar9280Modes_9280[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 }, - { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, - { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 }, - { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 }, - { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, - { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, - { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, - { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 }, - { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 }, - { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, - { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 }, - { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, - { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, - { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, - { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a }, - { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 }, - { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 }, - { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 }, - { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 }, - { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c }, - { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 }, - { 0x00009a18, 
0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 }, - { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 }, - { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac }, - { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 }, - { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 }, - { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 }, - { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 }, - { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 }, - { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 }, - { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 }, - { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 }, - { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac }, - { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 }, - { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 }, - { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 }, - { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 }, - { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 }, - { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad }, - { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, - { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, - { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, - { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, - { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, - { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, - { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, - { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, - { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, - { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, - { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, - { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, - { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, - { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, - { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, - { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, - { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, - { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, - { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, - { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, - { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, - { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, - { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, - { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, - { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, - { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, - { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, - { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 
0x00008b8c, 0x00008b8c }, - { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, - { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, - { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, - { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, - { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, - { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, - { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, - { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, - { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, - { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, - { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, - { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, - { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, - { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, - { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, - { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, - { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, - { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, - { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 }, - { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, - { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, - { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, - { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, - { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, - { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, - { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, - { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, - { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, - { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, - { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 }, - { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, - { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, - { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, - { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, - { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 }, - { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, - { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, - { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, - { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, - { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, - { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, - { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, - { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, - { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, - { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, - { 0x00009b84, 
0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, - { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, - { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, - { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 }, - { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 }, - { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, - { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, - { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, - { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, - { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, - { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, - { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 
0x0001a211, 0x0001a211 }, - { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, - { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, - { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, - { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, - { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, - { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, - { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, - { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, - { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, - { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, - { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, - { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, - { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, - { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, - { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c }, - { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 }, - { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 }, - { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 }, -}; - -static const u32 ar9280Common_9280[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 
0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00007010, 0x00000033 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x40000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080c0, 0x2a82301a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008120, 0x08f04800 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0x00000000 }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c4, 0x00000000 }, - { 0x000081d0, 0x00003210 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x00008300, 0x00000000 }, - { 0x00008304, 0x00000000 }, - { 0x00008308, 0x00000000 }, - { 0x0000830c, 0x00000000 }, - { 0x00008310, 0x00000000 }, - { 0x00008314, 0x00000000 }, - { 0x00008318, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 
0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00000000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00008344, 0x00000000 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xaf268e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x00009840, 0x206a01ae }, - { 0x0000984c, 0x0040233c }, - { 0x0000a84c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x0000a920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0xe250a51e }, - { 0x00009958, 0x3388ffff }, - { 0x00009940, 0x00781204 }, - { 0x0000c95c, 0x004b6a8e }, - { 0x0000c968, 0x000003ce }, - { 0x00009970, 0x190fb514 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x006f00c4 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099fc, 0x00001042 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x40206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x23277200 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a23c, 0x13c889af }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00007bb6 }, - { 0x0000a248, 0x0fff3ffc }, - { 0x0000a24c, 0x00000001 }, - { 0x0000a250, 0x001da000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cdbd380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0ebae9c6 }, - { 0x0000b26c, 0x0ebae9c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000a27c, 0x050701ce }, - { 0x0000a358, 0x7999aa0f }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, 
- { 0x0000a3e0, 0x000001ce }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f38081 }, - { 0x00007800, 0x00040000 }, - { 0x00007804, 0xdb005012 }, - { 0x00007808, 0x04924914 }, - { 0x0000780c, 0x21084210 }, - { 0x00007810, 0x6d801300 }, - { 0x00007814, 0x0019beff }, - { 0x00007818, 0x07e40000 }, - { 0x0000781c, 0x00492000 }, - { 0x00007820, 0x92492480 }, - { 0x00007824, 0x00040000 }, - { 0x00007828, 0xdb005012 }, - { 0x0000782c, 0x04924914 }, - { 0x00007830, 0x21084210 }, - { 0x00007834, 0x6d801300 }, - { 0x00007838, 0x0019beff }, - { 0x0000783c, 0x07e40000 }, - { 0x00007840, 0x00492000 }, - { 0x00007844, 0x92492480 }, - { 0x00007848, 0x00120000 }, - { 0x00007850, 0x54214514 }, - { 0x00007858, 0x92592692 }, - { 0x00007860, 0x52802000 }, - { 0x00007864, 0x0a8e370e }, - { 0x00007868, 0xc0102850 }, - { 0x0000786c, 0x812d4000 }, - { 0x00007874, 0x001b6db0 }, - { 0x00007878, 0x00376b63 }, - { 0x0000787c, 0x06db6db6 }, - { 0x00007880, 0x006d8000 }, - { 0x00007884, 0xffeffffe }, - { 0x00007888, 0xffeffffe }, - { 0x00007890, 0x00060aeb }, - { 0x00007894, 0x5a108000 }, - { 0x00007898, 0x2a850160 }, -}; - -/* XXX 9280 2 */ static const u32 ar9280Modes_9280_2[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, - { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a }, - { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e }, - { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, - { 0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, - { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e }, - { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, - { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, - { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 }, - { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, - { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, - { 
0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, - { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce }, - { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c }, - { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, - { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, - { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000 }, - { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 }, - { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, - { 0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000 }, - { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 }, + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a}, + {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880}, + {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001}, + {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007}, + {0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e}, + {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0}, + {0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2}, + {0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e}, + {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18}, + {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881}, + {0x00009914, 
0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016}, + {0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d}, + {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010}, + {0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010}, + {0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010}, + {0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210}, + {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce}, + {0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c}, + {0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, + {0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444}, + {0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019}, + {0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019}, + {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000}, + {0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000}, + {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e}, + {0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000}, + {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000}, }; static const u32 ar9280Common_9280_2[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 
0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00004060, 0x00000000 }, - { 0x00004064, 0x00000000 }, - { 0x00007010, 0x00000033 }, - { 0x00007034, 0x00000002 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x40000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080c0, 0x2a80001a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c0, 0x00000000 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88a00010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, 
- { 0x0000829c, 0x00000000 }, - { 0x00008300, 0x00000040 }, - { 0x00008314, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00ff0000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00008344, 0x00481043 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xafa68e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x0000984c, 0x0040233c }, - { 0x0000a84c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x00009910, 0x01002310 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x0000a920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x2108ecff }, - { 0x00009940, 0x14750604 }, - { 0x0000c95c, 0x004b6a8e }, - { 0x00009970, 0x190fb515 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x006f0000 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099f0, 0x00000000 }, - { 0x000099fc, 0x00001042 }, - { 0x0000a208, 0x803e4788 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x40206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x233f7180 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00007bb6 }, - { 0x0000a248, 0x0fff3ffc }, - { 0x0000a24c, 0x00000000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cdbd380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0e79e5c6 }, - { 0x0000b26c, 0x0e79e5c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 
0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, - { 0x0000a3e0, 0x000001ce }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f70081 }, - { 0x00007800, 0x00040000 }, - { 0x00007804, 0xdb005012 }, - { 0x00007808, 0x04924914 }, - { 0x0000780c, 0x21084210 }, - { 0x00007810, 0x6d801300 }, - { 0x00007818, 0x07e41000 }, - { 0x00007824, 0x00040000 }, - { 0x00007828, 0xdb005012 }, - { 0x0000782c, 0x04924914 }, - { 0x00007830, 0x21084210 }, - { 0x00007834, 0x6d801300 }, - { 0x0000783c, 0x07e40000 }, - { 0x00007848, 0x00100000 }, - { 0x0000784c, 0x773f0567 }, - { 0x00007850, 0x54214514 }, - { 0x00007854, 0x12035828 }, - { 0x00007858, 0x9259269a }, - { 0x00007860, 0x52802000 }, - { 0x00007864, 0x0a8e370e }, - { 0x00007868, 0xc0102850 }, - { 0x0000786c, 0x812d4000 }, - { 0x00007870, 0x807ec400 }, - { 0x00007874, 0x001b6db0 }, - { 0x00007878, 0x00376b63 }, - { 0x0000787c, 0x06db6db6 }, - { 0x00007880, 0x006d8000 }, - { 0x00007884, 0xffeffffe }, - { 0x00007888, 0xffeffffe }, - { 0x0000788c, 0x00010000 }, - { 0x00007890, 0x02060aeb }, - { 0x00007898, 0x2a850160 }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020015}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + {0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00004030, 0x00000002}, + {0x0000403c, 0x00000002}, + {0x00004024, 0x0000001f}, + {0x00004060, 0x00000000}, + {0x00004064, 0x00000000}, + {0x00007010, 
0x00000033}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x40000000}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x00008070, 0x00000000}, + {0x000080c0, 0x2a80001a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x32143320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, + {0x000081c0, 0x00000000}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, + {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008264, 0x88a00010}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x00000000}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x0000829c, 0x00000000}, + {0x00008300, 0x00000040}, + {0x00008314, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x000107ff}, + {0x00008344, 0x00481043}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xafa68e30}, + {0x00009810, 0xfd14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x0000984c, 0x0040233c}, + {0x0000a84c, 0x0040233c}, + {0x00009854, 0x00000044}, + {0x00009900, 0x00000000}, + {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x00009910, 0x01002310}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x04900000}, + {0x0000a920, 0x04900000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009934, 0x1e1f2022}, + 
{0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009948, 0x9280c00a}, + {0x0000994c, 0x00020028}, + {0x00009954, 0x5f3ca3de}, + {0x00009958, 0x2108ecff}, + {0x00009940, 0x14750604}, + {0x0000c95c, 0x004b6a8e}, + {0x00009970, 0x190fb514}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x00009980, 0x00000000}, + {0x00009984, 0x00000000}, + {0x00009988, 0x00000000}, + {0x0000998c, 0x00000000}, + {0x00009990, 0x00000000}, + {0x00009994, 0x00000000}, + {0x00009998, 0x00000000}, + {0x0000999c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x201fff00}, + {0x000099ac, 0x006f0000}, + {0x000099b0, 0x03051000}, + {0x000099b4, 0x00000820}, + {0x000099c4, 0x06336f77}, + {0x000099c8, 0x6af6532f}, + {0x000099cc, 0x08f186c8}, + {0x000099d0, 0x00046384}, + {0x000099d4, 0x00000000}, + {0x000099d8, 0x00000000}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000000}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x0cc80caa}, + {0x000099f0, 0x00000000}, + {0x000099fc, 0x00001042}, + {0x0000a208, 0x803e4788}, + {0x0000a210, 0x4080a333}, + {0x0000a214, 0x40206c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 0x01834061}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x000003b5}, + {0x0000a22c, 0x233f7180}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a240, 0x38490a20}, + {0x0000a244, 0x00007bb6}, + {0x0000a248, 0x0fff3ffc}, + {0x0000a24c, 0x00000000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0cdbd380}, + {0x0000a25c, 0x0f0f0f01}, + {0x0000a260, 0xdfa91f01}, + {0x0000a268, 0x00000000}, + {0x0000a26c, 0x0e79e5c6}, + {0x0000b26c, 0x0e79e5c6}, + {0x0000d270, 0x00820820}, + {0x0000a278, 0x1ce739ce}, + {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a394, 0x1ce739ce}, + {0x0000a398, 0x000001ce}, + {0x0000a39c, 0x00000001}, + {0x0000a3a0, 0x00000000}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0x00000000}, + {0x0000a3ac, 0x00000000}, + {0x0000a3b0, 0x00000000}, + {0x0000a3b4, 0x00000000}, + {0x0000a3b8, 0x00000000}, + {0x0000a3bc, 0x00000000}, + {0x0000a3c0, 0x00000000}, + {0x0000a3c4, 0x00000000}, + {0x0000a3c8, 0x00000246}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3dc, 0x1ce739ce}, + {0x0000a3e0, 0x000001ce}, + {0x0000a3e4, 0x00000000}, + {0x0000a3e8, 0x18c43433}, + {0x00007800, 0x00040000}, + {0x00007804, 0xdb005012}, + {0x00007808, 0x04924914}, + {0x0000780c, 0x21084210}, + {0x00007810, 0x6d801300}, + {0x00007818, 0x07e41000}, + {0x00007824, 0x00040000}, + {0x00007828, 0xdb005012}, + {0x0000782c, 0x04924914}, + {0x00007830, 0x21084210}, + {0x00007834, 0x6d801300}, + {0x0000783c, 0x07e40000}, + {0x00007848, 0x00100000}, + {0x0000784c, 0x773f0567}, + {0x00007850, 0x54214514}, + {0x00007854, 0x12035828}, + {0x00007858, 0x9259269a}, + {0x00007860, 0x52802000}, + {0x00007864, 0x0a8e370e}, + {0x00007868, 0xc0102850}, + {0x0000786c, 0x812d4000}, + {0x00007870, 0x807ec400}, + {0x00007874, 0x001b6db0}, + {0x00007878, 0x00376b63}, + {0x0000787c, 0x06db6db6}, + {0x00007880, 0x006d8000}, + {0x00007884, 0xffeffffe}, + {0x00007888, 0xffeffffe}, + {0x0000788c, 0x00010000}, + {0x00007890, 0x02060aeb}, + {0x00007898, 0x2a850160}, 
}; static const u32 ar9280Modes_fast_clock_9280_2[][3] = { - { 0x00001030, 0x00000268, 0x000004d0 }, - { 0x00001070, 0x0000018c, 0x00000318 }, - { 0x000010b0, 0x00000fd0, 0x00001fa0 }, - { 0x00008014, 0x044c044c, 0x08980898 }, - { 0x0000801c, 0x148ec02b, 0x148ec057 }, - { 0x00008318, 0x000044c0, 0x00008980 }, - { 0x00009820, 0x02020200, 0x02020200 }, - { 0x00009824, 0x01000f0f, 0x01000f0f }, - { 0x00009828, 0x0b020001, 0x0b020001 }, - { 0x00009834, 0x00000f0f, 0x00000f0f }, - { 0x00009844, 0x03721821, 0x03721821 }, - { 0x00009914, 0x00000898, 0x00001130 }, - { 0x00009918, 0x0000000b, 0x00000016 }, + /* Addr 5G_HT20 5G_HT40 */ + {0x00001030, 0x00000268, 0x000004d0}, + {0x00001070, 0x0000018c, 0x00000318}, + {0x000010b0, 0x00000fd0, 0x00001fa0}, + {0x00008014, 0x044c044c, 0x08980898}, + {0x0000801c, 0x148ec02b, 0x148ec057}, + {0x00008318, 0x000044c0, 0x00008980}, + {0x00009820, 0x02020200, 0x02020200}, + {0x00009824, 0x01000f0f, 0x01000f0f}, + {0x00009828, 0x0b020001, 0x0b020001}, + {0x00009834, 0x00000f0f, 0x00000f0f}, + {0x00009844, 0x03721821, 0x03721821}, + {0x00009914, 0x00000898, 0x00001130}, + {0x00009918, 0x0000000b, 0x00000016}, }; static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = { - { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, - { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, - { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, - { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, - { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, - { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, - { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, - { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, - { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, - { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, - { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, - { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, - { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, - { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, - { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, - { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, - { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, - { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, - { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, - { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, - { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, - { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, - { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, - { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, - { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, - { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, - { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, - { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, - { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, - { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 
0x00008288 }, - { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, - { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, - { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, - { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, - { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, - { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, - { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, - { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, - { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, - { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, - { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, - { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, - { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, - { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, - { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, - { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, - { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, - { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, - { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10 }, - { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b14, 0x00008b14, 0x00008b14 }, - { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b01, 0x00008b01, 0x00008b01 }, - { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b05, 0x00008b05, 0x00008b05 }, - { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b09, 0x00008b09, 0x00008b09 }, - { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b0d, 0x00008b0d, 0x00008b0d }, - { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b11, 0x00008b11, 0x00008b11 }, - { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008b15, 0x00008b15, 0x00008b15 }, - { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008b02, 0x00008b02, 0x00008b02 }, - { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008b06, 0x00008b06, 0x00008b06 }, - { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00008b0a, 0x00008b0a, 0x00008b0a }, - { 0x00009aec, 0x0000b784, 0x0000b784, 0x00008b0e, 0x00008b0e, 0x00008b0e }, - { 0x00009af0, 0x0000b788, 0x0000b788, 0x00008b12, 0x00008b12, 0x00008b12 }, - { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008b16, 0x00008b16, 0x00008b16 }, - { 0x00009af8, 0x0000b790, 0x0000b790, 0x00008b03, 0x00008b03, 0x00008b03 }, - { 0x00009afc, 0x0000b794, 0x0000b794, 0x00008b07, 0x00008b07, 0x00008b07 }, - { 0x00009b00, 0x0000b798, 0x0000b798, 0x00008b0b, 0x00008b0b, 0x00008b0b }, - { 0x00009b04, 0x0000d784, 0x0000d784, 0x00008b0f, 0x00008b0f, 0x00008b0f }, - { 0x00009b08, 0x0000d788, 0x0000d788, 0x00008b13, 0x00008b13, 0x00008b13 }, - { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008b17, 0x00008b17, 0x00008b17 }, - { 0x00009b10, 0x0000d790, 0x0000d790, 0x00008b23, 0x00008b23, 0x00008b23 }, - { 0x00009b14, 0x0000f780, 0x0000f780, 0x00008b27, 0x00008b27, 0x00008b27 }, - { 0x00009b18, 0x0000f784, 0x0000f784, 0x00008b2b, 0x00008b2b, 0x00008b2b }, - { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00008b2f, 0x00008b2f, 0x00008b2f }, - { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008b33, 0x00008b33, 0x00008b33 }, - { 0x00009b24, 0x0000f790, 0x0000f790, 0x00008b37, 0x00008b37, 0x00008b37 }, - { 0x00009b28, 0x0000f794, 0x0000f794, 0x00008b43, 0x00008b43, 0x00008b43 }, - { 0x00009b2c, 0x0000f7a4, 
0x0000f7a4, 0x00008b47, 0x00008b47, 0x00008b47 }, - { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008b4b, 0x00008b4b, 0x00008b4b }, - { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008b4f, 0x00008b4f, 0x00008b4f }, - { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008b53, 0x00008b53, 0x00008b53 }, - { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008b57, 0x00008b57, 0x00008b57 }, - { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 
0x00008b5b }, - { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, - { 0x00009848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 }, - { 0x0000a848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 }, + {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290}, + {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300}, + {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304}, + {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308}, + {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c}, + {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004}, + {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008}, + {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c}, + {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080}, + {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084}, + {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088}, + {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c}, + {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100}, + {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104}, + {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108}, + {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c}, + {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110}, + {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114}, + {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180}, + {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184}, + {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188}, + {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c}, + {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190}, + {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194}, + {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0}, + {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c}, + {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8}, + {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284}, + {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288}, + {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224}, + {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290}, + {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300}, + {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304}, + {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308}, + {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c}, + {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380}, + {0x00009a94, 0x0000a78c, 
0x0000a78c, 0x00008384, 0x00008384, 0x00008384}, + {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700}, + {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704}, + {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708}, + {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c}, + {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780}, + {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784}, + {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00}, + {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04}, + {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08}, + {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c}, + {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10}, + {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b80, 0x00008b80, 0x00008b80}, + {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b84, 0x00008b84, 0x00008b84}, + {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b88, 0x00008b88, 0x00008b88}, + {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b8c, 0x00008b8c, 0x00008b8c}, + {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b90, 0x00008b90, 0x00008b90}, + {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b94, 0x00008b94, 0x00008b94}, + {0x00009adc, 0x0000b390, 0x0000b390, 0x00008b98, 0x00008b98, 0x00008b98}, + {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008ba4, 0x00008ba4, 0x00008ba4}, + {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008ba8, 0x00008ba8, 0x00008ba8}, + {0x00009ae8, 0x0000b780, 0x0000b780, 0x00008bac, 0x00008bac, 0x00008bac}, + {0x00009aec, 0x0000b784, 0x0000b784, 0x00008bb0, 0x00008bb0, 0x00008bb0}, + {0x00009af0, 0x0000b788, 0x0000b788, 0x00008bb4, 0x00008bb4, 0x00008bb4}, + {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008ba1, 0x00008ba1, 0x00008ba1}, + {0x00009af8, 0x0000b790, 0x0000b790, 0x00008ba5, 0x00008ba5, 0x00008ba5}, + {0x00009afc, 0x0000b794, 0x0000b794, 0x00008ba9, 0x00008ba9, 0x00008ba9}, + {0x00009b00, 0x0000b798, 0x0000b798, 0x00008bad, 0x00008bad, 0x00008bad}, + {0x00009b04, 0x0000d784, 0x0000d784, 0x00008bb1, 0x00008bb1, 0x00008bb1}, + {0x00009b08, 0x0000d788, 0x0000d788, 0x00008bb5, 0x00008bb5, 0x00008bb5}, + {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008ba2, 0x00008ba2, 0x00008ba2}, + {0x00009b10, 0x0000d790, 0x0000d790, 0x00008ba6, 0x00008ba6, 0x00008ba6}, + {0x00009b14, 0x0000f780, 0x0000f780, 0x00008baa, 0x00008baa, 0x00008baa}, + {0x00009b18, 0x0000f784, 0x0000f784, 0x00008bae, 0x00008bae, 0x00008bae}, + {0x00009b1c, 0x0000f788, 0x0000f788, 0x00008bb2, 0x00008bb2, 0x00008bb2}, + {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008bb6, 0x00008bb6, 0x00008bb6}, + {0x00009b24, 0x0000f790, 0x0000f790, 0x00008ba3, 0x00008ba3, 0x00008ba3}, + {0x00009b28, 0x0000f794, 0x0000f794, 0x00008ba7, 0x00008ba7, 0x00008ba7}, + {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008bab, 0x00008bab, 0x00008bab}, + {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008baf, 0x00008baf, 0x00008baf}, + {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008bb3, 0x00008bb3, 0x00008bb3}, + {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008bb7, 0x00008bb7, 0x00008bb7}, + {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008bc3, 0x00008bc3, 0x00008bc3}, + {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008bc7, 0x00008bc7, 0x00008bc7}, + {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008bcb, 0x00008bcb, 0x00008bcb}, + {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008bcf, 0x00008bcf, 0x00008bcf}, + {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008bd3, 0x00008bd3, 0x00008bd3}, + 
{0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008bd7, 0x00008bd7, 0x00008bd7}, + {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb, 0x00008bdb}, + {0x00009848, 0x00001066, 0x00001066, 0x00001055, 0x00001055, 0x00001055}, + {0x0000a848, 0x00001066, 0x00001066, 0x00001055, 0x00001055, 0x00001055}, }; static const u32 ar9280Modes_original_rxgain_9280_2[][6] 
= { - { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, - { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, - { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, - { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, - { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, - { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, - { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, - { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, - { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, - { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, - { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, - { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, - { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, - { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, - { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, - { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, - { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, - { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, - { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, - { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, - { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, - { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, - { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, - { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, - { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, - { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, - { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, - { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, - { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, - { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, - { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, - { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, - { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, - { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, - { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, - { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, - { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, - { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, - { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, - { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, - { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, - { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, - { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, - { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, - { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, - { 0x00009ab4, 0x0000af80, 0x0000af80, 
0x00008b04, 0x00008b04, 0x00008b04 }, - { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, - { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, - { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, - { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, - { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, - { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, - { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, - { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, - { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, - { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, - { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, - { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, - { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, - { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, - { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, - { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, - { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, - { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, - { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, - { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, - { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, - { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, - { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, - { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, - { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 }, - { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, - { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, - { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, - { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, - { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, - { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, - { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, - { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, - { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, - { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, - { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 }, - { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, - { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, - { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, - { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, - { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 }, - { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, - { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, - { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, - { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, - { 
0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, - { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, - { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, - { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, - { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, - { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, - { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, - { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, - { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, - { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, - { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, + {0x00009a00, 0x00008184, 0x00008184, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a04, 0x00008188, 0x00008188, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a08, 0x0000818c, 0x0000818c, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a0c, 0x00008190, 0x00008190, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a10, 0x00008194, 0x00008194, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 
0x00008004, 0x00008004}, + {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008}, + {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c}, + {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080}, + {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084}, + {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088}, + {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c}, + {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100}, + {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104}, + {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108}, + {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c}, + {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110}, + {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114}, + {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180}, + {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184}, + {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188}, + {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c}, + {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190}, + {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194}, + {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0}, + {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c}, + {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8}, + {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284}, + {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288}, + {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224}, + {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290}, + {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300}, + {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304}, + {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308}, + {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c}, + {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380}, + {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384}, + {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700}, + {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704}, + {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708}, + {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c}, + {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780}, + {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784}, + {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00}, + {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04}, + {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08}, + {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c}, + {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80}, + {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84}, + {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88}, + {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c}, + {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90}, + {0x00009ad4, 0x0000b388, 
0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80}, + {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84}, + {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88}, + {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c}, + {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90}, + {0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c}, + {0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310}, + {0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384}, + {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388}, + {0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324}, + {0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704}, + {0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4}, + {0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8}, + {0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710}, + {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714}, + {0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720}, + {0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724}, + {0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728}, + {0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c}, + {0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0}, + {0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4}, + {0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8}, + {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0}, + {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4}, + {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8}, + {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5}, + {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9}, + {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad}, + {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1}, + {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5}, + {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9}, + {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5}, + {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9}, + {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1}, + {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5}, + {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9}, + {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6}, + {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca}, + {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce}, + {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2}, + {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6}, + {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3}, + {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7}, + {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb}, + {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf}, + {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7}, + {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db}, + 
{0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db}, + {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db}, + {0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db}, + {0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063}, + {0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063}, }; static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = { - { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, - { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, - { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, - { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, - { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, - { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, - { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, - { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, - { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, - { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, - { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, - { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, - { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, - { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, - { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, - { 0x00009a3c, 0x00008290, 
0x00008290, 0x00008108, 0x00008108, 0x00008108 }, - { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, - { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, - { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, - { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, - { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, - { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, - { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, - { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, - { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, - { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, - { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, - { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, - { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, - { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, - { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, - { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, - { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, - { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, - { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, - { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, - { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, - { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, - { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, - { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, - { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, - { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, - { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, - { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, - { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, - { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, - { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, - { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, - { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, - { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, - { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, - { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, - { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, - { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, - { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, - { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, - { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, - { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, - { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310 }, - { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314 }, - { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 
0x00009320 }, - { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324 }, - { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328, 0x00009328 }, - { 0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c }, - { 0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330 }, - { 0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334 }, - { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321 }, - { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325 }, - { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329 }, - { 0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d }, - { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331 }, - { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335 }, - { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322 }, - { 0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326 }, - { 0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a }, - { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e }, - { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332 }, - { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336 }, - { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323 }, - { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327 }, - { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b }, - { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f }, - { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333 }, - { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337 }, - { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343 }, - { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347 }, - { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b }, - { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f }, - { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353 }, - { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357 }, - { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009ba8, 0x0000f7db, 
0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, - { 0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a }, - { 0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a }, + {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290}, + {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300}, + {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304}, + {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308}, + {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c}, + {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000}, + {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004}, + {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008}, + {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c}, + {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080}, + {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084}, + {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088}, + {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c}, + {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100}, + {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104}, + {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108}, + {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c}, + {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110}, + {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114}, + {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180}, + {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184}, + {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188}, + {0x00009a58, 0x00008380, 0x00008380, 
0x0000818c, 0x0000818c, 0x0000818c}, + {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190}, + {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194}, + {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0}, + {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c}, + {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8}, + {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284}, + {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288}, + {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224}, + {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290}, + {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300}, + {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304}, + {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308}, + {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c}, + {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380}, + {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384}, + {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700}, + {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704}, + {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708}, + {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c}, + {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780}, + {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784}, + {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00}, + {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04}, + {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08}, + {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c}, + {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80}, + {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84}, + {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88}, + {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c}, + {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90}, + {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80}, + {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84}, + {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88}, + {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c}, + {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90}, + {0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310}, + {0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314}, + {0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 0x00009320}, + {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324}, + {0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328, 0x00009328}, + {0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c}, + {0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330}, + {0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334}, + {0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321}, + {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325}, + {0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329}, + {0x00009b14, 
0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d}, + {0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331}, + {0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335}, + {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322}, + {0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326}, + {0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a}, + {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e}, + {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332}, + {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336}, + {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323}, + {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327}, + {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b}, + {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f}, + {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333}, + {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337}, + {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343}, + {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347}, + {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b}, + {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f}, + {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353}, + {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357}, + {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 
0x0000935b}, + {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b}, + {0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a}, + {0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a}, }; static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = { - { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, - { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce }, - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 }, - { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 }, - { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010 }, - { 0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012 }, - { 0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014 }, - { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a }, - { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211 }, - { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, - { 0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411 }, - { 0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413 }, - { 0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811 }, - { 0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813 }, - { 0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14 }, - { 0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50 }, - { 0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c }, - { 0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a }, - { 0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92 }, - { 0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2 }, - { 0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5 }, - { 0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 0x0004ff54 }, - { 0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5 }, - { 0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, - { 0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, - { 0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, - { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, - { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, - { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, + {0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 
0x0a1aa652}, + {0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce}, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002}, + {0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008}, + {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010}, + {0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012}, + {0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014}, + {0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a}, + {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211}, + {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213}, + {0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411}, + {0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413}, + {0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811}, + {0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813}, + {0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14}, + {0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50}, + {0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c}, + {0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a}, + {0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92}, + {0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2}, + {0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5}, + {0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 0x0004ff54}, + {0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5}, + {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081}, + {0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff}, + {0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff}, + {0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000}, + {0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000}, + {0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480}, + {0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480}, }; static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = { - { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, - { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce }, - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, - { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, - { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, - { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, - { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, - { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, - { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 }, - { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, - { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, - { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, - { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, - { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, - { 0x0000a334, 
0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, - { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, - { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, - { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, - { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, - { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, - { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, - { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, - { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, - { 0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, - { 0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, - { 0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, - { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, - { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, - { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, + {0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652}, + {0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce}, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002}, + {0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009}, + {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b}, + {0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012}, + {0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048}, + {0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a}, + {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211}, + {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213}, + {0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b}, + {0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412}, + {0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414}, + {0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a}, + {0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649}, + {0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b}, + {0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49}, + {0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48}, + {0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a}, + {0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88}, + {0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a}, + {0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9}, + {0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42}, + {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081}, + {0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff}, + {0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff}, + {0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000}, + {0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000}, + {0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480}, + {0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480}, }; static const u32 
ar9280PciePhy_clkreq_off_L1_9280[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffc }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffc}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffd }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, -}; - -/* AR9285 Revsion 10*/ -static const u32 ar9285Modes_9285[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, - { 0x00009844, 0x0372161e, 0x0372161e, 0x03720020, 0x03720020, 0x037216a0 }, - { 0x00009848, 0x00001066, 0x00001066, 0x0000004e, 0x0000004e, 0x00001059 }, - { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, - { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000985c, 0x3139605e, 0x3139605e, 0x3136605e, 0x3136605e, 0x3139605e }, - { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 }, - { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, - { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1020, 0xdfbc1020, 0xdfbc1010 }, - { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099b8, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c }, - { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 
0x05eea6d4 }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009a00, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 }, - { 0x00009a04, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 }, - { 0x00009a08, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 }, - { 0x00009a0c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 }, - { 0x00009a10, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 }, - { 0x00009a14, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 }, - { 0x00009a18, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 }, - { 0x00009a1c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, - { 0x00009a20, 0x00000000, 0x00000000, 0x00068114, 0x00068114, 0x00000000 }, - { 0x00009a24, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 }, - { 0x00009a28, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 }, - { 0x00009a2c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 }, - { 0x00009a30, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 }, - { 0x00009a34, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 }, - { 0x00009a38, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 }, - { 0x00009a3c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 }, - { 0x00009a40, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 }, - { 0x00009a44, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 }, - { 0x00009a48, 0x00000000, 0x00000000, 0x00068284, 0x00068284, 0x00000000 }, - { 0x00009a4c, 0x00000000, 0x00000000, 0x00068288, 0x00068288, 0x00000000 }, - { 0x00009a50, 0x00000000, 0x00000000, 0x00068220, 0x00068220, 0x00000000 }, - { 0x00009a54, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 }, - { 0x00009a58, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 }, - { 0x00009a5c, 0x00000000, 0x00000000, 0x00068304, 0x00068304, 0x00000000 }, - { 0x00009a60, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 }, - { 0x00009a64, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 }, - { 0x00009a68, 0x00000000, 0x00000000, 0x00068380, 0x00068380, 0x00000000 }, - { 0x00009a6c, 0x00000000, 0x00000000, 0x00068384, 0x00068384, 0x00000000 }, - { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, - { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, - { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, - { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, - { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, - { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, - { 0x00009a88, 0x00000000, 0x00000000, 0x00068b04, 0x00068b04, 0x00000000 }, - { 0x00009a8c, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 }, - { 0x00009a90, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 }, - { 0x00009a94, 0x00000000, 0x00000000, 0x00068b0c, 0x00068b0c, 0x00000000 }, - { 0x00009a98, 0x00000000, 0x00000000, 0x00068b80, 0x00068b80, 0x00000000 }, - { 0x00009a9c, 0x00000000, 
0x00000000, 0x00068b84, 0x00068b84, 0x00000000 }, - { 0x00009aa0, 0x00000000, 0x00000000, 0x00068b88, 0x00068b88, 0x00000000 }, - { 0x00009aa4, 0x00000000, 0x00000000, 0x00068b8c, 0x00068b8c, 0x00000000 }, - { 0x00009aa8, 0x00000000, 0x00000000, 0x000b8b90, 0x000b8b90, 0x00000000 }, - { 0x00009aac, 0x00000000, 0x00000000, 0x000b8f80, 0x000b8f80, 0x00000000 }, - { 0x00009ab0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, - { 0x00009ab4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 }, - { 0x00009ab8, 0x00000000, 0x00000000, 0x000b8f8c, 0x000b8f8c, 0x00000000 }, - { 0x00009abc, 0x00000000, 0x00000000, 0x000b8f90, 0x000b8f90, 0x00000000 }, - { 0x00009ac0, 0x00000000, 0x00000000, 0x000bb30c, 0x000bb30c, 0x00000000 }, - { 0x00009ac4, 0x00000000, 0x00000000, 0x000bb310, 0x000bb310, 0x00000000 }, - { 0x00009ac8, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 }, - { 0x00009acc, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 }, - { 0x00009ad0, 0x00000000, 0x00000000, 0x000bb324, 0x000bb324, 0x00000000 }, - { 0x00009ad4, 0x00000000, 0x00000000, 0x000bb704, 0x000bb704, 0x00000000 }, - { 0x00009ad8, 0x00000000, 0x00000000, 0x000f96a4, 0x000f96a4, 0x00000000 }, - { 0x00009adc, 0x00000000, 0x00000000, 0x000f96a8, 0x000f96a8, 0x00000000 }, - { 0x00009ae0, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 }, - { 0x00009ae4, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 }, - { 0x00009ae8, 0x00000000, 0x00000000, 0x000f9720, 0x000f9720, 0x00000000 }, - { 0x00009aec, 0x00000000, 0x00000000, 0x000f9724, 0x000f9724, 0x00000000 }, - { 0x00009af0, 0x00000000, 0x00000000, 0x000f9728, 0x000f9728, 0x00000000 }, - { 0x00009af4, 0x00000000, 0x00000000, 0x000f972c, 0x000f972c, 0x00000000 }, - { 0x00009af8, 0x00000000, 0x00000000, 0x000f97a0, 0x000f97a0, 0x00000000 }, - { 0x00009afc, 0x00000000, 0x00000000, 0x000f97a4, 0x000f97a4, 0x00000000 }, - { 0x00009b00, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 }, - { 0x00009b04, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 }, - { 0x00009b08, 0x00000000, 0x00000000, 0x000fb7b4, 0x000fb7b4, 0x00000000 }, - { 0x00009b0c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 }, - { 0x00009b10, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 }, - { 0x00009b14, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 }, - { 0x00009b18, 0x00000000, 0x00000000, 0x000fb7ad, 0x000fb7ad, 0x00000000 }, - { 0x00009b1c, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 }, - { 0x00009b20, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 }, - { 0x00009b24, 0x00000000, 0x00000000, 0x000fb7b9, 0x000fb7b9, 0x00000000 }, - { 0x00009b28, 0x00000000, 0x00000000, 0x000fb7c5, 0x000fb7c5, 0x00000000 }, - { 0x00009b2c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 }, - { 0x00009b30, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 }, - { 0x00009b34, 0x00000000, 0x00000000, 0x000fb7d5, 0x000fb7d5, 0x00000000 }, - { 0x00009b38, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 }, - { 0x00009b3c, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 }, - { 0x00009b40, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 }, - { 0x00009b44, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 }, - { 0x00009b48, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 }, - { 0x00009b4c, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 }, - { 0x00009b50, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 
0x00000000 }, - { 0x00009b54, 0x00000000, 0x00000000, 0x000fb7c7, 0x000fb7c7, 0x00000000 }, - { 0x00009b58, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 }, - { 0x00009b5c, 0x00000000, 0x00000000, 0x000fb7cf, 0x000fb7cf, 0x00000000 }, - { 0x00009b60, 0x00000000, 0x00000000, 0x000fb7d7, 0x000fb7d7, 0x00000000 }, - { 0x00009b64, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b68, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b6c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b70, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b74, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b78, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b7c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b80, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b84, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b88, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b8c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b90, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b94, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b98, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009b9c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009ba0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009ba4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009ba8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bac, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bb0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bb4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bb8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bbc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bc0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bc4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bc8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bcc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bd0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bd4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bd8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bdc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009be0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009be4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009be8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bec, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bf0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bf4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bf8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x00009bfc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, - { 0x0000aa00, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 }, - { 0x0000aa04, 0x00000000, 0x00000000, 0x00068080, 0x00068080, 0x00000000 }, - { 0x0000aa08, 0x00000000, 
0x00000000, 0x00068084, 0x00068084, 0x00000000 }, - { 0x0000aa0c, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 }, - { 0x0000aa10, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 }, - { 0x0000aa14, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 }, - { 0x0000aa18, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 }, - { 0x0000aa1c, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 }, - { 0x0000aa20, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 }, - { 0x0000aa24, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, - { 0x0000aa28, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, - { 0x0000aa2c, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 }, - { 0x0000aa30, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 }, - { 0x0000aa34, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 }, - { 0x0000aa38, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 }, - { 0x0000aa3c, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 }, - { 0x0000aa40, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 }, - { 0x0000aa44, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 }, - { 0x0000aa48, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 }, - { 0x0000aa4c, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 }, - { 0x0000aa50, 0x00000000, 0x00000000, 0x000681ac, 0x000681ac, 0x00000000 }, - { 0x0000aa54, 0x00000000, 0x00000000, 0x0006821c, 0x0006821c, 0x00000000 }, - { 0x0000aa58, 0x00000000, 0x00000000, 0x00068224, 0x00068224, 0x00000000 }, - { 0x0000aa5c, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 }, - { 0x0000aa60, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 }, - { 0x0000aa64, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 }, - { 0x0000aa68, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 }, - { 0x0000aa6c, 0x00000000, 0x00000000, 0x00068310, 0x00068310, 0x00000000 }, - { 0x0000aa70, 0x00000000, 0x00000000, 0x00068788, 0x00068788, 0x00000000 }, - { 0x0000aa74, 0x00000000, 0x00000000, 0x0006878c, 0x0006878c, 0x00000000 }, - { 0x0000aa78, 0x00000000, 0x00000000, 0x00068790, 0x00068790, 0x00000000 }, - { 0x0000aa7c, 0x00000000, 0x00000000, 0x00068794, 0x00068794, 0x00000000 }, - { 0x0000aa80, 0x00000000, 0x00000000, 0x00068798, 0x00068798, 0x00000000 }, - { 0x0000aa84, 0x00000000, 0x00000000, 0x0006879c, 0x0006879c, 0x00000000 }, - { 0x0000aa88, 0x00000000, 0x00000000, 0x00068b89, 0x00068b89, 0x00000000 }, - { 0x0000aa8c, 0x00000000, 0x00000000, 0x00068b8d, 0x00068b8d, 0x00000000 }, - { 0x0000aa90, 0x00000000, 0x00000000, 0x00068b91, 0x00068b91, 0x00000000 }, - { 0x0000aa94, 0x00000000, 0x00000000, 0x00068b95, 0x00068b95, 0x00000000 }, - { 0x0000aa98, 0x00000000, 0x00000000, 0x00068b99, 0x00068b99, 0x00000000 }, - { 0x0000aa9c, 0x00000000, 0x00000000, 0x00068ba5, 0x00068ba5, 0x00000000 }, - { 0x0000aaa0, 0x00000000, 0x00000000, 0x00068ba9, 0x00068ba9, 0x00000000 }, - { 0x0000aaa4, 0x00000000, 0x00000000, 0x00068bad, 0x00068bad, 0x00000000 }, - { 0x0000aaa8, 0x00000000, 0x00000000, 0x000b8b0c, 0x000b8b0c, 0x00000000 }, - { 0x0000aaac, 0x00000000, 0x00000000, 0x000b8f10, 0x000b8f10, 0x00000000 }, - { 0x0000aab0, 0x00000000, 0x00000000, 0x000b8f14, 0x000b8f14, 0x00000000 }, - { 0x0000aab4, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, - { 0x0000aab8, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, - { 0x0000aabc, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 
0x00000000 }, - { 0x0000aac0, 0x00000000, 0x00000000, 0x000bb380, 0x000bb380, 0x00000000 }, - { 0x0000aac4, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 }, - { 0x0000aac8, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 }, - { 0x0000aacc, 0x00000000, 0x00000000, 0x000bb38c, 0x000bb38c, 0x00000000 }, - { 0x0000aad0, 0x00000000, 0x00000000, 0x000bb394, 0x000bb394, 0x00000000 }, - { 0x0000aad4, 0x00000000, 0x00000000, 0x000bb798, 0x000bb798, 0x00000000 }, - { 0x0000aad8, 0x00000000, 0x00000000, 0x000f970c, 0x000f970c, 0x00000000 }, - { 0x0000aadc, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 }, - { 0x0000aae0, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 }, - { 0x0000aae4, 0x00000000, 0x00000000, 0x000f9718, 0x000f9718, 0x00000000 }, - { 0x0000aae8, 0x00000000, 0x00000000, 0x000f9705, 0x000f9705, 0x00000000 }, - { 0x0000aaec, 0x00000000, 0x00000000, 0x000f9709, 0x000f9709, 0x00000000 }, - { 0x0000aaf0, 0x00000000, 0x00000000, 0x000f970d, 0x000f970d, 0x00000000 }, - { 0x0000aaf4, 0x00000000, 0x00000000, 0x000f9711, 0x000f9711, 0x00000000 }, - { 0x0000aaf8, 0x00000000, 0x00000000, 0x000f9715, 0x000f9715, 0x00000000 }, - { 0x0000aafc, 0x00000000, 0x00000000, 0x000f9719, 0x000f9719, 0x00000000 }, - { 0x0000ab00, 0x00000000, 0x00000000, 0x000fb7a4, 0x000fb7a4, 0x00000000 }, - { 0x0000ab04, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 }, - { 0x0000ab08, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 }, - { 0x0000ab0c, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 }, - { 0x0000ab10, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 }, - { 0x0000ab14, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 }, - { 0x0000ab18, 0x00000000, 0x00000000, 0x000fb7bc, 0x000fb7bc, 0x00000000 }, - { 0x0000ab1c, 0x00000000, 0x00000000, 0x000fb7a1, 0x000fb7a1, 0x00000000 }, - { 0x0000ab20, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 }, - { 0x0000ab24, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 }, - { 0x0000ab28, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 }, - { 0x0000ab2c, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 }, - { 0x0000ab30, 0x00000000, 0x00000000, 0x000fb7bd, 0x000fb7bd, 0x00000000 }, - { 0x0000ab34, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 }, - { 0x0000ab38, 0x00000000, 0x00000000, 0x000fb7cd, 0x000fb7cd, 0x00000000 }, - { 0x0000ab3c, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 }, - { 0x0000ab40, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 }, - { 0x0000ab44, 0x00000000, 0x00000000, 0x000fb7c2, 0x000fb7c2, 0x00000000 }, - { 0x0000ab48, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 }, - { 0x0000ab4c, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 }, - { 0x0000ab50, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 }, - { 0x0000ab54, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 }, - { 0x0000ab58, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 }, - { 0x0000ab5c, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 }, - { 0x0000ab60, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 }, - { 0x0000ab64, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab68, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab6c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab70, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab74, 0x00000000, 
0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab78, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab7c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab80, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab84, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab88, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab8c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab90, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab94, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab98, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000ab9c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000aba0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000aba4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000aba8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abac, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abb0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abb4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abb8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abbc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abc0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abc4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abc8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abcc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abd0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abd4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abd8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abdc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abe0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abe4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abe8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abec, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abf0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abf4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abf8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000abfc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, - { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, - { 0x0000a20c, 0x00000014, 0x00000014, 0x00000000, 0x00000000, 0x0001f000 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a250, 0x001ff000, 0x001ff000, 0x001ca000, 0x001ca000, 0x001da000 }, - { 0x0000a274, 0x0a81c652, 0x0a81c652, 0x0a820652, 0x0a820652, 0x0a82a652 }, - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00007201, 0x00007201, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00010408, 0x00010408, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x0001860a, 0x0001860a, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x00020818, 0x00020818, 
0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x00024858, 0x00024858, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00026859, 0x00026859, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x0002985b, 0x0002985b, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0002c89a, 0x0002c89a, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0002e89b, 0x0002e89b, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x0003089c, 0x0003089c, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0003289d, 0x0003289d, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0003489e, 0x0003489e, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x000388de, 0x000388de, 0x00000000 }, - { 0x0000a338, 0x00000000, 0x00000000, 0x0003b91e, 0x0003b91e, 0x00000000 }, - { 0x0000a33c, 0x00000000, 0x00000000, 0x0003d95e, 0x0003d95e, 0x00000000 }, - { 0x0000a340, 0x00000000, 0x00000000, 0x000419df, 0x000419df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, -}; - -static const u32 ar9285Common_9285[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020045 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 
}, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00004060, 0x00000000 }, - { 0x00004064, 0x00000000 }, - { 0x00007010, 0x00000031 }, - { 0x00007034, 0x00000002 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x00000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080c0, 0x2a80001a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008120, 0x08f04800 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0x00000000 }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c0, 0x00000000 }, - { 0x000081d0, 0x00003210 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88a00010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x0000829c, 0x00000000 }, - { 0x00008300, 0x00000040 }, - { 0x00008314, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000001 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00000000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x00010380 }, - { 0x00008344, 0x00481043 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xafe68e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 
0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x0000984c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x00009910, 0x01002310 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009940, 0x14750604 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x2108ecff }, - { 0x00009968, 0x000003ce }, - { 0x00009970, 0x1927b515 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x2def0a00 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099f0, 0x00000000 }, - { 0x0000a208, 0x803e6788 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x00206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x00000000 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a244, 0x00000000 }, - { 0x0000a248, 0xfffffffc }, - { 0x0000a24c, 0x00000000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0ccb5380 }, - { 0x0000a25c, 0x15151501 }, - { 0x0000a260, 0xdfa90f01 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0ebae9e6 }, - { 0x0000d270, 0x0d820820 }, - { 0x0000a278, 0x39ce739c }, - { 0x0000a27c, 0x050e039c }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x39ce739c }, - { 0x0000a398, 0x0000039c }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x39ce739c }, - { 0x0000a3e0, 0x0000039c }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f70081 }, - { 0x00007800, 0x00140000 }, - { 0x00007804, 0x0e4548d8 }, - { 0x00007808, 0x54214514 }, - { 0x0000780c, 0x02025820 }, - { 0x00007810, 0x71c0d388 }, - { 0x00007814, 0x924934a8 }, - { 0x0000781c, 0x00000000 }, - { 0x00007820, 0x00000c04 }, - { 0x00007824, 0x00d86fff }, - { 0x00007828, 0x26d2491b }, - { 0x0000782c, 0x6e36d97b }, - { 0x00007830, 0xedb6d96c }, - { 0x00007834, 0x71400086 }, - { 0x00007838, 0xfac68800 }, - { 0x0000783c, 
0x0001fffe }, - { 0x00007840, 0xffeb1a20 }, - { 0x00007844, 0x000c0db6 }, - { 0x00007848, 0x6db61b6f }, - { 0x0000784c, 0x6d9b66db }, - { 0x00007850, 0x6d8c6dba }, - { 0x00007854, 0x00040000 }, - { 0x00007858, 0xdb003012 }, - { 0x0000785c, 0x04924914 }, - { 0x00007860, 0x21084210 }, - { 0x00007864, 0xf7d7ffde }, - { 0x00007868, 0xc2034080 }, - { 0x0000786c, 0x48609eb4 }, - { 0x00007870, 0x10142c00 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffd}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffd }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffd}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffc }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffc}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; -/* AR9285 v1_2 PCI Register Writes. 
Created: 04/13/09 */ static const u32 ar9285Modes_9285_1_2[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, - { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 }, - { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, - { 0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, - { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, - { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, - { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 }, - { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, - { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 }, - { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c }, - { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, - { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, - { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, - { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, - { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, 
- { 0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, - { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, - { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, - { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, - { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, - { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, - { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, - { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, - { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, - { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, - { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, - { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, - { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, - { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, - { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, - { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, - { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, - { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, - { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, - { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, - { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, - { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, - { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, - { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, - { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, - { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, - { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, - { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, - { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, - { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, - { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, - { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, - { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, - { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, - { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, - { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, - { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, - { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, - { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, - { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, - { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, - { 0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, - { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, - { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, - { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, - { 0x00009ac8, 0x00000000, 0x00000000, 
0x000db384, 0x000db384, 0x00000000 }, - { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, - { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, - { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, - { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, - { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, - { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, - { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, - { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, - { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, - { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, - { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, - { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, - { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, - { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, - { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, - { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, - { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, - { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, - { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, - { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, - { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, - { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, - { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, - { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, - { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, - { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, - { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, - { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, - { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, - { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, - { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, - { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, - { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, - { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, - { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, - { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, - { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, - { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, - { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 
0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, - { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, - { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, - { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, - { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, - { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, - { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, - { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, - { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, - { 0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, - { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, - { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, - { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, - { 0x0000aa34, 0x00000000, 0x00000000, 
0x00058190, 0x00058190, 0x00000000 }, - { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, - { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, - { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, - { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, - { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, - { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, - { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, - { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, - { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, - { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, - { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, - { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, - { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, - { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, - { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, - { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, - { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, - { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, - { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, - { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, - { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, - { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, - { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, - { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, - { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, - { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, - { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, - { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, - { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, - { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, - { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, - { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, - { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, - { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, - { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, - { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, - { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, - { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, - { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, - { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, - { 0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, - { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, - { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, - { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, - { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, - { 
0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, - { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, - { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, - { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, - { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, - { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, - { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, - { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, - { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, - { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, - { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, - { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, - { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, - { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, - { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, - { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, - { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, - { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, - { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, - { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, - { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, - { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, - { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, - { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, - { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, - { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, - { 0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, - { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, - { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, - { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, - { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aba0, 0x00000000, 0x00000000, 
0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, - { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, - { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, - { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f}, + {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880}, + {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001}, + {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007}, + {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e}, + {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0}, + {0x00009848, 0x00001066, 
0x00001066, 0x00001053, 0x00001053, 0x00001059}, + {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059}, + {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2}, + {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e}, + {0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18}, + {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881}, + {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016}, + {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d}, + {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010}, + {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c}, + {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, + {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77}, + {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f}, + {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8}, + {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384}, + {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000}, + {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000}, + {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000}, + {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000}, + {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000}, + {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000}, + {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000}, + {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000}, + {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000}, + {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000}, + {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000}, + {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000}, + {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000}, + {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000}, + {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000}, + {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000}, + {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000}, + {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000}, + {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000}, + {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000}, + {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000}, + {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000}, + {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000}, + 
{0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000}, + {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000}, + {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000}, + {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000}, + {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000}, + {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000}, + {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000}, + {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000}, + {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000}, + {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000}, + {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000}, + {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000}, + {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000}, + {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000}, + {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000}, + {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000}, + {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000}, + {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000}, + {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000}, + {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000}, + {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000}, + {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000}, + {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000}, + {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000}, + {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000}, + {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000}, + {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000}, + {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000}, + {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000}, + {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000}, + {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000}, + {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000}, + {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000}, + {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000}, + {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000}, + {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000}, + {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000}, + {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000}, + {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000}, + {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000}, + {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000}, + {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000}, + {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000}, + {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000}, + {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000}, + {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000}, + {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 
0x000eb7a9, 0x00000000}, + {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000}, + {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000}, + {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000}, + {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000}, + {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000}, + {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000}, + {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000}, + {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000}, + {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000}, + {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000}, + {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000}, + {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000}, + {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000}, + {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000}, + {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000}, + {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000}, + {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000}, + {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000}, + {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000}, + {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bd0, 0x00000000, 
0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000}, + {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000}, + {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000}, + {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000}, + {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000}, + {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000}, + {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000}, + {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000}, + {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000}, + {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000}, + {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000}, + {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000}, + {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000}, + {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000}, + {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000}, + {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000}, + {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000}, + {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000}, + {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000}, + {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000}, + {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000}, + {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000}, + {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000}, + {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000}, + {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000}, + {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000}, + {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000}, + {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000}, + {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000}, + {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000}, + {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000}, + {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000}, + {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000}, + {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000}, + {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000}, + 
{0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000}, + {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000}, + {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000}, + {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000}, + {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000}, + {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000}, + {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000}, + {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000}, + {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000}, + {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000}, + {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000}, + {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000}, + {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000}, + {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000}, + {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000}, + {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000}, + {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000}, + {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000}, + {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000}, + {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000}, + {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000}, + {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000}, + {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000}, + {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000}, + {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000}, + {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000}, + {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000}, + {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000}, + {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000}, + {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000}, + {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000}, + {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000}, + {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000}, + {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000}, + {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000}, + {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000}, + {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000}, + {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000}, + {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000}, + {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000}, + {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000}, + {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000}, + {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000}, + {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000}, + {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000}, + {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000}, + {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 
0x000eb7ce, 0x00000000}, + {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000}, + {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000}, + {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000}, + {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000}, + {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000}, + {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000}, + {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000}, + {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000a204, 0x00000004, 
0x00000004, 0x00000004, 0x00000004, 0x00000004}, + {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000}, + {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000}, + {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000}, + {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e}, }; static const u32 ar9285Common_9285_1_2[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020045 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00004060, 0x00000000 }, - { 0x00004064, 0x00000000 }, - { 0x00007010, 0x00000031 }, - { 0x00007034, 0x00000002 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x00000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f 
}, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080c0, 0x2a80001a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008120, 0x08f04810 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c0, 0x00000000 }, - { 0x000081d0, 0x0000320a }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88a00010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x0000829c, 0x00000000 }, - { 0x00008300, 0x00000040 }, - { 0x00008314, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000001 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00ff0000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x00010380 }, - { 0x00008344, 0x00481043 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xafe68e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x0000984c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x00009910, 0x01002310 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009940, 0x14750604 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 
0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x2108ecff }, - { 0x00009968, 0x000003ce }, - { 0x00009970, 0x192bb514 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x2def0400 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099f0, 0x00000000 }, - { 0x0000a208, 0x803e68c8 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x00206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x00000000 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a244, 0x00000000 }, - { 0x0000a248, 0xfffffffc }, - { 0x0000a24c, 0x00000000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0ccb5380 }, - { 0x0000a25c, 0x15151501 }, - { 0x0000a260, 0xdfa90f01 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0ebae9e6 }, - { 0x0000d270, 0x0d820820 }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f70081 }, - { 0x00007800, 0x00140000 }, - { 0x00007804, 0x0e4548d8 }, - { 0x00007808, 0x54214514 }, - { 0x0000780c, 0x02025830 }, - { 0x00007810, 0x71c0d388 }, - { 0x0000781c, 0x00000000 }, - { 0x00007824, 0x00d86fff }, - { 0x0000782c, 0x6e36d97b }, - { 0x00007834, 0x71400087 }, - { 0x00007844, 0x000c0db6 }, - { 0x00007848, 0x6db6246f }, - { 0x0000784c, 0x6d9b66db }, - { 0x00007850, 0x6d8c6dba }, - { 0x00007854, 0x00040000 }, - { 0x00007858, 0xdb003012 }, - { 0x0000785c, 0x04924914 }, - { 0x00007860, 0x21084210 }, - { 0x00007864, 0xf7d7ffde }, - { 0x00007868, 0xc2034080 }, - { 0x00007870, 0x10142c00 }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020045}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + 
{0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + {0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00004030, 0x00000002}, + {0x0000403c, 0x00000002}, + {0x00004024, 0x0000001f}, + {0x00004060, 0x00000000}, + {0x00004064, 0x00000000}, + {0x00007010, 0x00000031}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x00000000}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x00008070, 0x00000000}, + {0x000080c0, 0x2a80001a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008120, 0x08f04810}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x32143320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, + {0x000081c0, 0x00000000}, + {0x000081d0, 0x0000320a}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, 
+ {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008264, 0x88a00010}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x00000000}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x0000829c, 0x00000000}, + {0x00008300, 0x00000040}, + {0x00008314, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000001}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x00010380}, + {0x00008344, 0x00481043}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xafe68e30}, + {0x00009810, 0xfd14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x0000984c, 0x0040233c}, + {0x00009854, 0x00000044}, + {0x00009900, 0x00000000}, + {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x00009910, 0x01002310}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x04900000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009934, 0x1e1f2022}, + {0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009940, 0x14750604}, + {0x00009948, 0x9280c00a}, + {0x0000994c, 0x00020028}, + {0x00009954, 0x5f3ca3de}, + {0x00009958, 0x2108ecff}, + {0x00009968, 0x000003ce}, + {0x00009970, 0x192bb514}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x00009980, 0x00000000}, + {0x00009984, 0x00000000}, + {0x00009988, 0x00000000}, + {0x0000998c, 0x00000000}, + {0x00009990, 0x00000000}, + {0x00009994, 0x00000000}, + {0x00009998, 0x00000000}, + {0x0000999c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x201fff00}, + {0x000099ac, 0x2def0400}, + {0x000099b0, 0x03051000}, + {0x000099b4, 0x00000820}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000000}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x0cc80caa}, + {0x000099f0, 0x00000000}, + {0x0000a208, 0x803e68c8}, + {0x0000a210, 0x4080a333}, + {0x0000a214, 0x00206c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 0x01834061}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x000003b5}, + {0x0000a22c, 0x00000000}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a244, 0x00000000}, + {0x0000a248, 0xfffffffc}, + {0x0000a24c, 0x00000000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0ccb5380}, + {0x0000a25c, 0x15151501}, + {0x0000a260, 0xdfa90f01}, + {0x0000a268, 0x00000000}, + {0x0000a26c, 0x0ebae9e6}, + {0x0000d270, 0x0d820820}, + {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 
0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, + {0x0000a388, 0x0c000000}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a39c, 0x00000001}, + {0x0000a3a0, 0x00000000}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0x00000000}, + {0x0000a3ac, 0x00000000}, + {0x0000a3b0, 0x00000000}, + {0x0000a3b4, 0x00000000}, + {0x0000a3b8, 0x00000000}, + {0x0000a3bc, 0x00000000}, + {0x0000a3c0, 0x00000000}, + {0x0000a3c4, 0x00000000}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3e4, 0x00000000}, + {0x0000a3e8, 0x18c43433}, + {0x0000a3ec, 0x00f70081}, + {0x00007800, 0x00140000}, + {0x00007804, 0x0e4548d8}, + {0x00007808, 0x54214514}, + {0x0000780c, 0x02025830}, + {0x00007810, 0x71c0d388}, + {0x0000781c, 0x00000000}, + {0x00007824, 0x00d86fff}, + {0x0000782c, 0x6e36d97b}, + {0x00007834, 0x71400087}, + {0x00007844, 0x000c0db6}, + {0x00007848, 0x6db6246f}, + {0x0000784c, 0x6d9b66db}, + {0x00007850, 0x6d8c6dba}, + {0x00007854, 0x00040000}, + {0x00007858, 0xdb003012}, + {0x0000785c, 0x04924914}, + {0x00007860, 0x21084210}, + {0x00007864, 0xf7d7ffde}, + {0x00007868, 0xc2034080}, + {0x00007870, 0x10142c00}, }; static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 }, - { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, - { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, - { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 }, - { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b }, - { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e }, - { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 }, - { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe }, - { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 }, - { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, - { 
0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, - { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 }, - { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, - { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000}, + {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000}, + {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000}, + {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000}, + {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000}, + {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000}, + {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000}, + {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000}, + {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000}, + {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000}, + {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000}, + {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000}, + {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8}, + {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b}, + {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e}, + {0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803}, + {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe}, + {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20}, + {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe}, + {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652}, + {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7}, + {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7}, + {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7}, }; static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - 
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 }, - { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, - { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, - { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 }, - { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b }, - { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e }, - { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 }, - { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe }, - { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 }, - { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, - { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, - { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, - { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, - { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c }, - { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, - { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, - { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, - { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000}, + {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000}, + {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000}, + {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000}, + {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000}, + {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000}, + {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000}, + {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 
0x00000000}, + {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000}, + {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000}, + {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000}, + {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000}, + {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8}, + {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b}, + {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e}, + {0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801}, + {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe}, + {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20}, + {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4}, + {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04}, + {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652}, + {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c}, + {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c}, + {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c}, + {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c}, + {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c}, + {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c}, }; static const u32 ar9285Modes_XE2_0_normal_power[][6] = { - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 }, - { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, - { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, - { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 
0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 }, - { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b }, - { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae }, - { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 }, - { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe }, - { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c }, - { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, - { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, - { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, - { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, - { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c }, - { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, - { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, - { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, - { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000}, + {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000}, + {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000}, + {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000}, + {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000}, + {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000}, + {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000}, + {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000}, + {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000}, + {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000}, + {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000}, + {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000}, + {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8}, + {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b}, + {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae}, + {0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441}, + {0x0000783c, 0x2481f6fe, 0x2481f6fe, 
0x2481f6fe, 0x2481f6fe, 0x2481f6fe}, + {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c}, + {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4}, + {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04}, + {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652}, + {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c}, + {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c}, + {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c}, + {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c}, + {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c}, + {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c}, }; static const u32 ar9285Modes_XE2_0_high_power[][6] = { - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 }, - { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, - { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, - { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 }, - { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b }, - { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e }, - { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 }, - { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe }, - { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c }, - { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, - { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, - { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 }, - { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a398, 0x000000e7, 0x000000e7, 
0x000000e7, 0x000000e7, 0x000000e7 }, - { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000}, + {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000}, + {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000}, + {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000}, + {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000}, + {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000}, + {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000}, + {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000}, + {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000}, + {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000}, + {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000}, + {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000}, + {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000}, + {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8}, + {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b}, + {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e}, + {0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443}, + {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe}, + {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c}, + {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe}, + {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652}, + {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7}, + {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7}, + {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7}, }; static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffd }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 
0xc01dcffd}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffc }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, -}; - -/* AR9287 Revision 10 */ -static const u32 ar9287Modes_9287_1_0[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, - { 0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, - { 0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a }, - { 0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880 }, - { 0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e }, - { 0x00009828, 0x00000000, 0x00000000, 0x0a020001, 0x0a020001, 0x0a020001 }, - { 0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e }, - { 0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0 }, - { 0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, - { 0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e }, - { 0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18 }, - { 0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, - { 0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010 }, - { 0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, - { 0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210 }, - { 0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce }, - { 0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c }, - { 0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, - { 0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444 }, - { 0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000 }, - { 0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000 }, - { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, - { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, -}; - -static const u32 ar9287Common_9287_1_0[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00004060, 0x00000000 }, - { 0x00004064, 0x00000000 }, - { 0x00007010, 0x00000033 }, - { 0x00007020, 0x00000000 }, - { 0x00007034, 0x00000002 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x40000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 
0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080c0, 0x2a80001a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x18487320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c0, 0x00000000 }, - { 0x000081c4, 0x00000000 }, - { 0x000081d4, 0x00000000 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88a00010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x000000ff }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x0000829c, 0x00000000 }, - { 0x00008300, 0x00000040 }, - { 0x00008314, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00ff0000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00008344, 0x01c81043 }, - { 0x00008360, 0xffffffff }, - { 0x00008364, 0xffffffff }, - { 0x00008368, 0x00000000 }, - { 0x00008370, 0x00000000 }, - { 0x00008374, 0x000000ff }, - { 0x00008378, 0x00000000 }, - { 0x0000837c, 0x00000000 }, - { 0x00008380, 0xffffffff }, - { 0x00008384, 0xffffffff }, - { 0x00008390, 0x0fffffff }, - { 0x00008394, 0x0fffffff }, - { 0x00008398, 0x00000000 }, - { 0x0000839c, 0x00000000 }, - { 0x000083a0, 0x00000000 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xafe68e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x0000984c, 0x0040233c }, - { 0x0000a84c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, 
- { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x00009910, 0x10002310 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x0000a920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009930, 0x00000000 }, - { 0x0000a930, 0x00000000 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x0108ecff }, - { 0x00009940, 0x14750604 }, - { 0x0000c95c, 0x004b6a8e }, - { 0x00009970, 0x990bb515 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x0c6f0000 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099c4, 0x06336f77 }, - { 0x000099c8, 0x6af65329 }, - { 0x000099cc, 0x08f186c8 }, - { 0x000099d0, 0x00046384 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099f0, 0x00000000 }, - { 0x000099fc, 0x00001042 }, - { 0x0000a1f4, 0x00fffeff }, - { 0x0000a1f8, 0x00f5f9ff }, - { 0x0000a1fc, 0xb79f6427 }, - { 0x0000a208, 0x803e4788 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x40206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x233f7180 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a23c, 0x13c889af }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00000000 }, - { 0x0000a248, 0xfffffffc }, - { 0x0000a24c, 0x00000000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cdbd380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a264, 0x00418a11 }, - { 0x0000b264, 0x00418a11 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0e79e5c6 }, - { 0x0000b26c, 0x0e79e5c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000a27c, 0x050701ce }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000b398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, - { 0x0000a3e0, 0x000001ce }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f70081 }, - { 0x0000a3f0, 0x01036a1e }, - { 0x0000a3f4, 0x00000000 }, - { 0x0000b3f4, 0x00000000 }, - { 0x0000a7d8, 0x00000001 }, - { 0x00007800, 0x00000800 }, - { 0x00007804, 0x6c35ffb0 }, - { 0x00007808, 0x6db6c000 }, - { 0x0000780c, 0x6db6cb30 }, - { 0x00007810, 0x6db6cb6c }, - { 0x00007814, 0x0501e200 }, - { 0x00007818, 0x0094128d }, - { 0x0000781c, 0x976ee392 }, - { 0x00007820, 0xf75ff6fc }, - { 0x00007824, 0x00040000 }, - { 0x00007828, 0xdb003012 }, - { 0x0000782c, 0x04924914 }, - { 0x00007830, 0x21084210 }, - { 0x00007834, 0x00140000 }, - { 0x00007838, 0x0e4548d8 }, - { 0x0000783c, 0x54214514 }, - { 0x00007840, 0x02025820 }, - { 
0x00007844, 0x71c0d388 }, - { 0x00007848, 0x934934a8 }, - { 0x00007850, 0x00000000 }, - { 0x00007854, 0x00000800 }, - { 0x00007858, 0x6c35ffb0 }, - { 0x0000785c, 0x6db6c000 }, - { 0x00007860, 0x6db6cb2c }, - { 0x00007864, 0x6db6cb6c }, - { 0x00007868, 0x0501e200 }, - { 0x0000786c, 0x0094128d }, - { 0x00007870, 0x976ee392 }, - { 0x00007874, 0xf75ff6fc }, - { 0x00007878, 0x00040000 }, - { 0x0000787c, 0xdb003012 }, - { 0x00007880, 0x04924914 }, - { 0x00007884, 0x21084210 }, - { 0x00007888, 0x001b6db0 }, - { 0x0000788c, 0x00376b63 }, - { 0x00007890, 0x06db6db6 }, - { 0x00007894, 0x006d8000 }, - { 0x00007898, 0x48100000 }, - { 0x0000789c, 0x00000000 }, - { 0x000078a0, 0x08000000 }, - { 0x000078a4, 0x0007ffd8 }, - { 0x000078a8, 0x0007ffd8 }, - { 0x000078ac, 0x001c0020 }, - { 0x000078b0, 0x000611eb }, - { 0x000078b4, 0x40008080 }, - { 0x000078b8, 0x2a850160 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffc}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; -static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a }, - { 0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b }, - { 0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a }, - { 0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a }, - { 0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a }, - { 0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c }, - { 0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc }, - { 0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4 }, - { 0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc }, - { 0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede }, - { 0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e }, - { 0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e }, - { 0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e }, - { 0x0000a780, 0x00000000, 0x00000000, 0x00000060, 0x00000060, 0x00000060 }, - { 0x0000a784, 0x00000000, 0x00000000, 0x00004062, 0x00004062, 0x00004062 }, - { 0x0000a788, 0x00000000, 0x00000000, 0x00008064, 0x00008064, 0x00008064 }, - { 0x0000a78c, 0x00000000, 0x00000000, 0x0000c0a4, 0x0000c0a4, 0x0000c0a4 }, - { 0x0000a790, 0x00000000, 0x00000000, 0x000100b0, 0x000100b0, 0x000100b0 }, - { 0x0000a794, 0x00000000, 0x00000000, 0x000140b2, 0x000140b2, 0x000140b2 }, - { 0x0000a798, 0x00000000, 0x00000000, 0x000180b4, 0x000180b4, 0x000180b4 }, - { 
0x0000a79c, 0x00000000, 0x00000000, 0x0001c0f4, 0x0001c0f4, 0x0001c0f4 }, - { 0x0000a7a0, 0x00000000, 0x00000000, 0x00020134, 0x00020134, 0x00020134 }, - { 0x0000a7a4, 0x00000000, 0x00000000, 0x000240fe, 0x000240fe, 0x000240fe }, - { 0x0000a7a8, 0x00000000, 0x00000000, 0x0002813e, 0x0002813e, 0x0002813e }, - { 0x0000a7ac, 0x00000000, 0x00000000, 0x0002c17e, 0x0002c17e, 0x0002c17e }, - { 0x0000a7b0, 0x00000000, 0x00000000, 0x000301be, 0x000301be, 0x000301be }, - { 0x0000a7b4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7b8, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7bc, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7c0, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7c4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7c8, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7cc, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7d0, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a7d4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, - { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 }, -}; - - -static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, - { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, - { 0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, - { 0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, - { 0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, - { 0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, - { 0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, - { 0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, - { 0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, - { 0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, - { 0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, - { 0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, - { 0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, - { 0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, - { 0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, - { 0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, - { 0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, - { 0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, - { 0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, - { 0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, - { 0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, - { 0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, - { 0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, - { 0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, - { 0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, - { 0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, - { 0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, - { 0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, - { 
0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, - { 0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, - { 0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, - { 0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, - { 0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, - { 0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, - { 0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, - { 0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, - { 0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, - { 0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, - { 0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, - { 0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, - { 0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, - { 0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, - { 0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, - { 0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, - { 0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, - { 0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, - { 0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, - { 0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, - { 0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, - { 0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, - { 0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, - { 0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, - { 0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, - { 0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, - { 0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, - { 0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, - { 0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, - { 0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, - { 0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, - { 0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, - { 0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, - { 0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, - { 0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, - { 0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, - { 0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, - { 0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, - { 0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, - { 0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, - { 0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, - { 0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, - { 0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, - { 0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, - { 0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, - { 0x00009b24, 0x00000000, 0x00000000, 
0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, - { 0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, - { 0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, - { 0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, - { 0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, - { 0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, - { 0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, - { 0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, - { 0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, - { 0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, - { 0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, - { 0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, - { 0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, - { 0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, - { 0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, - { 0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, - { 0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, - { 0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, - { 0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, - { 0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, - { 0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, - { 0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, - { 0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, - { 0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, - { 0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, - { 0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, - { 0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, - { 0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, - { 0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, - { 0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, - { 0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 
0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, - { 0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, - { 0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, - { 0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, - { 0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, - { 0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, - { 0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, - { 0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, - { 0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, - { 0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, - { 0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, - { 0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, - { 0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, - { 0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, - { 0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, - { 0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, - { 0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, - { 0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, - { 0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, - { 0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, - { 0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, - { 0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, - { 0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, - { 0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, - { 0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, - { 0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, - { 0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, - { 0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, - { 0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, - { 0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, - { 0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, - { 0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, - { 0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, - { 0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, - { 0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, - { 0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, - { 0x0000aa90, 0x00000000, 0x00000000, 
0x0000aba8, 0x0000aba8, 0x0000aba8 }, - { 0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, - { 0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, - { 0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, - { 0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, - { 0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, - { 0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, - { 0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, - { 0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, - { 0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, - { 0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, - { 0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, - { 0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, - { 0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, - { 0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, - { 0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, - { 0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, - { 0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, - { 0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, - { 0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, - { 0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, - { 0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, - { 0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, - { 0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, - { 0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, - { 0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, - { 0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, - { 0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, - { 0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, - { 0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, - { 0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, - { 0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, - { 0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, - { 0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, - { 0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, - { 0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, - { 0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, - { 0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, - { 0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, - { 0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, - { 0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, - { 0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, - { 0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, - { 0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, - { 0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, - { 0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, - { 
0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, - { 0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, - { 0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, - { 0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, - { 0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, - { 0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, - { 0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, - { 0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, - { 0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, - { 0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, - { 0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, - { 0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, - { 0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, - { 0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, - { 0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, - { 0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, - { 0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, - { 0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, - { 0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, - { 0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, - { 0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, - { 0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abfc, 0x00000000, 0x00000000, 
0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, - { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, -}; - -static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffd }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, -}; - -static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffc }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, -}; - -/* AR9287 Revision 11 */ - static const u32 ar9287Modes_9287_1_1[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, - { 0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, - { 0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a }, - { 0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880 }, - { 0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e }, - { 0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001 }, - { 0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e }, - { 0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0 }, - { 0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, - { 0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e }, - { 0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18 }, - { 0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, - { 0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010 }, - { 0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, - { 0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210 }, - { 0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce }, - { 
0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c }, - { 0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, - { 0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444 }, - { 0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000 }, - { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, - { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + {0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008}, + {0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a}, + {0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880}, + {0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001}, + {0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007}, + {0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e}, + {0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0}, + {0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2}, + {0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e}, + {0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18}, + {0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881}, + {0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016}, + {0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d}, + {0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010}, + {0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010}, + {0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010}, + {0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210}, + {0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce}, + {0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c}, + {0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, + {0x0000a204, 0x00000440, 0x00000440, 
0x00000444, 0x00000444, 0x00000444}, + {0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000}, + {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e}, + {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, }; static const u32 ar9287Common_9287_1_1[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020015 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00004060, 0x00000000 }, - { 0x00004064, 0x00000000 }, - { 0x00007010, 0x00000033 }, - { 0x00007020, 0x00000000 }, - { 0x00007034, 0x00000002 }, - { 0x00007038, 0x000004c2 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 
0x40000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f }, - { 0x00008064, 0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080c0, 0x2a80001a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x18487320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c0, 0x00000000 }, - { 0x000081c4, 0x00000000 }, - { 0x000081d4, 0x00000000 }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88a00010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x000000ff }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x0000829c, 0x00000000 }, - { 0x00008300, 0x00000040 }, - { 0x00008314, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000007 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00ff0000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x000107ff }, - { 0x00008344, 0x01c81043 }, - { 0x00008360, 0xffffffff }, - { 0x00008364, 0xffffffff }, - { 0x00008368, 0x00000000 }, - { 0x00008370, 0x00000000 }, - { 0x00008374, 0x000000ff }, - { 0x00008378, 0x00000000 }, - { 0x0000837c, 0x00000000 }, - { 0x00008380, 0xffffffff }, - { 0x00008384, 0xffffffff }, - { 0x00008390, 0x0fffffff }, - { 0x00008394, 0x0fffffff }, - { 0x00008398, 0x00000000 }, - { 0x0000839c, 0x00000000 }, - { 0x000083a0, 0x00000000 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xafe68e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, 
- { 0x0000984c, 0x0040233c }, - { 0x0000a84c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x00009910, 0x10002310 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x0000a920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009930, 0x00000000 }, - { 0x0000a930, 0x00000000 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x0108ecff }, - { 0x00009940, 0x14750604 }, - { 0x0000c95c, 0x004b6a8e }, - { 0x00009970, 0x990bb514 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x0c6f0000 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099c4, 0x06336f77 }, - { 0x000099c8, 0x6af6532f }, - { 0x000099cc, 0x08f186c8 }, - { 0x000099d0, 0x00046384 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099f0, 0x00000000 }, - { 0x000099fc, 0x00001042 }, - { 0x0000a208, 0x803e4788 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x40206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x233f7180 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a23c, 0x13c889af }, - { 0x0000a240, 0x38490a20 }, - { 0x0000a244, 0x00000000 }, - { 0x0000a248, 0xfffffffc }, - { 0x0000a24c, 0x00000000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0cdbd380 }, - { 0x0000a25c, 0x0f0f0f01 }, - { 0x0000a260, 0xdfa91f01 }, - { 0x0000a264, 0x00418a11 }, - { 0x0000b264, 0x00418a11 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0e79e5c6 }, - { 0x0000b26c, 0x0e79e5c6 }, - { 0x0000d270, 0x00820820 }, - { 0x0000a278, 0x1ce739ce }, - { 0x0000a27c, 0x050701ce }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a394, 0x1ce739ce }, - { 0x0000a398, 0x000001ce }, - { 0x0000b398, 0x000001ce }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3c8, 0x00000246 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3dc, 0x1ce739ce }, - { 0x0000a3e0, 0x000001ce }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f70081 }, - { 0x0000a3f0, 0x01036a1e }, - { 0x0000a3f4, 0x00000000 }, - { 0x0000b3f4, 0x00000000 }, - { 0x0000a7d8, 0x000003f1 }, - { 0x00007800, 0x00000800 }, - { 0x00007804, 0x6c35ffd2 }, - { 0x00007808, 0x6db6c000 }, - { 0x0000780c, 0x6db6cb30 }, - { 0x00007810, 0x6db6cb6c }, - { 0x00007814, 0x0501e200 }, - { 0x00007818, 0x0094128d }, - { 0x0000781c, 0x976ee392 }, - { 0x00007820, 0xf75ff6fc }, - { 0x00007824, 0x00040000 }, - { 0x00007828, 0xdb003012 }, - { 0x0000782c, 0x04924914 }, - { 0x00007830, 0x21084210 }, - { 0x00007834, 0x00140000 }, - { 0x00007838, 0x0e4548d8 }, - { 
0x0000783c, 0x54214514 }, - { 0x00007840, 0x02025830 }, - { 0x00007844, 0x71c0d388 }, - { 0x00007848, 0x934934a8 }, - { 0x00007850, 0x00000000 }, - { 0x00007854, 0x00000800 }, - { 0x00007858, 0x6c35ffd2 }, - { 0x0000785c, 0x6db6c000 }, - { 0x00007860, 0x6db6cb30 }, - { 0x00007864, 0x6db6cb6c }, - { 0x00007868, 0x0501e200 }, - { 0x0000786c, 0x0094128d }, - { 0x00007870, 0x976ee392 }, - { 0x00007874, 0xf75ff6fc }, - { 0x00007878, 0x00040000 }, - { 0x0000787c, 0xdb003012 }, - { 0x00007880, 0x04924914 }, - { 0x00007884, 0x21084210 }, - { 0x00007888, 0x001b6db0 }, - { 0x0000788c, 0x00376b63 }, - { 0x00007890, 0x06db6db6 }, - { 0x00007894, 0x006d8000 }, - { 0x00007898, 0x48100000 }, - { 0x0000789c, 0x00000000 }, - { 0x000078a0, 0x08000000 }, - { 0x000078a4, 0x0007ffd8 }, - { 0x000078a8, 0x0007ffd8 }, - { 0x000078ac, 0x001c0020 }, - { 0x000078b0, 0x00060aeb }, - { 0x000078b4, 0x40008080 }, - { 0x000078b8, 0x2a850160 }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020015}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + {0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00004030, 0x00000002}, + {0x0000403c, 0x00000002}, + {0x00004024, 0x0000001f}, + {0x00004060, 0x00000000}, + {0x00004064, 0x00000000}, + {0x00007010, 0x00000033}, + {0x00007020, 0x00000000}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + 
{0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x40000000}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x00008070, 0x00000000}, + {0x000080c0, 0x2a80001a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x18487320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, + {0x000081c0, 0x00000000}, + {0x000081c4, 0x00000000}, + {0x000081d4, 0x00000000}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, + {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008264, 0x88a00010}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x000000ff}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x0000829c, 0x00000000}, + {0x00008300, 0x00000040}, + {0x00008314, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x000107ff}, + {0x00008344, 0x01c81043}, + {0x00008360, 0xffffffff}, + {0x00008364, 0xffffffff}, + {0x00008368, 0x00000000}, + {0x00008370, 0x00000000}, + {0x00008374, 0x000000ff}, + {0x00008378, 0x00000000}, + {0x0000837c, 0x00000000}, + {0x00008380, 0xffffffff}, + {0x00008384, 0xffffffff}, + {0x00008390, 0x0fffffff}, + {0x00008394, 0x0fffffff}, + {0x00008398, 0x00000000}, + {0x0000839c, 0x00000000}, + {0x000083a0, 0x00000000}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xafe68e30}, + {0x00009810, 0xfd14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x0000984c, 0x0040233c}, + {0x0000a84c, 0x0040233c}, + {0x00009854, 0x00000044}, + {0x00009900, 0x00000000}, 
+ {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x00009910, 0x10002310}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x04900000}, + {0x0000a920, 0x04900000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009930, 0x00000000}, + {0x0000a930, 0x00000000}, + {0x00009934, 0x1e1f2022}, + {0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009948, 0x9280c00a}, + {0x0000994c, 0x00020028}, + {0x00009954, 0x5f3ca3de}, + {0x00009958, 0x0108ecff}, + {0x00009940, 0x14750604}, + {0x0000c95c, 0x004b6a8e}, + {0x00009970, 0x990bb514}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x201fff00}, + {0x000099ac, 0x0c6f0000}, + {0x000099b0, 0x03051000}, + {0x000099b4, 0x00000820}, + {0x000099c4, 0x06336f77}, + {0x000099c8, 0x6af6532f}, + {0x000099cc, 0x08f186c8}, + {0x000099d0, 0x00046384}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000000}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x0cc80caa}, + {0x000099f0, 0x00000000}, + {0x000099fc, 0x00001042}, + {0x0000a208, 0x803e4788}, + {0x0000a210, 0x4080a333}, + {0x0000a214, 0x40206c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 0x01834061}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x000003b5}, + {0x0000a22c, 0x233f7180}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a23c, 0x13c889af}, + {0x0000a240, 0x38490a20}, + {0x0000a244, 0x00000000}, + {0x0000a248, 0xfffffffc}, + {0x0000a24c, 0x00000000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0cdbd380}, + {0x0000a25c, 0x0f0f0f01}, + {0x0000a260, 0xdfa91f01}, + {0x0000a264, 0x00418a11}, + {0x0000b264, 0x00418a11}, + {0x0000a268, 0x00000000}, + {0x0000a26c, 0x0e79e5c6}, + {0x0000b26c, 0x0e79e5c6}, + {0x0000d270, 0x00820820}, + {0x0000a278, 0x1ce739ce}, + {0x0000a27c, 0x050701ce}, + {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, + {0x0000a388, 0x0c000000}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a394, 0x1ce739ce}, + {0x0000a398, 0x000001ce}, + {0x0000b398, 0x000001ce}, + {0x0000a39c, 0x00000001}, + {0x0000a3c8, 0x00000246}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3dc, 0x1ce739ce}, + {0x0000a3e0, 0x000001ce}, + {0x0000a3e4, 0x00000000}, + {0x0000a3e8, 0x18c43433}, + {0x0000a3ec, 0x00f70081}, + {0x0000a3f0, 0x01036a1e}, + {0x0000a3f4, 0x00000000}, + {0x0000b3f4, 0x00000000}, + {0x0000a7d8, 0x000003f1}, + {0x00007800, 0x00000800}, + {0x00007804, 0x6c35ffd2}, + {0x00007808, 0x6db6c000}, + {0x0000780c, 0x6db6cb30}, + {0x00007810, 0x6db6cb6c}, + {0x00007814, 0x0501e200}, + {0x00007818, 0x0094128d}, + {0x0000781c, 0x976ee392}, + {0x00007820, 0xf75ff6fc}, + {0x00007824, 0x00040000}, + {0x00007828, 0xdb003012}, + {0x0000782c, 0x04924914}, + {0x00007830, 0x21084210}, + {0x00007834, 0x00140000}, + {0x00007838, 0x0e4548d8}, + {0x0000783c, 0x54214514}, + {0x00007840, 0x02025830}, + {0x00007844, 0x71c0d388}, + {0x00007848, 0x934934a8}, + {0x00007850, 0x00000000}, + {0x00007854, 0x00000800}, + {0x00007858, 0x6c35ffd2}, + {0x0000785c, 0x6db6c000}, + {0x00007860, 0x6db6cb30}, + {0x00007864, 0x6db6cb6c}, + {0x00007868, 0x0501e200}, + {0x0000786c, 0x0094128d}, + {0x00007870, 
0x976ee392}, + {0x00007874, 0xf75ff6fc}, + {0x00007878, 0x00040000}, + {0x0000787c, 0xdb003012}, + {0x00007880, 0x04924914}, + {0x00007884, 0x21084210}, + {0x00007888, 0x001b6db0}, + {0x0000788c, 0x00376b63}, + {0x00007890, 0x06db6db6}, + {0x00007894, 0x006d8000}, + {0x00007898, 0x48100000}, + {0x0000789c, 0x00000000}, + {0x000078a0, 0x08000000}, + {0x000078a4, 0x0007ffd8}, + {0x000078a8, 0x0007ffd8}, + {0x000078ac, 0x001c0020}, + {0x000078b0, 0x00060aeb}, + {0x000078b4, 0x40008080}, + {0x000078b8, 0x2a850160}, }; -/* - * For Japanese regulatory requirements, 2484 MHz requires the following three - * registers be programmed differently from the channel between 2412 and - * 2472 MHz. - */ -static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = { - { 0x0000a1f4, 0x00fffeff }, - { 0x0000a1f8, 0x00f5f9ff }, - { 0x0000a1fc, 0xb79f6427 }, +static const u32 ar9287Common_normal_cck_fir_coeff_9287_1_1[][2] = { + /* Addr allmodes */ + {0x0000a1f4, 0x00fffeff}, + {0x0000a1f8, 0x00f5f9ff}, + {0x0000a1fc, 0xb79f6427}, }; -static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = { - { 0x0000a1f4, 0x00000000 }, - { 0x0000a1f8, 0xefff0301 }, - { 0x0000a1fc, 0xca9228ee }, +static const u32 ar9287Common_japan_2484_cck_fir_coeff_9287_1_1[][2] = { + /* Addr allmodes */ + {0x0000a1f4, 0x00000000}, + {0x0000a1f8, 0xefff0301}, + {0x0000a1fc, 0xca9228ee}, }; static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a }, - { 0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b }, - { 0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a }, - { 0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a }, - { 0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a }, - { 0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a }, - { 0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c }, - { 0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc }, - { 0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4 }, - { 0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc }, - { 0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede }, - { 0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e }, - { 0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e }, - { 0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e }, - { 0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062, 0x00000062 }, - { 0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064 }, - { 0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4 }, - { 0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa }, - { 0x0000a790, 0x00000000, 
0x00000000, 0x000100ac, 0x000100ac, 0x000100ac }, - { 0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4, 0x000140b4 }, - { 0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4 }, - { 0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134 }, - { 0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174 }, - { 0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c }, - { 0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e }, - { 0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be }, - { 0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, - { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 }, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002}, + {0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004}, + {0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a}, + {0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c}, + {0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b}, + {0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a}, + {0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a}, + {0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a}, + {0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a}, + {0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a}, + {0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a}, + {0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a}, + {0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c}, + {0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc}, + {0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4}, + {0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc}, + {0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede}, + {0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e}, + {0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e}, + {0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e}, + {0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062, 0x00000062}, + {0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064}, + {0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4}, + {0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa}, + {0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac, 0x000100ac}, + {0x0000a794, 0x00000000, 0x00000000, 
0x000140b4, 0x000140b4, 0x000140b4}, + {0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4}, + {0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134}, + {0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174}, + {0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c}, + {0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e}, + {0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be}, + {0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe}, + {0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000}, }; static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = { - /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ - { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, - { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, - { 0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, - { 0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, - { 0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, - { 0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, - { 0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, - { 0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, - { 0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, - { 0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, - { 0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, - { 0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, - { 0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, - { 0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, - { 0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, - { 0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, - { 0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, - { 0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, - { 0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, - { 0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, - { 0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, - { 0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, - { 0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, - { 0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, - { 0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, - { 0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, - { 0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, - { 
0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, - { 0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, - { 0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, - { 0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, - { 0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, - { 0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, - { 0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, - { 0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, - { 0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, - { 0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, - { 0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, - { 0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, - { 0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, - { 0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, - { 0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, - { 0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, - { 0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, - { 0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, - { 0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, - { 0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, - { 0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, - { 0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, - { 0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, - { 0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, - { 0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, - { 0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, - { 0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, - { 0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, - { 0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, - { 0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, - { 0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, - { 0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, - { 0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, - { 0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, - { 0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, - { 0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, - { 0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, - { 0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, - { 0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, - { 0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, - { 0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, - { 0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, - { 0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, - { 0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, - { 0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, - { 0x00009b20, 0x00000000, 0x00000000, 
0x0000ff94, 0x0000ff94, 0x0000ff94 }, - { 0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, - { 0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, - { 0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, - { 0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, - { 0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, - { 0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, - { 0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, - { 0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, - { 0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, - { 0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, - { 0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, - { 0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, - { 0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, - { 0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, - { 0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, - { 0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, - { 0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, - { 0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, - { 0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, - { 0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, - { 0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, - { 0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, - { 0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, - { 0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, - { 0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, - { 0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, - { 0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, - { 0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, - { 0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, - { 0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, - { 0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 
0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, - { 0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, - { 0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, - { 0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, - { 0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, - { 0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, - { 0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, - { 0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, - { 0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, - { 0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, - { 0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, - { 0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, - { 0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, - { 0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, - { 0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, - { 0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, - { 0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, - { 0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, - { 0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, - { 0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, - { 0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, - { 0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, - { 0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, - { 0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, - { 0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, - { 0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, - { 0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, - { 0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, - { 0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, - { 0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, - { 0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, - { 0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, - { 0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, - { 0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, - { 0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, - { 0x0000aa8c, 0x00000000, 0x00000000, 
0x0000aba4, 0x0000aba4, 0x0000aba4 }, - { 0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, - { 0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, - { 0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, - { 0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, - { 0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, - { 0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, - { 0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, - { 0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, - { 0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, - { 0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, - { 0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, - { 0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, - { 0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, - { 0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, - { 0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, - { 0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, - { 0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, - { 0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, - { 0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, - { 0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, - { 0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, - { 0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, - { 0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, - { 0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, - { 0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, - { 0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, - { 0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, - { 0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, - { 0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, - { 0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, - { 0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, - { 0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, - { 0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, - { 0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, - { 0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, - { 0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, - { 0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, - { 0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, - { 0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, - { 0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, - { 0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, - { 0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, - { 0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, - { 0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, - { 0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, - { 
0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, - { 0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, - { 0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, - { 0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, - { 0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, - { 0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, - { 0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, - { 0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, - { 0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, - { 0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, - { 0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, - { 0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, - { 0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, - { 0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, - { 0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, - { 0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, - { 0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, - { 0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, - { 0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, - { 0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, - { 0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, - { 0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, - { 0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abf8, 0x00000000, 0x00000000, 
0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, - { 0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, - { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, + {0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120}, + {0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124}, + {0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128}, + {0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c}, + {0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130}, + {0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194}, + {0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198}, + {0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c}, + {0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210}, + {0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284}, + {0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288}, + {0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c}, + {0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290}, + {0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294}, + {0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0}, + {0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4}, + {0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8}, + {0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac}, + {0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0}, + {0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4}, + {0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8}, + {0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4}, + {0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708}, + {0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c}, + {0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710}, + {0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04}, + {0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08}, + {0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c}, + {0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10}, + {0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14}, + {0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18}, + {0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c}, + {0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90}, + {0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94}, + {0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98}, + {0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4}, + {0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8}, + {0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04}, + {0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08}, + {0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c}, + {0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10}, + {0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14}, + {0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18}, + {0x00009aac, 
0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c}, + {0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90}, + {0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18}, + {0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24}, + {0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28}, + {0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314}, + {0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318}, + {0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c}, + {0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390}, + {0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394}, + {0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398}, + {0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4}, + {0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8}, + {0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac}, + {0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0}, + {0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380}, + {0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384}, + {0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388}, + {0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710}, + {0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714}, + {0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718}, + {0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10}, + {0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14}, + {0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18}, + {0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c}, + {0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90}, + {0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94}, + {0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c}, + {0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90}, + {0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94}, + {0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0}, + {0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4}, + {0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8}, + {0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac}, + {0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0}, + {0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4}, + {0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1}, + {0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5}, + {0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9}, + {0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad}, + {0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1}, + {0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5}, + {0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9}, + {0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5}, + {0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9}, + {0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd}, + {0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 
0x0000ffd1}, + {0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5}, + {0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2}, + {0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6}, + {0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca}, + {0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce}, + {0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2}, + {0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6}, + {0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda}, + {0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7}, + {0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb}, + {0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf}, + {0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3}, + {0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7}, + {0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120}, + {0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124}, + {0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128}, + {0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c}, + {0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130}, + {0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194}, + {0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198}, + {0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c}, + {0x0000aa20, 0x00000000, 0x00000000, 
0x0000a210, 0x0000a210, 0x0000a210}, + {0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284}, + {0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288}, + {0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c}, + {0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290}, + {0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294}, + {0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0}, + {0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4}, + {0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8}, + {0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac}, + {0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0}, + {0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4}, + {0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8}, + {0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4}, + {0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708}, + {0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c}, + {0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710}, + {0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04}, + {0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08}, + {0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c}, + {0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10}, + {0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14}, + {0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18}, + {0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c}, + {0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90}, + {0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94}, + {0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98}, + {0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4}, + {0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8}, + {0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04}, + {0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08}, + {0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c}, + {0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10}, + {0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14}, + {0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18}, + {0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c}, + {0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90}, + {0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18}, + {0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24}, + {0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28}, + {0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314}, + {0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318}, + {0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c}, + {0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390}, + {0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394}, + {0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398}, + {0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4}, + {0x0000aadc, 
0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8}, + {0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac}, + {0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0}, + {0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380}, + {0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384}, + {0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388}, + {0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710}, + {0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714}, + {0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718}, + {0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10}, + {0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14}, + {0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18}, + {0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c}, + {0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90}, + {0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94}, + {0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c}, + {0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90}, + {0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94}, + {0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0}, + {0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4}, + {0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8}, + {0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac}, + {0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0}, + {0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4}, + {0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1}, + {0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5}, + {0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9}, + {0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad}, + {0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1}, + {0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5}, + {0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9}, + {0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5}, + {0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9}, + {0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd}, + {0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1}, + {0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5}, + {0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2}, + {0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6}, + {0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca}, + {0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce}, + {0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2}, + {0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6}, + {0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda}, + {0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7}, + {0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb}, + {0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf}, + {0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 
0x0000ffd3}, + {0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7}, + {0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb}, + {0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067}, + {0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067}, }; static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffd }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffd}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = { - {0x00004040, 0x9248fd00 }, - {0x00004040, 0x24924924 }, - {0x00004040, 0xa8000019 }, - {0x00004040, 0x13160820 }, - {0x00004040, 0xe5980560 }, - {0x00004040, 0xc01dcffc }, - {0x00004040, 0x1aaabe41 }, - {0x00004040, 0xbe105554 }, - {0x00004040, 0x00043007 }, - {0x00004044, 0x00000000 }, + /* Addr allmodes */ + {0x00004040, 0x9248fd00}, + {0x00004040, 0x24924924}, + {0x00004040, 0xa8000019}, + {0x00004040, 0x13160820}, + {0x00004040, 0xe5980560}, + {0x00004040, 0xc01dcffc}, + {0x00004040, 0x1aaabe41}, + {0x00004040, 0xbe105554}, + {0x00004040, 0x00043007}, + {0x00004044, 0x00000000}, }; - -/* AR9271 initialization values automaticaly created: 06/04/09 */ static const u32 
ar9271Modes_9271[][6] = { - { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, - { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, - { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, - { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, - { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, - { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, - { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, - { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, - { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, - { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, - { 0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001 }, - { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, - { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, - { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, - { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 }, - { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, - { 0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, - { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, - { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, - { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18 }, - { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, - { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310 }, - { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, - { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, - { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, - { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 }, - { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c }, - { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, - { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, - { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f }, - { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, - { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, - { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, - { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, - { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, - { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, - { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, - { 0x00009a14, 
0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, - { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, - { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, - { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, - { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, - { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, - { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, - { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, - { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, - { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, - { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, - { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, - { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, - { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, - { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, - { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, - { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, - { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, - { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, - { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, - { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, - { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, - { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, - { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, - { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, - { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, - { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, - { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, - { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, - { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, - { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, - { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, - { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, - { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, - { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, - { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, - { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, - { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, - { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, - { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, - { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, - { 0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, - { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, - { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, - { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, - { 0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 
0x000db384, 0x00000000 }, - { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, - { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, - { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, - { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, - { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, - { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, - { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, - { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, - { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, - { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, - { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, - { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, - { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, - { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, - { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, - { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, - { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, - { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, - { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, - { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, - { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, - { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, - { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, - { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, - { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, - { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, - { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, - { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, - { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, - { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, - { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, - { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, - { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, - { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, - { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, - { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, - { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, - { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, - { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b80, 
0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, - { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, - { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, - { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, - { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, - { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, - { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, - { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, - { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, - { 0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, - { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, - { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, - { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, - { 0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 
0x00058190, 0x00000000 }, - { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, - { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, - { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, - { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, - { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, - { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, - { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 }, - { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, - { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, - { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, - { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, - { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, - { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, - { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, - { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, - { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, - { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, - { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, - { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, - { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, - { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 }, - { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, - { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, - { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, - { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, - { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, - { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, - { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, - { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, - { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, - { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, - { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, - { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, - { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, - { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, - { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, - { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, - { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, - { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, - { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, - { 0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, - { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, - { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, - { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, - { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, - { 0x0000aaec, 
0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, - { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, - { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, - { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, - { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, - { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, - { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, - { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, - { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, - { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, - { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, - { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, - { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, - { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, - { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, - { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, - { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, - { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, - { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, - { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, - { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, - { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, - { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, - { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, - { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, - { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, - { 0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, - { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, - { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, - { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, - { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 
0x000eb7db, 0x00000000 }, - { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, - { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, - { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, - { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, - { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, - { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, - { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, - { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180}, + {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0}, + {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f}, + {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880}, + {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303}, + {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200}, + {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001}, + {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007}, + {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e}, + {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0}, + {0x00009848, 0x00001066, 0x00001066, 
0x00001053, 0x00001053, 0x00001059}, + {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059}, + {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2}, + {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e}, + {0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, + {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00}, + {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881}, + {0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310}, + {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0}, + {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016}, + {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d}, + {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010}, + {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c}, + {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, + {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77}, + {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f}, + {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8}, + {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384}, + {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000}, + {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000}, + {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000}, + {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000}, + {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000}, + {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000}, + {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000}, + {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000}, + {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000}, + {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000}, + {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000}, + {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000}, + {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000}, + {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000}, + {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000}, + {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000}, + {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000}, + {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000}, + {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000}, + {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000}, + {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000}, + {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000}, + {0x00009a58, 
0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000}, + {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000}, + {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000}, + {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000}, + {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000}, + {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000}, + {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000}, + {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000}, + {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000}, + {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000}, + {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000}, + {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000}, + {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000}, + {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000}, + {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000}, + {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000}, + {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000}, + {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000}, + {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000}, + {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000}, + {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000}, + {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000}, + {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000}, + {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000}, + {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000}, + {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000}, + {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000}, + {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000}, + {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000}, + {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000}, + {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000}, + {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000}, + {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000}, + {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000}, + {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000}, + {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000}, + {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000}, + {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000}, + {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000}, + {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000}, + {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000}, + {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000}, + {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000}, + {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000}, + {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000}, + {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000}, + {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 
0x00000000}, + {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000}, + {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000}, + {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000}, + {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000}, + {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000}, + {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000}, + {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000}, + {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000}, + {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000}, + {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000}, + {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000}, + {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000}, + {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000}, + {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000}, + {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000}, + {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000}, + {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000}, + {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000}, + {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000}, + {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000}, + {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bcc, 0x00000000, 0x00000000, 
0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000}, + {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000}, + {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000}, + {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000}, + {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000}, + {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000}, + {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000}, + {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000}, + {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000}, + {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000}, + {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000}, + {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000}, + {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000}, + {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000}, + {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000}, + {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000}, + {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000}, + {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000}, + {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000}, + {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000}, + {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000}, + {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000}, + {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000}, + {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000}, + {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000}, + {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000}, + {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000}, + {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000}, + {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000}, + {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000}, + {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000}, + {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000}, + {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000}, + {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000}, + {0x0000aa88, 
0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000}, + {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000}, + {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000}, + {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000}, + {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000}, + {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000}, + {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000}, + {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000}, + {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000}, + {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000}, + {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000}, + {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000}, + {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000}, + {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000}, + {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000}, + {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000}, + {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000}, + {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000}, + {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000}, + {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000}, + {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000}, + {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000}, + {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000}, + {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000}, + {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000}, + {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000}, + {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000}, + {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000}, + {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000}, + {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000}, + {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000}, + {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000}, + {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000}, + {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000}, + {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000}, + {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000}, + {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000}, + {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000}, + {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000}, + {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000}, + {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000}, + {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000}, + {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000}, + {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000}, + {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000}, + {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000}, + {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 
0x00000000}, + {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000}, + {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000}, + {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000}, + {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000}, + {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000}, + {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000}, + {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000}, + {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000}, + {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000abfc, 0x00000000, 0x00000000, 
0x000eb7db, 0x000eb7db, 0x00000000}, + {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004}, + {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000}, + {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000}, + {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a}, + {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000}, + {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000}, + {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e}, }; static const u32 ar9271Common_9271[][2] = { - { 0x0000000c, 0x00000000 }, - { 0x00000030, 0x00020045 }, - { 0x00000034, 0x00000005 }, - { 0x00000040, 0x00000000 }, - { 0x00000044, 0x00000008 }, - { 0x00000048, 0x00000008 }, - { 0x0000004c, 0x00000010 }, - { 0x00000050, 0x00000000 }, - { 0x00000054, 0x0000001f }, - { 0x00000800, 0x00000000 }, - { 0x00000804, 0x00000000 }, - { 0x00000808, 0x00000000 }, - { 0x0000080c, 0x00000000 }, - { 0x00000810, 0x00000000 }, - { 0x00000814, 0x00000000 }, - { 0x00000818, 0x00000000 }, - { 0x0000081c, 0x00000000 }, - { 0x00000820, 0x00000000 }, - { 0x00000824, 0x00000000 }, - { 0x00001040, 0x002ffc0f }, - { 0x00001044, 0x002ffc0f }, - { 0x00001048, 0x002ffc0f }, - { 0x0000104c, 0x002ffc0f }, - { 0x00001050, 0x002ffc0f }, - { 0x00001054, 0x002ffc0f }, - { 0x00001058, 0x002ffc0f }, - { 0x0000105c, 0x002ffc0f }, - { 0x00001060, 0x002ffc0f }, - { 0x00001064, 0x002ffc0f }, - { 0x00001230, 0x00000000 }, - { 0x00001270, 0x00000000 }, - { 0x00001038, 0x00000000 }, - { 0x00001078, 0x00000000 }, - { 0x000010b8, 0x00000000 }, - { 0x000010f8, 0x00000000 }, - { 0x00001138, 0x00000000 }, - { 0x00001178, 0x00000000 }, - { 0x000011b8, 0x00000000 }, - { 0x000011f8, 0x00000000 }, - { 0x00001238, 0x00000000 }, - { 0x00001278, 0x00000000 }, - { 0x000012b8, 0x00000000 }, - { 0x000012f8, 0x00000000 }, - { 0x00001338, 0x00000000 }, - { 0x00001378, 0x00000000 }, - { 0x000013b8, 0x00000000 }, - { 0x000013f8, 0x00000000 }, - { 0x00001438, 0x00000000 }, - { 0x00001478, 0x00000000 }, - { 0x000014b8, 0x00000000 }, - { 0x000014f8, 0x00000000 }, - { 0x00001538, 0x00000000 }, - { 0x00001578, 0x00000000 }, - { 0x000015b8, 0x00000000 }, - { 0x000015f8, 0x00000000 }, - { 0x00001638, 0x00000000 }, - { 0x00001678, 0x00000000 }, - { 0x000016b8, 0x00000000 }, - { 0x000016f8, 0x00000000 }, - { 0x00001738, 0x00000000 }, - { 0x00001778, 0x00000000 }, - { 0x000017b8, 0x00000000 }, - { 0x000017f8, 0x00000000 }, - { 0x0000103c, 0x00000000 }, - { 0x0000107c, 0x00000000 }, - { 0x000010bc, 0x00000000 }, - { 0x000010fc, 0x00000000 }, - { 0x0000113c, 0x00000000 }, - { 0x0000117c, 0x00000000 }, - { 0x000011bc, 0x00000000 }, - { 0x000011fc, 0x00000000 }, - { 0x0000123c, 0x00000000 }, - { 0x0000127c, 0x00000000 }, - { 0x000012bc, 0x00000000 }, - { 0x000012fc, 0x00000000 }, - { 0x0000133c, 0x00000000 }, - { 0x0000137c, 0x00000000 }, - { 0x000013bc, 0x00000000 }, - { 0x000013fc, 0x00000000 }, - { 0x0000143c, 0x00000000 }, - { 0x0000147c, 0x00000000 }, - { 0x00004030, 0x00000002 }, - { 0x0000403c, 0x00000002 }, - { 0x00004024, 0x0000001f }, - { 0x00004060, 0x00000000 }, - { 0x00004064, 0x00000000 }, - { 0x00008004, 0x00000000 }, - { 0x00008008, 0x00000000 }, - { 0x0000800c, 0x00000000 }, - { 0x00008018, 0x00000700 }, - { 0x00008020, 0x00000000 }, - { 0x00008038, 0x00000000 }, - { 0x0000803c, 0x00000000 }, - { 0x00008048, 0x00000000 }, - { 0x00008054, 0x00000000 }, - { 0x00008058, 0x00000000 }, - { 0x0000805c, 0x000fc78f }, - { 0x00008060, 0x0000000f 
}, - { 0x00008064, 0x00000000 }, - { 0x00008070, 0x00000000 }, - { 0x000080b0, 0x00000000 }, - { 0x000080b4, 0x00000000 }, - { 0x000080b8, 0x00000000 }, - { 0x000080bc, 0x00000000 }, - { 0x000080c0, 0x2a80001a }, - { 0x000080c4, 0x05dc01e0 }, - { 0x000080c8, 0x1f402710 }, - { 0x000080cc, 0x01f40000 }, - { 0x000080d0, 0x00001e00 }, - { 0x000080d4, 0x00000000 }, - { 0x000080d8, 0x00400000 }, - { 0x000080e0, 0xffffffff }, - { 0x000080e4, 0x0000ffff }, - { 0x000080e8, 0x003f3f3f }, - { 0x000080ec, 0x00000000 }, - { 0x000080f0, 0x00000000 }, - { 0x000080f4, 0x00000000 }, - { 0x000080f8, 0x00000000 }, - { 0x000080fc, 0x00020000 }, - { 0x00008100, 0x00020000 }, - { 0x00008104, 0x00000001 }, - { 0x00008108, 0x00000052 }, - { 0x0000810c, 0x00000000 }, - { 0x00008110, 0x00000168 }, - { 0x00008118, 0x000100aa }, - { 0x0000811c, 0x00003210 }, - { 0x00008120, 0x08f04810 }, - { 0x00008124, 0x00000000 }, - { 0x00008128, 0x00000000 }, - { 0x0000812c, 0x00000000 }, - { 0x00008130, 0x00000000 }, - { 0x00008134, 0x00000000 }, - { 0x00008138, 0x00000000 }, - { 0x0000813c, 0x00000000 }, - { 0x00008144, 0xffffffff }, - { 0x00008168, 0x00000000 }, - { 0x0000816c, 0x00000000 }, - { 0x00008170, 0x32143320 }, - { 0x00008174, 0xfaa4fa50 }, - { 0x00008178, 0x00000100 }, - { 0x0000817c, 0x00000000 }, - { 0x000081c0, 0x00000000 }, - { 0x000081d0, 0x0000320a }, - { 0x000081ec, 0x00000000 }, - { 0x000081f0, 0x00000000 }, - { 0x000081f4, 0x00000000 }, - { 0x000081f8, 0x00000000 }, - { 0x000081fc, 0x00000000 }, - { 0x00008200, 0x00000000 }, - { 0x00008204, 0x00000000 }, - { 0x00008208, 0x00000000 }, - { 0x0000820c, 0x00000000 }, - { 0x00008210, 0x00000000 }, - { 0x00008214, 0x00000000 }, - { 0x00008218, 0x00000000 }, - { 0x0000821c, 0x00000000 }, - { 0x00008220, 0x00000000 }, - { 0x00008224, 0x00000000 }, - { 0x00008228, 0x00000000 }, - { 0x0000822c, 0x00000000 }, - { 0x00008230, 0x00000000 }, - { 0x00008234, 0x00000000 }, - { 0x00008238, 0x00000000 }, - { 0x0000823c, 0x00000000 }, - { 0x00008240, 0x00100000 }, - { 0x00008244, 0x0010f400 }, - { 0x00008248, 0x00000100 }, - { 0x0000824c, 0x0001e800 }, - { 0x00008250, 0x00000000 }, - { 0x00008254, 0x00000000 }, - { 0x00008258, 0x00000000 }, - { 0x0000825c, 0x400000ff }, - { 0x00008260, 0x00080922 }, - { 0x00008264, 0x88a00010 }, - { 0x00008270, 0x00000000 }, - { 0x00008274, 0x40000000 }, - { 0x00008278, 0x003e4180 }, - { 0x0000827c, 0x00000000 }, - { 0x00008284, 0x0000002c }, - { 0x00008288, 0x0000002c }, - { 0x0000828c, 0x00000000 }, - { 0x00008294, 0x00000000 }, - { 0x00008298, 0x00000000 }, - { 0x0000829c, 0x00000000 }, - { 0x00008300, 0x00000040 }, - { 0x00008314, 0x00000000 }, - { 0x00008328, 0x00000000 }, - { 0x0000832c, 0x00000001 }, - { 0x00008330, 0x00000302 }, - { 0x00008334, 0x00000e00 }, - { 0x00008338, 0x00ff0000 }, - { 0x0000833c, 0x00000000 }, - { 0x00008340, 0x00010380 }, - { 0x00008344, 0x00581043 }, - { 0x00007010, 0x00000030 }, - { 0x00007034, 0x00000002 }, - { 0x00007038, 0x000004c2 }, - { 0x00007800, 0x00140000 }, - { 0x00007804, 0x0e4548d8 }, - { 0x00007808, 0x54214514 }, - { 0x0000780c, 0x02025820 }, - { 0x00007810, 0x71c0d388 }, - { 0x00007814, 0x924934a8 }, - { 0x0000781c, 0x00000000 }, - { 0x00007828, 0x66964300 }, - { 0x0000782c, 0x8db6d961 }, - { 0x00007830, 0x8db6d96c }, - { 0x00007834, 0x6140008b }, - { 0x0000783c, 0x72ee0a72 }, - { 0x00007840, 0xbbfffffc }, - { 0x00007844, 0x000c0db6 }, - { 0x00007848, 0x6db61b6f }, - { 0x0000784c, 0x6d9b66db }, - { 0x00007850, 0x6d8c6dba }, - { 0x00007854, 0x00040000 }, - { 0x00007858, 0xdb003012 }, - { 
0x0000785c, 0x04924914 }, - { 0x00007860, 0x21084210 }, - { 0x00007864, 0xf7d7ffde }, - { 0x00007868, 0xc2034080 }, - { 0x00007870, 0x10142c00 }, - { 0x00009808, 0x00000000 }, - { 0x0000980c, 0xafe68e30 }, - { 0x00009810, 0xfd14e000 }, - { 0x00009814, 0x9c0a9f6b }, - { 0x0000981c, 0x00000000 }, - { 0x0000982c, 0x0000a000 }, - { 0x00009830, 0x00000000 }, - { 0x0000983c, 0x00200400 }, - { 0x0000984c, 0x0040233c }, - { 0x00009854, 0x00000044 }, - { 0x00009900, 0x00000000 }, - { 0x00009904, 0x00000000 }, - { 0x00009908, 0x00000000 }, - { 0x0000990c, 0x00000000 }, - { 0x0000991c, 0x10000fff }, - { 0x00009920, 0x04900000 }, - { 0x00009928, 0x00000001 }, - { 0x0000992c, 0x00000004 }, - { 0x00009934, 0x1e1f2022 }, - { 0x00009938, 0x0a0b0c0d }, - { 0x0000993c, 0x00000000 }, - { 0x00009940, 0x14750604 }, - { 0x00009948, 0x9280c00a }, - { 0x0000994c, 0x00020028 }, - { 0x00009954, 0x5f3ca3de }, - { 0x00009958, 0x0108ecff }, - { 0x00009968, 0x000003ce }, - { 0x00009970, 0x192bb514 }, - { 0x00009974, 0x00000000 }, - { 0x00009978, 0x00000001 }, - { 0x0000997c, 0x00000000 }, - { 0x00009980, 0x00000000 }, - { 0x00009984, 0x00000000 }, - { 0x00009988, 0x00000000 }, - { 0x0000998c, 0x00000000 }, - { 0x00009990, 0x00000000 }, - { 0x00009994, 0x00000000 }, - { 0x00009998, 0x00000000 }, - { 0x0000999c, 0x00000000 }, - { 0x000099a0, 0x00000000 }, - { 0x000099a4, 0x00000001 }, - { 0x000099a8, 0x201fff00 }, - { 0x000099ac, 0x2def0400 }, - { 0x000099b0, 0x03051000 }, - { 0x000099b4, 0x00000820 }, - { 0x000099dc, 0x00000000 }, - { 0x000099e0, 0x00000000 }, - { 0x000099e4, 0xaaaaaaaa }, - { 0x000099e8, 0x3c466478 }, - { 0x000099ec, 0x0cc80caa }, - { 0x000099f0, 0x00000000 }, - { 0x0000a208, 0x803e68c8 }, - { 0x0000a210, 0x4080a333 }, - { 0x0000a214, 0x00206c10 }, - { 0x0000a218, 0x009c4060 }, - { 0x0000a220, 0x01834061 }, - { 0x0000a224, 0x00000400 }, - { 0x0000a228, 0x000003b5 }, - { 0x0000a22c, 0x00000000 }, - { 0x0000a234, 0x20202020 }, - { 0x0000a238, 0x20202020 }, - { 0x0000a244, 0x00000000 }, - { 0x0000a248, 0xfffffffc }, - { 0x0000a24c, 0x00000000 }, - { 0x0000a254, 0x00000000 }, - { 0x0000a258, 0x0ccb5380 }, - { 0x0000a25c, 0x15151501 }, - { 0x0000a260, 0xdfa90f01 }, - { 0x0000a268, 0x00000000 }, - { 0x0000a26c, 0x0ebae9e6 }, - { 0x0000a388, 0x0c000000 }, - { 0x0000a38c, 0x20202020 }, - { 0x0000a390, 0x20202020 }, - { 0x0000a39c, 0x00000001 }, - { 0x0000a3a0, 0x00000000 }, - { 0x0000a3a4, 0x00000000 }, - { 0x0000a3a8, 0x00000000 }, - { 0x0000a3ac, 0x00000000 }, - { 0x0000a3b0, 0x00000000 }, - { 0x0000a3b4, 0x00000000 }, - { 0x0000a3b8, 0x00000000 }, - { 0x0000a3bc, 0x00000000 }, - { 0x0000a3c0, 0x00000000 }, - { 0x0000a3c4, 0x00000000 }, - { 0x0000a3cc, 0x20202020 }, - { 0x0000a3d0, 0x20202020 }, - { 0x0000a3d4, 0x20202020 }, - { 0x0000a3e4, 0x00000000 }, - { 0x0000a3e8, 0x18c43433 }, - { 0x0000a3ec, 0x00f70081 }, - { 0x0000a3f0, 0x01036a2f }, - { 0x0000a3f4, 0x00000000 }, - { 0x0000d270, 0x0d820820 }, - { 0x0000d35c, 0x07ffffef }, - { 0x0000d360, 0x0fffffe7 }, - { 0x0000d364, 0x17ffffe5 }, - { 0x0000d368, 0x1fffffe4 }, - { 0x0000d36c, 0x37ffffe3 }, - { 0x0000d370, 0x3fffffe3 }, - { 0x0000d374, 0x57ffffe3 }, - { 0x0000d378, 0x5fffffe2 }, - { 0x0000d37c, 0x7fffffe2 }, - { 0x0000d380, 0x7f3c7bba }, - { 0x0000d384, 0xf3307ff0 }, + /* Addr allmodes */ + {0x0000000c, 0x00000000}, + {0x00000030, 0x00020045}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000008}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00000054, 0x0000001f}, + {0x00000800, 
0x00000000}, + {0x00000804, 0x00000000}, + {0x00000808, 0x00000000}, + {0x0000080c, 0x00000000}, + {0x00000810, 0x00000000}, + {0x00000814, 0x00000000}, + {0x00000818, 0x00000000}, + {0x0000081c, 0x00000000}, + {0x00000820, 0x00000000}, + {0x00000824, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x00001230, 0x00000000}, + {0x00001270, 0x00000000}, + {0x00001038, 0x00000000}, + {0x00001078, 0x00000000}, + {0x000010b8, 0x00000000}, + {0x000010f8, 0x00000000}, + {0x00001138, 0x00000000}, + {0x00001178, 0x00000000}, + {0x000011b8, 0x00000000}, + {0x000011f8, 0x00000000}, + {0x00001238, 0x00000000}, + {0x00001278, 0x00000000}, + {0x000012b8, 0x00000000}, + {0x000012f8, 0x00000000}, + {0x00001338, 0x00000000}, + {0x00001378, 0x00000000}, + {0x000013b8, 0x00000000}, + {0x000013f8, 0x00000000}, + {0x00001438, 0x00000000}, + {0x00001478, 0x00000000}, + {0x000014b8, 0x00000000}, + {0x000014f8, 0x00000000}, + {0x00001538, 0x00000000}, + {0x00001578, 0x00000000}, + {0x000015b8, 0x00000000}, + {0x000015f8, 0x00000000}, + {0x00001638, 0x00000000}, + {0x00001678, 0x00000000}, + {0x000016b8, 0x00000000}, + {0x000016f8, 0x00000000}, + {0x00001738, 0x00000000}, + {0x00001778, 0x00000000}, + {0x000017b8, 0x00000000}, + {0x000017f8, 0x00000000}, + {0x0000103c, 0x00000000}, + {0x0000107c, 0x00000000}, + {0x000010bc, 0x00000000}, + {0x000010fc, 0x00000000}, + {0x0000113c, 0x00000000}, + {0x0000117c, 0x00000000}, + {0x000011bc, 0x00000000}, + {0x000011fc, 0x00000000}, + {0x0000123c, 0x00000000}, + {0x0000127c, 0x00000000}, + {0x000012bc, 0x00000000}, + {0x000012fc, 0x00000000}, + {0x0000133c, 0x00000000}, + {0x0000137c, 0x00000000}, + {0x000013bc, 0x00000000}, + {0x000013fc, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00004030, 0x00000002}, + {0x0000403c, 0x00000002}, + {0x00004024, 0x0000001f}, + {0x00004060, 0x00000000}, + {0x00004064, 0x00000000}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000700}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008048, 0x00000000}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x00008070, 0x00000000}, + {0x000080b0, 0x00000000}, + {0x000080b4, 0x00000000}, + {0x000080b8, 0x00000000}, + {0x000080bc, 0x00000000}, + {0x000080c0, 0x2a80001a}, + {0x000080c4, 0x05dc01e0}, + {0x000080c8, 0x1f402710}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00001e00}, + {0x000080d4, 0x00000000}, + {0x000080d8, 0x00400000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x003f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080f8, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00020000}, + {0x00008104, 0x00000001}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000168}, + {0x00008118, 0x000100aa}, + {0x0000811c, 0x00003210}, + {0x00008120, 0x08f04810}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x00000000}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + 
{0x0000816c, 0x00000000}, + {0x00008170, 0x32143320}, + {0x00008174, 0xfaa4fa50}, + {0x00008178, 0x00000100}, + {0x0000817c, 0x00000000}, + {0x000081c0, 0x00000000}, + {0x000081d0, 0x0000320a}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008200, 0x00000000}, + {0x00008204, 0x00000000}, + {0x00008208, 0x00000000}, + {0x0000820c, 0x00000000}, + {0x00008210, 0x00000000}, + {0x00008214, 0x00000000}, + {0x00008218, 0x00000000}, + {0x0000821c, 0x00000000}, + {0x00008220, 0x00000000}, + {0x00008224, 0x00000000}, + {0x00008228, 0x00000000}, + {0x0000822c, 0x00000000}, + {0x00008230, 0x00000000}, + {0x00008234, 0x00000000}, + {0x00008238, 0x00000000}, + {0x0000823c, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000100}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x400000ff}, + {0x00008260, 0x00080922}, + {0x00008264, 0x88a00010}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000000}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x00000000}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x0000829c, 0x00000000}, + {0x00008300, 0x00000040}, + {0x00008314, 0x00000000}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000001}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000e00}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x00000000}, + {0x00008340, 0x00010380}, + {0x00008344, 0x00581043}, + {0x00007010, 0x00000030}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, + {0x00007800, 0x00140000}, + {0x00007804, 0x0e4548d8}, + {0x00007808, 0x54214514}, + {0x0000780c, 0x02025820}, + {0x00007810, 0x71c0d388}, + {0x00007814, 0x924934a8}, + {0x0000781c, 0x00000000}, + {0x00007828, 0x66964300}, + {0x0000782c, 0x8db6d961}, + {0x00007830, 0x8db6d96c}, + {0x00007834, 0x6140008b}, + {0x0000783c, 0x72ee0a72}, + {0x00007840, 0xbbfffffc}, + {0x00007844, 0x000c0db6}, + {0x00007848, 0x6db6246f}, + {0x0000784c, 0x6d9b66db}, + {0x00007850, 0x6d8c6dba}, + {0x00007854, 0x00040000}, + {0x00007858, 0xdb003012}, + {0x0000785c, 0x04924914}, + {0x00007860, 0x21084210}, + {0x00007864, 0xf7d7ffde}, + {0x00007868, 0xc2034080}, + {0x00007870, 0x10142c00}, + {0x00009808, 0x00000000}, + {0x0000980c, 0xafe68e30}, + {0x00009810, 0xfd14e000}, + {0x00009814, 0x9c0a9f6b}, + {0x0000981c, 0x00000000}, + {0x0000982c, 0x0000a000}, + {0x00009830, 0x00000000}, + {0x0000983c, 0x00200400}, + {0x0000984c, 0x0040233c}, + {0x00009854, 0x00000044}, + {0x00009900, 0x00000000}, + {0x00009904, 0x00000000}, + {0x00009908, 0x00000000}, + {0x0000990c, 0x00000000}, + {0x0000991c, 0x10000fff}, + {0x00009920, 0x04900000}, + {0x00009928, 0x00000001}, + {0x0000992c, 0x00000004}, + {0x00009934, 0x1e1f2022}, + {0x00009938, 0x0a0b0c0d}, + {0x0000993c, 0x00000000}, + {0x00009940, 0x14750604}, + {0x00009948, 0x9280c00a}, + {0x0000994c, 0x00020028}, + {0x00009954, 0x5f3ca3de}, + {0x00009958, 0x0108ecff}, + {0x00009968, 0x000003ce}, + {0x00009970, 0x192bb514}, + {0x00009974, 0x00000000}, + {0x00009978, 0x00000001}, + {0x0000997c, 0x00000000}, + {0x00009980, 0x00000000}, + {0x00009984, 0x00000000}, + {0x00009988, 0x00000000}, + {0x0000998c, 0x00000000}, + {0x00009990, 0x00000000}, + {0x00009994, 0x00000000}, + {0x00009998, 0x00000000}, + {0x0000999c, 0x00000000}, + {0x000099a0, 0x00000000}, + {0x000099a4, 0x00000001}, + {0x000099a8, 0x201fff00}, 
+ {0x000099ac, 0x2def0400}, + {0x000099b0, 0x03051000}, + {0x000099b4, 0x00000820}, + {0x000099dc, 0x00000000}, + {0x000099e0, 0x00000000}, + {0x000099e4, 0xaaaaaaaa}, + {0x000099e8, 0x3c466478}, + {0x000099ec, 0x0cc80caa}, + {0x000099f0, 0x00000000}, + {0x0000a208, 0x803e68c8}, + {0x0000a210, 0x4080a333}, + {0x0000a214, 0x00206c10}, + {0x0000a218, 0x009c4060}, + {0x0000a220, 0x01834061}, + {0x0000a224, 0x00000400}, + {0x0000a228, 0x000003b5}, + {0x0000a22c, 0x00000000}, + {0x0000a234, 0x20202020}, + {0x0000a238, 0x20202020}, + {0x0000a244, 0x00000000}, + {0x0000a248, 0xfffffffc}, + {0x0000a24c, 0x00000000}, + {0x0000a254, 0x00000000}, + {0x0000a258, 0x0ccb5380}, + {0x0000a25c, 0x15151501}, + {0x0000a260, 0xdfa90f01}, + {0x0000a268, 0x00000000}, + {0x0000a26c, 0x0ebae9e6}, + {0x0000a388, 0x0c000000}, + {0x0000a38c, 0x20202020}, + {0x0000a390, 0x20202020}, + {0x0000a39c, 0x00000001}, + {0x0000a3a0, 0x00000000}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0x00000000}, + {0x0000a3ac, 0x00000000}, + {0x0000a3b0, 0x00000000}, + {0x0000a3b4, 0x00000000}, + {0x0000a3b8, 0x00000000}, + {0x0000a3bc, 0x00000000}, + {0x0000a3c0, 0x00000000}, + {0x0000a3c4, 0x00000000}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3e4, 0x00000000}, + {0x0000a3e8, 0x18c43433}, + {0x0000a3ec, 0x00f70081}, + {0x0000a3f0, 0x01036a2f}, + {0x0000a3f4, 0x00000000}, + {0x0000d270, 0x0d820820}, + {0x0000d35c, 0x07ffffef}, + {0x0000d360, 0x0fffffe7}, + {0x0000d364, 0x17ffffe5}, + {0x0000d368, 0x1fffffe4}, + {0x0000d36c, 0x37ffffe3}, + {0x0000d370, 0x3fffffe3}, + {0x0000d374, 0x57ffffe3}, + {0x0000d378, 0x5fffffe2}, + {0x0000d37c, 0x7fffffe2}, + {0x0000d380, 0x7f3c7bba}, + {0x0000d384, 0xf3307ff0}, }; static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = { - { 0x0000a1f4, 0x00fffeff }, - { 0x0000a1f8, 0x00f5f9ff }, - { 0x0000a1fc, 0xb79f6427 }, + /* Addr allmodes */ + {0x0000a1f4, 0x00fffeff}, + {0x0000a1f8, 0x00f5f9ff}, + {0x0000a1fc, 0xb79f6427}, }; static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = { - { 0x0000a1f4, 0x00000000 }, - { 0x0000a1f8, 0xefff0301 }, - { 0x0000a1fc, 0xca9228ee }, + /* Addr allmodes */ + {0x0000a1f4, 0x00000000}, + {0x0000a1f8, 0xefff0301}, + {0x0000a1fc, 0xca9228ee}, }; static const u32 ar9271Modes_9271_1_0_only[][6] = { - { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 }, - { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + {0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311}, + {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001}, }; static const u32 ar9271Modes_9271_ANI_reg[][6] = { - { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, - { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, - { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, - { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 }, - { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, - { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 }, - { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, - { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2}, + {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e}, + {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x0000986c, 
0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881}, + {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8}, + {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d}, + {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, }; static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = { - { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 }, - { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 }, - { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, - { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, - { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 }, - { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff }, - { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, - { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, - { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 }, - { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd }, - { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd }, - { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd }, - { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd }, - { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd }, - { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd }, + {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000}, + {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000}, + {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000}, + {0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000}, + {0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000}, + {0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000}, + {0x0000a31c, 
0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000}, + {0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000}, + {0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000}, + {0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000}, + {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000}, + {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000}, + {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000}, + {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029}, + {0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff}, + {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4}, + {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04}, + {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652}, + {0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd}, + {0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd}, + {0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd}, + {0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd}, + {0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd}, + {0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd}, }; static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = { - { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 }, - { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 }, - { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 }, - { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 }, - { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 }, - { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 }, - { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 }, - { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 }, - { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 }, - { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 }, - { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 }, - { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 }, - { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 }, - { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 }, - { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, - { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, - { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a34c, 0x0003e9df, 0x0003e9df, 
0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, - { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b }, - { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff }, - { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 }, - { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, - { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 }, - { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, - { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 }, - { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 }, - { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 }, - { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 }, - { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 }, + {0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000}, + {0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000}, + {0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000}, + {0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000}, + {0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000}, + {0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000}, + {0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000}, + {0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000}, + {0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000}, + {0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000}, + {0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000}, + {0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000}, + {0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000}, + {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000}, + {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000}, + {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000}, + {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000}, + {0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b}, + {0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff}, + {0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6}, + {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00}, + {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652, 0x0a22a652}, + {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7}, + {0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063}, + {0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63}, + {0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063}, + {0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63}, + {0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 
0x00000063}, }; -#endif /* INITVALS_9002_10_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 2be20d2070c4472ebf97268d6eb10755b1305672..50dda394f8bef01ee69fb3d3e88604e856f6b938 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -287,6 +287,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt); ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt); ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt); + ts->tid = MS(ads->ds_txstatus9, AR_TxTid); ts->ts_antenna = 0; return 0; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index ed314e89bfe177d65cc95c20fe4b4042f3749814..adbf031fbc5a7bd37c90590870c908d3719e8be7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -471,52 +471,47 @@ static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah, static void ar9002_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { - struct ath_common *common = ath9k_hw_common(ah); int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); - - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 0] is %d\n", nf); - - if (AR_SREV_9271(ah) && (nf >= -114)) - nf = -116; - - nfarray[0] = nf; - - if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) { - nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), - AR9280_PHY_CH1_MINCCA_PWR); - - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 1] is %d\n", nf); - nfarray[1] = nf; - } + nfarray[0] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 0] is %d\n", nf); + if (IS_CHAN_HT40(ah->curchan)) + nfarray[3] = sign_extend(nf, 9); - if (AR_SREV_9271(ah) && (nf >= -114)) - nf = -116; + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + return; - nfarray[3] = nf; + nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); + nfarray[1] = sign_extend(nf, 9); - if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) { - nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), - AR9280_PHY_CH1_EXT_MINCCA_PWR); + nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR); + if (IS_CHAN_HT40(ah->curchan)) + nfarray[4] = sign_extend(nf, 9); +} - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 1] is %d\n", nf); - nfarray[4] = nf; +static void ar9002_hw_set_nf_limits(struct ath_hw *ah) +{ + if (AR_SREV_9285(ah)) { + ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ; + ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ; + ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9285_2GHZ; + } else if (AR_SREV_9287(ah)) { + ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ; + ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ; + ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ; + } else if (AR_SREV_9271(ah)) { + ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ; + ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ; + ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9271_2GHZ; + } else { + ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ; + ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ; + ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9280_2GHZ; + ah->nf_5g.max = 
AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ; + ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ; + ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9280_5GHZ; } } @@ -532,4 +527,6 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah) priv_ops->olc_init = ar9002_olc_init; priv_ops->compute_pll_control = ar9002_hw_compute_pll_control; priv_ops->do_getnf = ar9002_hw_do_getnf; + + ar9002_hw_set_nf_limits(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h index 81bf6e5840e1162d7e658e9a3a2a82eec0bf8fa4..c5151a4dd10bb01e742e7d9aaa2932519d40afd8 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h @@ -114,6 +114,10 @@ #define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000 #define AR_PHY_FIND_SIG_FIRPWR_S 18 +#define AR_PHY_FIND_SIG_LOW 0x9840 +#define AR_PHY_FIND_SIG_FIRSTEP_LOW 0x00000FC0L +#define AR_PHY_FIND_SIG_FIRSTEP_LOW_S 6 + #define AR_PHY_AGC_CTL1 0x985C #define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80 #define AR_PHY_AGC_CTL1_COARSE_LOW_S 7 @@ -325,6 +329,9 @@ #define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9 #define AR_PHY_EXT_CCA_THRESH62 0x007F0000 #define AR_PHY_EXT_CCA_THRESH62_S 16 +#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00L +#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9 + #define AR_PHY_EXT_MINCCA_PWR 0xFF800000 #define AR_PHY_EXT_MINCCA_PWR_S 23 #define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000 @@ -569,4 +576,30 @@ #define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000 #define AR_PHY_CH2_EXT_MINCCA_PWR_S 23 +#define AR_PHY_CCA_NOM_VAL_5416_2GHZ -90 +#define AR_PHY_CCA_NOM_VAL_5416_5GHZ -100 +#define AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ -100 +#define AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ -110 +#define AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ -80 +#define AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ -90 + +#define AR_PHY_CCA_NOM_VAL_9280_2GHZ -112 +#define AR_PHY_CCA_NOM_VAL_9280_5GHZ -112 +#define AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ -127 +#define AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ -122 +#define AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ -97 +#define AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ -102 + +#define AR_PHY_CCA_NOM_VAL_9285_2GHZ -118 +#define AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ -127 +#define AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ -108 + +#define AR_PHY_CCA_NOM_VAL_9271_2GHZ -118 +#define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ -127 +#define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ -116 + +#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -120 +#define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ -127 +#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -110 + #endif diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h similarity index 87% rename from drivers/net/wireless/ath/ath9k/ar9003_initvals.h rename to drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h index db019dd220b7285f3a230add89f191107f2b80b4..d3375fc4ce8b22f6dda995d7a0ffacbd09bc6892 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h @@ -14,8 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#ifndef INITVALS_9003_H -#define INITVALS_9003_H +#ifndef INITVALS_9003_2P0_H +#define INITVALS_9003_2P0_H /* AR9003 2.0 */ @@ -835,71 +835,71 @@ static const u32 ar9300_2p0_baseband_core[][2] = { static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, - {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, - {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, - {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200}, - {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202}, - {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400}, - {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402}, - {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404}, - {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603}, - {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02}, - {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04}, - {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20}, - {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20}, - {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22}, - {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24}, - {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640}, - {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, - {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, - {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, - {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, - {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, - {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, - {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a578, 0x7504ff56, 0x7504ff56, 
0x56001eec, 0x56001eec}, - {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, - {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002}, - {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004}, - {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200}, - {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202}, - {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400}, - {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402}, - {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404}, - {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603}, - {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02}, - {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04}, - {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20}, - {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20}, - {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22}, - {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24}, - {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640}, - {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660}, - {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861}, - {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81}, - {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83}, - {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84}, - {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3}, - {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5}, - {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9}, - {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb}, - {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + 
{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, @@ -913,71 +913,71 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = { static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, - {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, - {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, - {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200}, - {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202}, - {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400}, - {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402}, - {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404}, - {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603}, - {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02}, - {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04}, - {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20}, - {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20}, - {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22}, - {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24}, - {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640}, - {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, - {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, - {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 
0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84}, - {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, - {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, - {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, - {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, - {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002}, - {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004}, - {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200}, - {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202}, - {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400}, - {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402}, - {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404}, - {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603}, - {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02}, - {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04}, - {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20}, - {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20}, - {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22}, - {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24}, - {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640}, - {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660}, - {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861}, - {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81}, - {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83}, - 
{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84}, - {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3}, - {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5}, - {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9}, - {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb}, - {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, - {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, @@ -1781,4 +1781,4 @@ static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = { {0x00004044, 0x00000000}, }; -#endif /* INITVALS_9003_H */ +#endif /* INITVALS_9003_2P0_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h new file mode 100644 index 0000000000000000000000000000000000000000..ec98ab50748a7e0d31189a9d414ab28a35b6fda5 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -0,0 +1,1785 @@ +/* + * Copyright (c) 2010 Atheros Communications Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef INITVALS_9003_2P2_H +#define INITVALS_9003_2P2_H + +/* AR9003 2.2 */ + +static const u32 ar9300_2p2_radio_postamble[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31}, + {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800}, + {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20}, + {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, + {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, + {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008}, +}; + +static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, + {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, + {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, + {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402}, + {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404}, + {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, + {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, + {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, + {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20}, + {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20}, + {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22}, + {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, + {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, + {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, + {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861}, + {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81}, + {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83}, + {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84}, + {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3}, + {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5}, + {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9}, + {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb}, + {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + 
{0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, + {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, + {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, + {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400}, + {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402}, + {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603}, + {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02}, + {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04}, + {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20}, + {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20}, + {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22}, + {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, + {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, + {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, + {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861}, + {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81}, + {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83}, + {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84}, + {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3}, + {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5}, + {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9}, + {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb}, + {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, + {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, + {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, + {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, +}; + +static const u32 ar9300Modes_fast_clock_2p2[][3] = { + /* Addr 5G_HT20 5G_HT40 */ + {0x00001030, 0x00000268, 0x000004d0}, + {0x00001070, 0x0000018c, 0x00000318}, + {0x000010b0, 0x00000fd0, 0x00001fa0}, + {0x00008014, 0x044c044c, 0x08980898}, + {0x0000801c, 0x148ec02b, 0x148ec057}, + {0x00008318, 0x000044c0, 0x00008980}, + {0x00009e00, 0x03721821, 0x03721821}, + {0x0000a230, 0x0000000b, 0x00000016}, + {0x0000a254, 0x00000898, 0x00001130}, +}; + +static const u32 ar9300_2p2_radio_core[][2] = { + /* Addr allmodes */ + {0x00016000, 0x36db6db6}, + {0x00016004, 0x6db6db40}, + {0x00016008, 0x73f00000}, + {0x0001600c, 0x00000000}, + {0x00016040, 0x7f80fff8}, + {0x0001604c, 0x76d005b5}, + {0x00016050, 0x556cf031}, + {0x00016054, 
0x13449440}, + {0x00016058, 0x0c51c92c}, + {0x0001605c, 0x3db7fffc}, + {0x00016060, 0xfffffffc}, + {0x00016064, 0x000f0278}, + {0x0001606c, 0x6db60000}, + {0x00016080, 0x00000000}, + {0x00016084, 0x0e48048c}, + {0x00016088, 0x54214514}, + {0x0001608c, 0x119f481e}, + {0x00016090, 0x24926490}, + {0x00016098, 0xd2888888}, + {0x000160a0, 0x0a108ffe}, + {0x000160a4, 0x812fc370}, + {0x000160a8, 0x423c8000}, + {0x000160b4, 0x92480080}, + {0x000160c0, 0x00adb6d0}, + {0x000160c4, 0x6db6db60}, + {0x000160c8, 0x6db6db6c}, + {0x000160cc, 0x01e6c000}, + {0x00016100, 0x3fffbe01}, + {0x00016104, 0xfff80000}, + {0x00016108, 0x00080010}, + {0x00016144, 0x02084080}, + {0x00016148, 0x00000000}, + {0x00016280, 0x058a0001}, + {0x00016284, 0x3d840208}, + {0x00016288, 0x05a20408}, + {0x0001628c, 0x00038c07}, + {0x00016290, 0x00000004}, + {0x00016294, 0x458aa14f}, + {0x00016380, 0x00000000}, + {0x00016384, 0x00000000}, + {0x00016388, 0x00800700}, + {0x0001638c, 0x00800700}, + {0x00016390, 0x00800700}, + {0x00016394, 0x00000000}, + {0x00016398, 0x00000000}, + {0x0001639c, 0x00000000}, + {0x000163a0, 0x00000001}, + {0x000163a4, 0x00000001}, + {0x000163a8, 0x00000000}, + {0x000163ac, 0x00000000}, + {0x000163b0, 0x00000000}, + {0x000163b4, 0x00000000}, + {0x000163b8, 0x00000000}, + {0x000163bc, 0x00000000}, + {0x000163c0, 0x000000a0}, + {0x000163c4, 0x000c0000}, + {0x000163c8, 0x14021402}, + {0x000163cc, 0x00001402}, + {0x000163d0, 0x00000000}, + {0x000163d4, 0x00000000}, + {0x00016400, 0x36db6db6}, + {0x00016404, 0x6db6db40}, + {0x00016408, 0x73f00000}, + {0x0001640c, 0x00000000}, + {0x00016440, 0x7f80fff8}, + {0x0001644c, 0x76d005b5}, + {0x00016450, 0x556cf031}, + {0x00016454, 0x13449440}, + {0x00016458, 0x0c51c92c}, + {0x0001645c, 0x3db7fffc}, + {0x00016460, 0xfffffffc}, + {0x00016464, 0x000f0278}, + {0x0001646c, 0x6db60000}, + {0x00016500, 0x3fffbe01}, + {0x00016504, 0xfff80000}, + {0x00016508, 0x00080010}, + {0x00016544, 0x02084080}, + {0x00016548, 0x00000000}, + {0x00016780, 0x00000000}, + {0x00016784, 0x00000000}, + {0x00016788, 0x00800700}, + {0x0001678c, 0x00800700}, + {0x00016790, 0x00800700}, + {0x00016794, 0x00000000}, + {0x00016798, 0x00000000}, + {0x0001679c, 0x00000000}, + {0x000167a0, 0x00000001}, + {0x000167a4, 0x00000001}, + {0x000167a8, 0x00000000}, + {0x000167ac, 0x00000000}, + {0x000167b0, 0x00000000}, + {0x000167b4, 0x00000000}, + {0x000167b8, 0x00000000}, + {0x000167bc, 0x00000000}, + {0x000167c0, 0x000000a0}, + {0x000167c4, 0x000c0000}, + {0x000167c8, 0x14021402}, + {0x000167cc, 0x00001402}, + {0x000167d0, 0x00000000}, + {0x000167d4, 0x00000000}, + {0x00016800, 0x36db6db6}, + {0x00016804, 0x6db6db40}, + {0x00016808, 0x73f00000}, + {0x0001680c, 0x00000000}, + {0x00016840, 0x7f80fff8}, + {0x0001684c, 0x76d005b5}, + {0x00016850, 0x556cf031}, + {0x00016854, 0x13449440}, + {0x00016858, 0x0c51c92c}, + {0x0001685c, 0x3db7fffc}, + {0x00016860, 0xfffffffc}, + {0x00016864, 0x000f0278}, + {0x0001686c, 0x6db60000}, + {0x00016900, 0x3fffbe01}, + {0x00016904, 0xfff80000}, + {0x00016908, 0x00080010}, + {0x00016944, 0x02084080}, + {0x00016948, 0x00000000}, + {0x00016b80, 0x00000000}, + {0x00016b84, 0x00000000}, + {0x00016b88, 0x00800700}, + {0x00016b8c, 0x00800700}, + {0x00016b90, 0x00800700}, + {0x00016b94, 0x00000000}, + {0x00016b98, 0x00000000}, + {0x00016b9c, 0x00000000}, + {0x00016ba0, 0x00000001}, + {0x00016ba4, 0x00000001}, + {0x00016ba8, 0x00000000}, + {0x00016bac, 0x00000000}, + {0x00016bb0, 0x00000000}, + {0x00016bb4, 0x00000000}, + {0x00016bb8, 0x00000000}, + {0x00016bbc, 0x00000000}, + 
{0x00016bc0, 0x000000a0}, + {0x00016bc4, 0x000c0000}, + {0x00016bc8, 0x14021402}, + {0x00016bcc, 0x00001402}, + {0x00016bd0, 0x00000000}, + {0x00016bd4, 0x00000000}, +}; + +static const u32 ar9300Common_rx_gain_table_merlin_2p2[][2] = { + /* Addr allmodes */ + {0x0000a000, 0x02000101}, + {0x0000a004, 0x02000102}, + {0x0000a008, 0x02000103}, + {0x0000a00c, 0x02000104}, + {0x0000a010, 0x02000200}, + {0x0000a014, 0x02000201}, + {0x0000a018, 0x02000202}, + {0x0000a01c, 0x02000203}, + {0x0000a020, 0x02000204}, + {0x0000a024, 0x02000205}, + {0x0000a028, 0x02000208}, + {0x0000a02c, 0x02000302}, + {0x0000a030, 0x02000303}, + {0x0000a034, 0x02000304}, + {0x0000a038, 0x02000400}, + {0x0000a03c, 0x02010300}, + {0x0000a040, 0x02010301}, + {0x0000a044, 0x02010302}, + {0x0000a048, 0x02000500}, + {0x0000a04c, 0x02010400}, + {0x0000a050, 0x02020300}, + {0x0000a054, 0x02020301}, + {0x0000a058, 0x02020302}, + {0x0000a05c, 0x02020303}, + {0x0000a060, 0x02020400}, + {0x0000a064, 0x02030300}, + {0x0000a068, 0x02030301}, + {0x0000a06c, 0x02030302}, + {0x0000a070, 0x02030303}, + {0x0000a074, 0x02030400}, + {0x0000a078, 0x02040300}, + {0x0000a07c, 0x02040301}, + {0x0000a080, 0x02040302}, + {0x0000a084, 0x02040303}, + {0x0000a088, 0x02030500}, + {0x0000a08c, 0x02040400}, + {0x0000a090, 0x02050203}, + {0x0000a094, 0x02050204}, + {0x0000a098, 0x02050205}, + {0x0000a09c, 0x02040500}, + {0x0000a0a0, 0x02050301}, + {0x0000a0a4, 0x02050302}, + {0x0000a0a8, 0x02050303}, + {0x0000a0ac, 0x02050400}, + {0x0000a0b0, 0x02050401}, + {0x0000a0b4, 0x02050402}, + {0x0000a0b8, 0x02050403}, + {0x0000a0bc, 0x02050500}, + {0x0000a0c0, 0x02050501}, + {0x0000a0c4, 0x02050502}, + {0x0000a0c8, 0x02050503}, + {0x0000a0cc, 0x02050504}, + {0x0000a0d0, 0x02050600}, + {0x0000a0d4, 0x02050601}, + {0x0000a0d8, 0x02050602}, + {0x0000a0dc, 0x02050603}, + {0x0000a0e0, 0x02050604}, + {0x0000a0e4, 0x02050700}, + {0x0000a0e8, 0x02050701}, + {0x0000a0ec, 0x02050702}, + {0x0000a0f0, 0x02050703}, + {0x0000a0f4, 0x02050704}, + {0x0000a0f8, 0x02050705}, + {0x0000a0fc, 0x02050708}, + {0x0000a100, 0x02050709}, + {0x0000a104, 0x0205070a}, + {0x0000a108, 0x0205070b}, + {0x0000a10c, 0x0205070c}, + {0x0000a110, 0x0205070d}, + {0x0000a114, 0x02050710}, + {0x0000a118, 0x02050711}, + {0x0000a11c, 0x02050712}, + {0x0000a120, 0x02050713}, + {0x0000a124, 0x02050714}, + {0x0000a128, 0x02050715}, + {0x0000a12c, 0x02050730}, + {0x0000a130, 0x02050731}, + {0x0000a134, 0x02050732}, + {0x0000a138, 0x02050733}, + {0x0000a13c, 0x02050734}, + {0x0000a140, 0x02050735}, + {0x0000a144, 0x02050750}, + {0x0000a148, 0x02050751}, + {0x0000a14c, 0x02050752}, + {0x0000a150, 0x02050753}, + {0x0000a154, 0x02050754}, + {0x0000a158, 0x02050755}, + {0x0000a15c, 0x02050770}, + {0x0000a160, 0x02050771}, + {0x0000a164, 0x02050772}, + {0x0000a168, 0x02050773}, + {0x0000a16c, 0x02050774}, + {0x0000a170, 0x02050775}, + {0x0000a174, 0x00000776}, + {0x0000a178, 0x00000776}, + {0x0000a17c, 0x00000776}, + {0x0000a180, 0x00000776}, + {0x0000a184, 0x00000776}, + {0x0000a188, 0x00000776}, + {0x0000a18c, 0x00000776}, + {0x0000a190, 0x00000776}, + {0x0000a194, 0x00000776}, + {0x0000a198, 0x00000776}, + {0x0000a19c, 0x00000776}, + {0x0000a1a0, 0x00000776}, + {0x0000a1a4, 0x00000776}, + {0x0000a1a8, 0x00000776}, + {0x0000a1ac, 0x00000776}, + {0x0000a1b0, 0x00000776}, + {0x0000a1b4, 0x00000776}, + {0x0000a1b8, 0x00000776}, + {0x0000a1bc, 0x00000776}, + {0x0000a1c0, 0x00000776}, + {0x0000a1c4, 0x00000776}, + {0x0000a1c8, 0x00000776}, + {0x0000a1cc, 0x00000776}, + {0x0000a1d0, 0x00000776}, + {0x0000a1d4, 
0x00000776}, + {0x0000a1d8, 0x00000776}, + {0x0000a1dc, 0x00000776}, + {0x0000a1e0, 0x00000776}, + {0x0000a1e4, 0x00000776}, + {0x0000a1e8, 0x00000776}, + {0x0000a1ec, 0x00000776}, + {0x0000a1f0, 0x00000776}, + {0x0000a1f4, 0x00000776}, + {0x0000a1f8, 0x00000776}, + {0x0000a1fc, 0x00000776}, + {0x0000b000, 0x02000101}, + {0x0000b004, 0x02000102}, + {0x0000b008, 0x02000103}, + {0x0000b00c, 0x02000104}, + {0x0000b010, 0x02000200}, + {0x0000b014, 0x02000201}, + {0x0000b018, 0x02000202}, + {0x0000b01c, 0x02000203}, + {0x0000b020, 0x02000204}, + {0x0000b024, 0x02000205}, + {0x0000b028, 0x02000208}, + {0x0000b02c, 0x02000302}, + {0x0000b030, 0x02000303}, + {0x0000b034, 0x02000304}, + {0x0000b038, 0x02000400}, + {0x0000b03c, 0x02010300}, + {0x0000b040, 0x02010301}, + {0x0000b044, 0x02010302}, + {0x0000b048, 0x02000500}, + {0x0000b04c, 0x02010400}, + {0x0000b050, 0x02020300}, + {0x0000b054, 0x02020301}, + {0x0000b058, 0x02020302}, + {0x0000b05c, 0x02020303}, + {0x0000b060, 0x02020400}, + {0x0000b064, 0x02030300}, + {0x0000b068, 0x02030301}, + {0x0000b06c, 0x02030302}, + {0x0000b070, 0x02030303}, + {0x0000b074, 0x02030400}, + {0x0000b078, 0x02040300}, + {0x0000b07c, 0x02040301}, + {0x0000b080, 0x02040302}, + {0x0000b084, 0x02040303}, + {0x0000b088, 0x02030500}, + {0x0000b08c, 0x02040400}, + {0x0000b090, 0x02050203}, + {0x0000b094, 0x02050204}, + {0x0000b098, 0x02050205}, + {0x0000b09c, 0x02040500}, + {0x0000b0a0, 0x02050301}, + {0x0000b0a4, 0x02050302}, + {0x0000b0a8, 0x02050303}, + {0x0000b0ac, 0x02050400}, + {0x0000b0b0, 0x02050401}, + {0x0000b0b4, 0x02050402}, + {0x0000b0b8, 0x02050403}, + {0x0000b0bc, 0x02050500}, + {0x0000b0c0, 0x02050501}, + {0x0000b0c4, 0x02050502}, + {0x0000b0c8, 0x02050503}, + {0x0000b0cc, 0x02050504}, + {0x0000b0d0, 0x02050600}, + {0x0000b0d4, 0x02050601}, + {0x0000b0d8, 0x02050602}, + {0x0000b0dc, 0x02050603}, + {0x0000b0e0, 0x02050604}, + {0x0000b0e4, 0x02050700}, + {0x0000b0e8, 0x02050701}, + {0x0000b0ec, 0x02050702}, + {0x0000b0f0, 0x02050703}, + {0x0000b0f4, 0x02050704}, + {0x0000b0f8, 0x02050705}, + {0x0000b0fc, 0x02050708}, + {0x0000b100, 0x02050709}, + {0x0000b104, 0x0205070a}, + {0x0000b108, 0x0205070b}, + {0x0000b10c, 0x0205070c}, + {0x0000b110, 0x0205070d}, + {0x0000b114, 0x02050710}, + {0x0000b118, 0x02050711}, + {0x0000b11c, 0x02050712}, + {0x0000b120, 0x02050713}, + {0x0000b124, 0x02050714}, + {0x0000b128, 0x02050715}, + {0x0000b12c, 0x02050730}, + {0x0000b130, 0x02050731}, + {0x0000b134, 0x02050732}, + {0x0000b138, 0x02050733}, + {0x0000b13c, 0x02050734}, + {0x0000b140, 0x02050735}, + {0x0000b144, 0x02050750}, + {0x0000b148, 0x02050751}, + {0x0000b14c, 0x02050752}, + {0x0000b150, 0x02050753}, + {0x0000b154, 0x02050754}, + {0x0000b158, 0x02050755}, + {0x0000b15c, 0x02050770}, + {0x0000b160, 0x02050771}, + {0x0000b164, 0x02050772}, + {0x0000b168, 0x02050773}, + {0x0000b16c, 0x02050774}, + {0x0000b170, 0x02050775}, + {0x0000b174, 0x00000776}, + {0x0000b178, 0x00000776}, + {0x0000b17c, 0x00000776}, + {0x0000b180, 0x00000776}, + {0x0000b184, 0x00000776}, + {0x0000b188, 0x00000776}, + {0x0000b18c, 0x00000776}, + {0x0000b190, 0x00000776}, + {0x0000b194, 0x00000776}, + {0x0000b198, 0x00000776}, + {0x0000b19c, 0x00000776}, + {0x0000b1a0, 0x00000776}, + {0x0000b1a4, 0x00000776}, + {0x0000b1a8, 0x00000776}, + {0x0000b1ac, 0x00000776}, + {0x0000b1b0, 0x00000776}, + {0x0000b1b4, 0x00000776}, + {0x0000b1b8, 0x00000776}, + {0x0000b1bc, 0x00000776}, + {0x0000b1c0, 0x00000776}, + {0x0000b1c4, 0x00000776}, + {0x0000b1c8, 0x00000776}, + {0x0000b1cc, 0x00000776}, + 
{0x0000b1d0, 0x00000776}, + {0x0000b1d4, 0x00000776}, + {0x0000b1d8, 0x00000776}, + {0x0000b1dc, 0x00000776}, + {0x0000b1e0, 0x00000776}, + {0x0000b1e4, 0x00000776}, + {0x0000b1e8, 0x00000776}, + {0x0000b1ec, 0x00000776}, + {0x0000b1f0, 0x00000776}, + {0x0000b1f4, 0x00000776}, + {0x0000b1f8, 0x00000776}, + {0x0000b1fc, 0x00000776}, +}; + +static const u32 ar9300_2p2_mac_postamble[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00}, + {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a}, + {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, +}; + +static const u32 ar9300_2p2_soc_postamble[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023}, +}; + +static const u32 ar9200_merlin_2p2_radio_core[][2] = { + /* Addr allmodes */ + {0x00007800, 0x00040000}, + {0x00007804, 0xdb005012}, + {0x00007808, 0x04924914}, + {0x0000780c, 0x21084210}, + {0x00007810, 0x6d801300}, + {0x00007814, 0x0019beff}, + {0x00007818, 0x07e41000}, + {0x0000781c, 0x00392000}, + {0x00007820, 0x92592480}, + {0x00007824, 0x00040000}, + {0x00007828, 0xdb005012}, + {0x0000782c, 0x04924914}, + {0x00007830, 0x21084210}, + {0x00007834, 0x6d801300}, + {0x00007838, 0x0019beff}, + {0x0000783c, 0x07e40000}, + {0x00007840, 0x00392000}, + {0x00007844, 0x92592480}, + {0x00007848, 0x00100000}, + {0x0000784c, 0x773f0567}, + {0x00007850, 0x54214514}, + {0x00007854, 0x12035828}, + {0x00007858, 0x92592692}, + {0x0000785c, 0x00000000}, + {0x00007860, 0x56400000}, + {0x00007864, 0x0a8e370e}, + {0x00007868, 0xc0102850}, + {0x0000786c, 0x812d4000}, + {0x00007870, 0x807ec400}, + {0x00007874, 0x001b6db0}, + {0x00007878, 0x00376b63}, + {0x0000787c, 0x06db6db6}, + {0x00007880, 0x006d8000}, + {0x00007884, 0xffeffffe}, + {0x00007888, 0xffeffffe}, + {0x0000788c, 0x00010000}, + {0x00007890, 0x02060aeb}, + {0x00007894, 0x5a108000}, +}; + +static const u32 ar9300_2p2_baseband_postamble[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011}, + {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e}, + {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, + {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, + {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, + {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, + {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, + {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, + {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, + {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, + {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, + {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, + {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, + {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, + {0x00009fc8, 0x0003f000, 
0x0003f000, 0x0001a000, 0x0001a000}, + {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0}, + {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, + {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b}, + {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff}, + {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, + {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, + {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, + {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002}, + {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501}, + {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b}, + {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, + {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, + {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982}, + {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, + {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, + {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, + {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, + {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, + {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, + {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, + {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, + {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, + {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, + {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, +}; + +static const u32 ar9300_2p2_baseband_core[][2] = { + /* Addr allmodes */ + {0x00009800, 0xafe68e30}, + {0x00009804, 0xfd14e000}, + {0x00009808, 0x9c0a9f6b}, + {0x0000980c, 0x04900000}, + {0x00009814, 0x9280c00a}, + {0x00009818, 0x00000000}, + {0x0000981c, 0x00020028}, + {0x00009834, 0x5f3ca3de}, + {0x00009838, 0x0108ecff}, + {0x0000983c, 0x14750600}, + {0x00009880, 0x201fff00}, + {0x00009884, 0x00001042}, + {0x000098a4, 0x00200400}, + {0x000098b0, 0x52440bbe}, + {0x000098d0, 0x004b6a8e}, + {0x000098d4, 0x00000820}, + {0x000098dc, 0x00000000}, + {0x000098f0, 0x00000000}, + {0x000098f4, 0x00000000}, + {0x00009c04, 0xff55ff55}, + {0x00009c08, 0x0320ff55}, + {0x00009c0c, 0x00000000}, + {0x00009c10, 0x00000000}, + {0x00009c14, 0x00046384}, + {0x00009c18, 0x05b6b440}, + {0x00009c1c, 0x00b6b440}, + {0x00009d00, 0xc080a333}, + {0x00009d04, 0x40206c10}, + {0x00009d08, 0x009c4060}, + {0x00009d0c, 0x9883800a}, + {0x00009d10, 0x01834061}, + {0x00009d14, 0x00c0040b}, + {0x00009d18, 0x00000000}, + {0x00009e08, 0x0038230c}, + {0x00009e24, 0x990bb515}, + {0x00009e28, 0x0c6f0000}, + {0x00009e30, 0x06336f77}, + {0x00009e34, 0x6af6532f}, + {0x00009e38, 0x0cc80c00}, + {0x00009e3c, 0xcf946222}, + {0x00009e40, 0x0d261820}, + {0x00009e4c, 0x00001004}, + {0x00009e50, 0x00ff03f1}, + {0x00009e54, 0x00000000}, + {0x00009fc0, 0x803e4788}, + {0x00009fc4, 0x0001efb5}, + {0x00009fcc, 0x40000014}, + {0x00009fd0, 0x01193b93}, + {0x0000a20c, 0x00000000}, + {0x0000a220, 0x00000000}, + {0x0000a224, 0x00000000}, + {0x0000a228, 
0x10002310}, + {0x0000a22c, 0x01036a1e}, + {0x0000a23c, 0x00000000}, + {0x0000a244, 0x0c000000}, + {0x0000a2a0, 0x00000001}, + {0x0000a2c0, 0x00000001}, + {0x0000a2c8, 0x00000000}, + {0x0000a2cc, 0x18c43433}, + {0x0000a2d4, 0x00000000}, + {0x0000a2dc, 0x00000000}, + {0x0000a2e0, 0x00000000}, + {0x0000a2e4, 0x00000000}, + {0x0000a2e8, 0x00000000}, + {0x0000a2ec, 0x00000000}, + {0x0000a2f0, 0x00000000}, + {0x0000a2f4, 0x00000000}, + {0x0000a2f8, 0x00000000}, + {0x0000a344, 0x00000000}, + {0x0000a34c, 0x00000000}, + {0x0000a350, 0x0000a000}, + {0x0000a364, 0x00000000}, + {0x0000a370, 0x00000000}, + {0x0000a390, 0x00000001}, + {0x0000a394, 0x00000444}, + {0x0000a398, 0x001f0e0f}, + {0x0000a39c, 0x0075393f}, + {0x0000a3a0, 0xb79f6427}, + {0x0000a3a4, 0x00000000}, + {0x0000a3a8, 0xaaaaaaaa}, + {0x0000a3ac, 0x3c466478}, + {0x0000a3c0, 0x20202020}, + {0x0000a3c4, 0x22222220}, + {0x0000a3c8, 0x20200020}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3d8, 0x20202020}, + {0x0000a3dc, 0x20202020}, + {0x0000a3e0, 0x20202020}, + {0x0000a3e4, 0x20202020}, + {0x0000a3e8, 0x20202020}, + {0x0000a3ec, 0x20202020}, + {0x0000a3f0, 0x00000000}, + {0x0000a3f4, 0x00000246}, + {0x0000a3f8, 0x0cdbd380}, + {0x0000a3fc, 0x000f0f01}, + {0x0000a400, 0x8fa91f01}, + {0x0000a404, 0x00000000}, + {0x0000a408, 0x0e79e5c6}, + {0x0000a40c, 0x00820820}, + {0x0000a414, 0x1ce739ce}, + {0x0000a418, 0x2d001dce}, + {0x0000a41c, 0x1ce739ce}, + {0x0000a420, 0x000001ce}, + {0x0000a424, 0x1ce739ce}, + {0x0000a428, 0x000001ce}, + {0x0000a42c, 0x1ce739ce}, + {0x0000a430, 0x1ce739ce}, + {0x0000a434, 0x00000000}, + {0x0000a438, 0x00001801}, + {0x0000a43c, 0x00000000}, + {0x0000a440, 0x00000000}, + {0x0000a444, 0x00000000}, + {0x0000a448, 0x06000080}, + {0x0000a44c, 0x00000001}, + {0x0000a450, 0x00010000}, + {0x0000a458, 0x00000000}, + {0x0000a600, 0x00000000}, + {0x0000a604, 0x00000000}, + {0x0000a608, 0x00000000}, + {0x0000a60c, 0x00000000}, + {0x0000a610, 0x00000000}, + {0x0000a614, 0x00000000}, + {0x0000a618, 0x00000000}, + {0x0000a61c, 0x00000000}, + {0x0000a620, 0x00000000}, + {0x0000a624, 0x00000000}, + {0x0000a628, 0x00000000}, + {0x0000a62c, 0x00000000}, + {0x0000a630, 0x00000000}, + {0x0000a634, 0x00000000}, + {0x0000a638, 0x00000000}, + {0x0000a63c, 0x00000000}, + {0x0000a640, 0x00000000}, + {0x0000a644, 0x3fad9d74}, + {0x0000a648, 0x0048060a}, + {0x0000a64c, 0x00000637}, + {0x0000a670, 0x03020100}, + {0x0000a674, 0x09080504}, + {0x0000a678, 0x0d0c0b0a}, + {0x0000a67c, 0x13121110}, + {0x0000a680, 0x31301514}, + {0x0000a684, 0x35343332}, + {0x0000a688, 0x00000036}, + {0x0000a690, 0x00000838}, + {0x0000a7c0, 0x00000000}, + {0x0000a7c4, 0xfffffffc}, + {0x0000a7c8, 0x00000000}, + {0x0000a7cc, 0x00000000}, + {0x0000a7d0, 0x00000000}, + {0x0000a7d4, 0x00000004}, + {0x0000a7dc, 0x00000001}, + {0x0000a8d0, 0x004b6a8e}, + {0x0000a8d4, 0x00000820}, + {0x0000a8dc, 0x00000000}, + {0x0000a8f0, 0x00000000}, + {0x0000a8f4, 0x00000000}, + {0x0000b2d0, 0x00000080}, + {0x0000b2d4, 0x00000000}, + {0x0000b2dc, 0x00000000}, + {0x0000b2e0, 0x00000000}, + {0x0000b2e4, 0x00000000}, + {0x0000b2e8, 0x00000000}, + {0x0000b2ec, 0x00000000}, + {0x0000b2f0, 0x00000000}, + {0x0000b2f4, 0x00000000}, + {0x0000b2f8, 0x00000000}, + {0x0000b408, 0x0e79e5c0}, + {0x0000b40c, 0x00820820}, + {0x0000b420, 0x00000000}, + {0x0000b8d0, 0x004b6a8e}, + {0x0000b8d4, 0x00000820}, + {0x0000b8dc, 0x00000000}, + {0x0000b8f0, 0x00000000}, + {0x0000b8f4, 0x00000000}, + {0x0000c2d0, 0x00000080}, + {0x0000c2d4, 0x00000000}, + 
{0x0000c2dc, 0x00000000}, + {0x0000c2e0, 0x00000000}, + {0x0000c2e4, 0x00000000}, + {0x0000c2e8, 0x00000000}, + {0x0000c2ec, 0x00000000}, + {0x0000c2f0, 0x00000000}, + {0x0000c2f4, 0x00000000}, + {0x0000c2f8, 0x00000000}, + {0x0000c408, 0x0e79e5c0}, + {0x0000c40c, 0x00820820}, + {0x0000c420, 0x00000000}, +}; + +static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, + {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 
0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, + {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, + {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, + {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, + {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, + {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, + {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, + {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, + {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, +}; + +static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, + {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 0x6f02bf16, 
0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, + {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, + {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, + {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, + {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, + {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, + {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, +}; + +static const u32 ar9300Common_rx_gain_table_2p2[][2] = { + /* Addr allmodes */ + {0x0000a000, 0x00010000}, + {0x0000a004, 0x00030002}, + {0x0000a008, 0x00050004}, + {0x0000a00c, 0x00810080}, + {0x0000a010, 0x00830082}, + {0x0000a014, 0x01810180}, + {0x0000a018, 0x01830182}, + {0x0000a01c, 0x01850184}, + {0x0000a020, 0x01890188}, + {0x0000a024, 0x018b018a}, + {0x0000a028, 0x018d018c}, + {0x0000a02c, 0x01910190}, + {0x0000a030, 0x01930192}, + {0x0000a034, 0x01950194}, + {0x0000a038, 0x038a0196}, + {0x0000a03c, 0x038c038b}, + {0x0000a040, 0x0390038d}, + {0x0000a044, 0x03920391}, + {0x0000a048, 0x03940393}, + {0x0000a04c, 
0x03960395}, + {0x0000a050, 0x00000000}, + {0x0000a054, 0x00000000}, + {0x0000a058, 0x00000000}, + {0x0000a05c, 0x00000000}, + {0x0000a060, 0x00000000}, + {0x0000a064, 0x00000000}, + {0x0000a068, 0x00000000}, + {0x0000a06c, 0x00000000}, + {0x0000a070, 0x00000000}, + {0x0000a074, 0x00000000}, + {0x0000a078, 0x00000000}, + {0x0000a07c, 0x00000000}, + {0x0000a080, 0x22222229}, + {0x0000a084, 0x1d1d1d1d}, + {0x0000a088, 0x1d1d1d1d}, + {0x0000a08c, 0x1d1d1d1d}, + {0x0000a090, 0x171d1d1d}, + {0x0000a094, 0x11111717}, + {0x0000a098, 0x00030311}, + {0x0000a09c, 0x00000000}, + {0x0000a0a0, 0x00000000}, + {0x0000a0a4, 0x00000000}, + {0x0000a0a8, 0x00000000}, + {0x0000a0ac, 0x00000000}, + {0x0000a0b0, 0x00000000}, + {0x0000a0b4, 0x00000000}, + {0x0000a0b8, 0x00000000}, + {0x0000a0bc, 0x00000000}, + {0x0000a0c0, 0x001f0000}, + {0x0000a0c4, 0x01000101}, + {0x0000a0c8, 0x011e011f}, + {0x0000a0cc, 0x011c011d}, + {0x0000a0d0, 0x02030204}, + {0x0000a0d4, 0x02010202}, + {0x0000a0d8, 0x021f0200}, + {0x0000a0dc, 0x0302021e}, + {0x0000a0e0, 0x03000301}, + {0x0000a0e4, 0x031e031f}, + {0x0000a0e8, 0x0402031d}, + {0x0000a0ec, 0x04000401}, + {0x0000a0f0, 0x041e041f}, + {0x0000a0f4, 0x0502041d}, + {0x0000a0f8, 0x05000501}, + {0x0000a0fc, 0x051e051f}, + {0x0000a100, 0x06010602}, + {0x0000a104, 0x061f0600}, + {0x0000a108, 0x061d061e}, + {0x0000a10c, 0x07020703}, + {0x0000a110, 0x07000701}, + {0x0000a114, 0x00000000}, + {0x0000a118, 0x00000000}, + {0x0000a11c, 0x00000000}, + {0x0000a120, 0x00000000}, + {0x0000a124, 0x00000000}, + {0x0000a128, 0x00000000}, + {0x0000a12c, 0x00000000}, + {0x0000a130, 0x00000000}, + {0x0000a134, 0x00000000}, + {0x0000a138, 0x00000000}, + {0x0000a13c, 0x00000000}, + {0x0000a140, 0x001f0000}, + {0x0000a144, 0x01000101}, + {0x0000a148, 0x011e011f}, + {0x0000a14c, 0x011c011d}, + {0x0000a150, 0x02030204}, + {0x0000a154, 0x02010202}, + {0x0000a158, 0x021f0200}, + {0x0000a15c, 0x0302021e}, + {0x0000a160, 0x03000301}, + {0x0000a164, 0x031e031f}, + {0x0000a168, 0x0402031d}, + {0x0000a16c, 0x04000401}, + {0x0000a170, 0x041e041f}, + {0x0000a174, 0x0502041d}, + {0x0000a178, 0x05000501}, + {0x0000a17c, 0x051e051f}, + {0x0000a180, 0x06010602}, + {0x0000a184, 0x061f0600}, + {0x0000a188, 0x061d061e}, + {0x0000a18c, 0x07020703}, + {0x0000a190, 0x07000701}, + {0x0000a194, 0x00000000}, + {0x0000a198, 0x00000000}, + {0x0000a19c, 0x00000000}, + {0x0000a1a0, 0x00000000}, + {0x0000a1a4, 0x00000000}, + {0x0000a1a8, 0x00000000}, + {0x0000a1ac, 0x00000000}, + {0x0000a1b0, 0x00000000}, + {0x0000a1b4, 0x00000000}, + {0x0000a1b8, 0x00000000}, + {0x0000a1bc, 0x00000000}, + {0x0000a1c0, 0x00000000}, + {0x0000a1c4, 0x00000000}, + {0x0000a1c8, 0x00000000}, + {0x0000a1cc, 0x00000000}, + {0x0000a1d0, 0x00000000}, + {0x0000a1d4, 0x00000000}, + {0x0000a1d8, 0x00000000}, + {0x0000a1dc, 0x00000000}, + {0x0000a1e0, 0x00000000}, + {0x0000a1e4, 0x00000000}, + {0x0000a1e8, 0x00000000}, + {0x0000a1ec, 0x00000000}, + {0x0000a1f0, 0x00000396}, + {0x0000a1f4, 0x00000396}, + {0x0000a1f8, 0x00000396}, + {0x0000a1fc, 0x00000196}, + {0x0000b000, 0x00010000}, + {0x0000b004, 0x00030002}, + {0x0000b008, 0x00050004}, + {0x0000b00c, 0x00810080}, + {0x0000b010, 0x00830082}, + {0x0000b014, 0x01810180}, + {0x0000b018, 0x01830182}, + {0x0000b01c, 0x01850184}, + {0x0000b020, 0x02810280}, + {0x0000b024, 0x02830282}, + {0x0000b028, 0x02850284}, + {0x0000b02c, 0x02890288}, + {0x0000b030, 0x028b028a}, + {0x0000b034, 0x0388028c}, + {0x0000b038, 0x038a0389}, + {0x0000b03c, 0x038c038b}, + {0x0000b040, 0x0390038d}, + {0x0000b044, 0x03920391}, + 
{0x0000b048, 0x03940393}, + {0x0000b04c, 0x03960395}, + {0x0000b050, 0x00000000}, + {0x0000b054, 0x00000000}, + {0x0000b058, 0x00000000}, + {0x0000b05c, 0x00000000}, + {0x0000b060, 0x00000000}, + {0x0000b064, 0x00000000}, + {0x0000b068, 0x00000000}, + {0x0000b06c, 0x00000000}, + {0x0000b070, 0x00000000}, + {0x0000b074, 0x00000000}, + {0x0000b078, 0x00000000}, + {0x0000b07c, 0x00000000}, + {0x0000b080, 0x32323232}, + {0x0000b084, 0x2f2f3232}, + {0x0000b088, 0x23282a2d}, + {0x0000b08c, 0x1c1e2123}, + {0x0000b090, 0x14171919}, + {0x0000b094, 0x0e0e1214}, + {0x0000b098, 0x03050707}, + {0x0000b09c, 0x00030303}, + {0x0000b0a0, 0x00000000}, + {0x0000b0a4, 0x00000000}, + {0x0000b0a8, 0x00000000}, + {0x0000b0ac, 0x00000000}, + {0x0000b0b0, 0x00000000}, + {0x0000b0b4, 0x00000000}, + {0x0000b0b8, 0x00000000}, + {0x0000b0bc, 0x00000000}, + {0x0000b0c0, 0x003f0020}, + {0x0000b0c4, 0x00400041}, + {0x0000b0c8, 0x0140005f}, + {0x0000b0cc, 0x0160015f}, + {0x0000b0d0, 0x017e017f}, + {0x0000b0d4, 0x02410242}, + {0x0000b0d8, 0x025f0240}, + {0x0000b0dc, 0x027f0260}, + {0x0000b0e0, 0x0341027e}, + {0x0000b0e4, 0x035f0340}, + {0x0000b0e8, 0x037f0360}, + {0x0000b0ec, 0x04400441}, + {0x0000b0f0, 0x0460045f}, + {0x0000b0f4, 0x0541047f}, + {0x0000b0f8, 0x055f0540}, + {0x0000b0fc, 0x057f0560}, + {0x0000b100, 0x06400641}, + {0x0000b104, 0x0660065f}, + {0x0000b108, 0x067e067f}, + {0x0000b10c, 0x07410742}, + {0x0000b110, 0x075f0740}, + {0x0000b114, 0x077f0760}, + {0x0000b118, 0x07800781}, + {0x0000b11c, 0x07a0079f}, + {0x0000b120, 0x07c107bf}, + {0x0000b124, 0x000007c0}, + {0x0000b128, 0x00000000}, + {0x0000b12c, 0x00000000}, + {0x0000b130, 0x00000000}, + {0x0000b134, 0x00000000}, + {0x0000b138, 0x00000000}, + {0x0000b13c, 0x00000000}, + {0x0000b140, 0x003f0020}, + {0x0000b144, 0x00400041}, + {0x0000b148, 0x0140005f}, + {0x0000b14c, 0x0160015f}, + {0x0000b150, 0x017e017f}, + {0x0000b154, 0x02410242}, + {0x0000b158, 0x025f0240}, + {0x0000b15c, 0x027f0260}, + {0x0000b160, 0x0341027e}, + {0x0000b164, 0x035f0340}, + {0x0000b168, 0x037f0360}, + {0x0000b16c, 0x04400441}, + {0x0000b170, 0x0460045f}, + {0x0000b174, 0x0541047f}, + {0x0000b178, 0x055f0540}, + {0x0000b17c, 0x057f0560}, + {0x0000b180, 0x06400641}, + {0x0000b184, 0x0660065f}, + {0x0000b188, 0x067e067f}, + {0x0000b18c, 0x07410742}, + {0x0000b190, 0x075f0740}, + {0x0000b194, 0x077f0760}, + {0x0000b198, 0x07800781}, + {0x0000b19c, 0x07a0079f}, + {0x0000b1a0, 0x07c107bf}, + {0x0000b1a4, 0x000007c0}, + {0x0000b1a8, 0x00000000}, + {0x0000b1ac, 0x00000000}, + {0x0000b1b0, 0x00000000}, + {0x0000b1b4, 0x00000000}, + {0x0000b1b8, 0x00000000}, + {0x0000b1bc, 0x00000000}, + {0x0000b1c0, 0x00000000}, + {0x0000b1c4, 0x00000000}, + {0x0000b1c8, 0x00000000}, + {0x0000b1cc, 0x00000000}, + {0x0000b1d0, 0x00000000}, + {0x0000b1d4, 0x00000000}, + {0x0000b1d8, 0x00000000}, + {0x0000b1dc, 0x00000000}, + {0x0000b1e0, 0x00000000}, + {0x0000b1e4, 0x00000000}, + {0x0000b1e8, 0x00000000}, + {0x0000b1ec, 0x00000000}, + {0x0000b1f0, 0x00000396}, + {0x0000b1f4, 0x00000396}, + {0x0000b1f8, 0x00000396}, + {0x0000b1fc, 0x00000196}, +}; + +static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, + {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, + {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x16000220, 0x16000220, 
0x0f000202, 0x0f000202}, + {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, + {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402}, + {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404}, + {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, + {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, + {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, + {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20}, + {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20}, + {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22}, + {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, + {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, + {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, + {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861}, + {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81}, + {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83}, + {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84}, + {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3}, + {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5}, + {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9}, + {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb}, + {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, + {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, + {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, + {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, + {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400}, + {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402}, + {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603}, + {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02}, + {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04}, + {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20}, + {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20}, + {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22}, + {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, + {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, + {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, + {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861}, + {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81}, + {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83}, + {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84}, + {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3}, + {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5}, + {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9}, + {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb}, + {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + 
{0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, + {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, + {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, + {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, + {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, + {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, + {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, +}; + +static const u32 ar9300_2p2_mac_core[][2] = { + /* Addr allmodes */ + {0x00000008, 0x00000000}, + {0x00000030, 0x00020085}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000000}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x000010f0, 0x00000100}, + {0x00001270, 0x00000000}, + {0x000012b0, 0x00000000}, + {0x000012f0, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00008000, 0x00000000}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000000}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008040, 0x00000000}, + {0x00008044, 0x00000000}, + {0x00008048, 0x00000000}, + {0x0000804c, 0xffffffff}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x00008070, 0x00000310}, + {0x00008074, 0x00000020}, + {0x00008078, 0x00000000}, + {0x0000809c, 0x0000000f}, + {0x000080a0, 0x00000000}, + {0x000080a4, 0x02ff0000}, + {0x000080a8, 0x0e070605}, + {0x000080ac, 0x0000000d}, + {0x000080b0, 0x00000000}, + {0x000080b4, 0x00000000}, + {0x000080b8, 0x00000000}, + {0x000080bc, 0x00000000}, + {0x000080c0, 0x2a800000}, + {0x000080c4, 0x06900168}, + {0x000080c8, 0x13881c20}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00252500}, + {0x000080d4, 0x00a00000}, + {0x000080d8, 0x00400000}, + {0x000080dc, 0x00000000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x3f3f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00000000}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000000}, + {0x00008114, 0x000007ff}, + {0x00008118, 0x000000aa}, + {0x0000811c, 0x00003210}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x0000ffff}, + {0x00008144, 0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x18486200}, + {0x00008174, 0x33332210}, + {0x00008178, 0x00000000}, + {0x0000817c, 0x00020000}, + {0x000081c0, 0x00000000}, + {0x000081c4, 0x33332210}, + {0x000081c8, 0x00000000}, + {0x000081cc, 0x00000000}, + {0x000081d4, 0x00000000}, + {0x000081ec, 
0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f424}, + {0x00008248, 0x00000800}, + {0x0000824c, 0x0001e848}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x40000000}, + {0x00008260, 0x00080922}, + {0x00008264, 0x9bc00010}, + {0x00008268, 0xffffffff}, + {0x0000826c, 0x0000ffff}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000004}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x000000ff}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x0000829c, 0x00000000}, + {0x00008300, 0x00000140}, + {0x00008314, 0x00000000}, + {0x0000831c, 0x0000010d}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000700}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x02400000}, + {0x00008340, 0x000107ff}, + {0x00008344, 0xaa48105b}, + {0x00008348, 0x008f0000}, + {0x0000835c, 0x00000000}, + {0x00008360, 0xffffffff}, + {0x00008364, 0xffffffff}, + {0x00008368, 0x00000000}, + {0x00008370, 0x00000000}, + {0x00008374, 0x000000ff}, + {0x00008378, 0x00000000}, + {0x0000837c, 0x00000000}, + {0x00008380, 0xffffffff}, + {0x00008384, 0xffffffff}, + {0x00008390, 0xffffffff}, + {0x00008394, 0xffffffff}, + {0x00008398, 0x00000000}, + {0x0000839c, 0x00000000}, + {0x000083a0, 0x00000000}, + {0x000083a4, 0x0000fa14}, + {0x000083a8, 0x000f0c00}, + {0x000083ac, 0x33332210}, + {0x000083b0, 0x33332210}, + {0x000083b4, 0x33332210}, + {0x000083b8, 0x33332210}, + {0x000083bc, 0x00000000}, + {0x000083c0, 0x00000000}, + {0x000083c4, 0x00000000}, + {0x000083c8, 0x00000000}, + {0x000083cc, 0x00000200}, + {0x000083d0, 0x000301ff}, +}; + +static const u32 ar9300Common_wo_xlna_rx_gain_table_2p2[][2] = { + /* Addr allmodes */ + {0x0000a000, 0x00010000}, + {0x0000a004, 0x00030002}, + {0x0000a008, 0x00050004}, + {0x0000a00c, 0x00810080}, + {0x0000a010, 0x00830082}, + {0x0000a014, 0x01810180}, + {0x0000a018, 0x01830182}, + {0x0000a01c, 0x01850184}, + {0x0000a020, 0x01890188}, + {0x0000a024, 0x018b018a}, + {0x0000a028, 0x018d018c}, + {0x0000a02c, 0x03820190}, + {0x0000a030, 0x03840383}, + {0x0000a034, 0x03880385}, + {0x0000a038, 0x038a0389}, + {0x0000a03c, 0x038c038b}, + {0x0000a040, 0x0390038d}, + {0x0000a044, 0x03920391}, + {0x0000a048, 0x03940393}, + {0x0000a04c, 0x03960395}, + {0x0000a050, 0x00000000}, + {0x0000a054, 0x00000000}, + {0x0000a058, 0x00000000}, + {0x0000a05c, 0x00000000}, + {0x0000a060, 0x00000000}, + {0x0000a064, 0x00000000}, + {0x0000a068, 0x00000000}, + {0x0000a06c, 0x00000000}, + {0x0000a070, 0x00000000}, + {0x0000a074, 0x00000000}, + {0x0000a078, 0x00000000}, + {0x0000a07c, 0x00000000}, + {0x0000a080, 0x29292929}, + {0x0000a084, 0x29292929}, + {0x0000a088, 0x29292929}, + {0x0000a08c, 0x29292929}, + {0x0000a090, 0x22292929}, + {0x0000a094, 0x1d1d2222}, + {0x0000a098, 0x0c111117}, + {0x0000a09c, 0x00030303}, + {0x0000a0a0, 0x00000000}, + {0x0000a0a4, 0x00000000}, + {0x0000a0a8, 0x00000000}, + {0x0000a0ac, 0x00000000}, + {0x0000a0b0, 0x00000000}, + {0x0000a0b4, 0x00000000}, + {0x0000a0b8, 0x00000000}, + {0x0000a0bc, 0x00000000}, + {0x0000a0c0, 0x001f0000}, + {0x0000a0c4, 0x01000101}, + {0x0000a0c8, 0x011e011f}, + {0x0000a0cc, 0x011c011d}, + {0x0000a0d0, 0x02030204}, + {0x0000a0d4, 0x02010202}, + {0x0000a0d8, 0x021f0200}, + {0x0000a0dc, 0x0302021e}, + {0x0000a0e0, 0x03000301}, + {0x0000a0e4, 0x031e031f}, + 
{0x0000a0e8, 0x0402031d}, + {0x0000a0ec, 0x04000401}, + {0x0000a0f0, 0x041e041f}, + {0x0000a0f4, 0x0502041d}, + {0x0000a0f8, 0x05000501}, + {0x0000a0fc, 0x051e051f}, + {0x0000a100, 0x06010602}, + {0x0000a104, 0x061f0600}, + {0x0000a108, 0x061d061e}, + {0x0000a10c, 0x07020703}, + {0x0000a110, 0x07000701}, + {0x0000a114, 0x00000000}, + {0x0000a118, 0x00000000}, + {0x0000a11c, 0x00000000}, + {0x0000a120, 0x00000000}, + {0x0000a124, 0x00000000}, + {0x0000a128, 0x00000000}, + {0x0000a12c, 0x00000000}, + {0x0000a130, 0x00000000}, + {0x0000a134, 0x00000000}, + {0x0000a138, 0x00000000}, + {0x0000a13c, 0x00000000}, + {0x0000a140, 0x001f0000}, + {0x0000a144, 0x01000101}, + {0x0000a148, 0x011e011f}, + {0x0000a14c, 0x011c011d}, + {0x0000a150, 0x02030204}, + {0x0000a154, 0x02010202}, + {0x0000a158, 0x021f0200}, + {0x0000a15c, 0x0302021e}, + {0x0000a160, 0x03000301}, + {0x0000a164, 0x031e031f}, + {0x0000a168, 0x0402031d}, + {0x0000a16c, 0x04000401}, + {0x0000a170, 0x041e041f}, + {0x0000a174, 0x0502041d}, + {0x0000a178, 0x05000501}, + {0x0000a17c, 0x051e051f}, + {0x0000a180, 0x06010602}, + {0x0000a184, 0x061f0600}, + {0x0000a188, 0x061d061e}, + {0x0000a18c, 0x07020703}, + {0x0000a190, 0x07000701}, + {0x0000a194, 0x00000000}, + {0x0000a198, 0x00000000}, + {0x0000a19c, 0x00000000}, + {0x0000a1a0, 0x00000000}, + {0x0000a1a4, 0x00000000}, + {0x0000a1a8, 0x00000000}, + {0x0000a1ac, 0x00000000}, + {0x0000a1b0, 0x00000000}, + {0x0000a1b4, 0x00000000}, + {0x0000a1b8, 0x00000000}, + {0x0000a1bc, 0x00000000}, + {0x0000a1c0, 0x00000000}, + {0x0000a1c4, 0x00000000}, + {0x0000a1c8, 0x00000000}, + {0x0000a1cc, 0x00000000}, + {0x0000a1d0, 0x00000000}, + {0x0000a1d4, 0x00000000}, + {0x0000a1d8, 0x00000000}, + {0x0000a1dc, 0x00000000}, + {0x0000a1e0, 0x00000000}, + {0x0000a1e4, 0x00000000}, + {0x0000a1e8, 0x00000000}, + {0x0000a1ec, 0x00000000}, + {0x0000a1f0, 0x00000396}, + {0x0000a1f4, 0x00000396}, + {0x0000a1f8, 0x00000396}, + {0x0000a1fc, 0x00000196}, + {0x0000b000, 0x00010000}, + {0x0000b004, 0x00030002}, + {0x0000b008, 0x00050004}, + {0x0000b00c, 0x00810080}, + {0x0000b010, 0x00830082}, + {0x0000b014, 0x01810180}, + {0x0000b018, 0x01830182}, + {0x0000b01c, 0x01850184}, + {0x0000b020, 0x02810280}, + {0x0000b024, 0x02830282}, + {0x0000b028, 0x02850284}, + {0x0000b02c, 0x02890288}, + {0x0000b030, 0x028b028a}, + {0x0000b034, 0x0388028c}, + {0x0000b038, 0x038a0389}, + {0x0000b03c, 0x038c038b}, + {0x0000b040, 0x0390038d}, + {0x0000b044, 0x03920391}, + {0x0000b048, 0x03940393}, + {0x0000b04c, 0x03960395}, + {0x0000b050, 0x00000000}, + {0x0000b054, 0x00000000}, + {0x0000b058, 0x00000000}, + {0x0000b05c, 0x00000000}, + {0x0000b060, 0x00000000}, + {0x0000b064, 0x00000000}, + {0x0000b068, 0x00000000}, + {0x0000b06c, 0x00000000}, + {0x0000b070, 0x00000000}, + {0x0000b074, 0x00000000}, + {0x0000b078, 0x00000000}, + {0x0000b07c, 0x00000000}, + {0x0000b080, 0x32323232}, + {0x0000b084, 0x2f2f3232}, + {0x0000b088, 0x23282a2d}, + {0x0000b08c, 0x1c1e2123}, + {0x0000b090, 0x14171919}, + {0x0000b094, 0x0e0e1214}, + {0x0000b098, 0x03050707}, + {0x0000b09c, 0x00030303}, + {0x0000b0a0, 0x00000000}, + {0x0000b0a4, 0x00000000}, + {0x0000b0a8, 0x00000000}, + {0x0000b0ac, 0x00000000}, + {0x0000b0b0, 0x00000000}, + {0x0000b0b4, 0x00000000}, + {0x0000b0b8, 0x00000000}, + {0x0000b0bc, 0x00000000}, + {0x0000b0c0, 0x003f0020}, + {0x0000b0c4, 0x00400041}, + {0x0000b0c8, 0x0140005f}, + {0x0000b0cc, 0x0160015f}, + {0x0000b0d0, 0x017e017f}, + {0x0000b0d4, 0x02410242}, + {0x0000b0d8, 0x025f0240}, + {0x0000b0dc, 0x027f0260}, + {0x0000b0e0, 0x0341027e}, 
+ {0x0000b0e4, 0x035f0340}, + {0x0000b0e8, 0x037f0360}, + {0x0000b0ec, 0x04400441}, + {0x0000b0f0, 0x0460045f}, + {0x0000b0f4, 0x0541047f}, + {0x0000b0f8, 0x055f0540}, + {0x0000b0fc, 0x057f0560}, + {0x0000b100, 0x06400641}, + {0x0000b104, 0x0660065f}, + {0x0000b108, 0x067e067f}, + {0x0000b10c, 0x07410742}, + {0x0000b110, 0x075f0740}, + {0x0000b114, 0x077f0760}, + {0x0000b118, 0x07800781}, + {0x0000b11c, 0x07a0079f}, + {0x0000b120, 0x07c107bf}, + {0x0000b124, 0x000007c0}, + {0x0000b128, 0x00000000}, + {0x0000b12c, 0x00000000}, + {0x0000b130, 0x00000000}, + {0x0000b134, 0x00000000}, + {0x0000b138, 0x00000000}, + {0x0000b13c, 0x00000000}, + {0x0000b140, 0x003f0020}, + {0x0000b144, 0x00400041}, + {0x0000b148, 0x0140005f}, + {0x0000b14c, 0x0160015f}, + {0x0000b150, 0x017e017f}, + {0x0000b154, 0x02410242}, + {0x0000b158, 0x025f0240}, + {0x0000b15c, 0x027f0260}, + {0x0000b160, 0x0341027e}, + {0x0000b164, 0x035f0340}, + {0x0000b168, 0x037f0360}, + {0x0000b16c, 0x04400441}, + {0x0000b170, 0x0460045f}, + {0x0000b174, 0x0541047f}, + {0x0000b178, 0x055f0540}, + {0x0000b17c, 0x057f0560}, + {0x0000b180, 0x06400641}, + {0x0000b184, 0x0660065f}, + {0x0000b188, 0x067e067f}, + {0x0000b18c, 0x07410742}, + {0x0000b190, 0x075f0740}, + {0x0000b194, 0x077f0760}, + {0x0000b198, 0x07800781}, + {0x0000b19c, 0x07a0079f}, + {0x0000b1a0, 0x07c107bf}, + {0x0000b1a4, 0x000007c0}, + {0x0000b1a8, 0x00000000}, + {0x0000b1ac, 0x00000000}, + {0x0000b1b0, 0x00000000}, + {0x0000b1b4, 0x00000000}, + {0x0000b1b8, 0x00000000}, + {0x0000b1bc, 0x00000000}, + {0x0000b1c0, 0x00000000}, + {0x0000b1c4, 0x00000000}, + {0x0000b1c8, 0x00000000}, + {0x0000b1cc, 0x00000000}, + {0x0000b1d0, 0x00000000}, + {0x0000b1d4, 0x00000000}, + {0x0000b1d8, 0x00000000}, + {0x0000b1dc, 0x00000000}, + {0x0000b1e0, 0x00000000}, + {0x0000b1e4, 0x00000000}, + {0x0000b1e8, 0x00000000}, + {0x0000b1ec, 0x00000000}, + {0x0000b1f0, 0x00000396}, + {0x0000b1f4, 0x00000396}, + {0x0000b1f8, 0x00000396}, + {0x0000b1fc, 0x00000196}, +}; + +static const u32 ar9300_2p2_soc_preamble[][2] = { + /* Addr allmodes */ + {0x000040a4, 0x00a0c1c9}, + {0x00007008, 0x00000000}, + {0x00007020, 0x00000000}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, + {0x00007048, 0x00000008}, +}; + +static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = { + /* Addr allmodes */ + {0x00004040, 0x08212e5e}, + {0x00004040, 0x0008003b}, + {0x00004044, 0x00000000}, +}; + +static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = { + /* Addr allmodes */ + {0x00004040, 0x08253e5e}, + {0x00004040, 0x0008003b}, + {0x00004044, 0x00000000}, +}; + +static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = { + /* Addr allmodes */ + {0x00004040, 0x08213e5e}, + {0x00004040, 0x0008003b}, + {0x00004044, 0x00000000}, +}; + +#endif /* INITVALS_9003_2P2_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 56a9e5fa6d66f727055dcaaa2f5dc3da431bd156..5a06503991368f1f73bef5ed8df588970f68000f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -739,6 +739,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, */ ar9003_hw_set_chain_masks(ah, 0x7, 0x7); + /* Do Tx IQ Calibration */ + ar9003_hw_tx_iq_cal(ah); + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); + udelay(5); + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); + /* Calibrate the AGC */ REG_WRITE(ah, AR_PHY_AGC_CONTROL, REG_READ(ah, AR_PHY_AGC_CONTROL) | @@ -753,10 +759,6 @@ static bool ar9003_hw_init_cal(struct 
ath_hw *ah, return false; } - /* Do Tx IQ Calibration */ - if (ah->config.tx_iq_calibration) - ar9003_hw_tx_iq_cal(ah); - /* Revert chainmasks to their original values before NF cal */ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 23eb60ea54551eee078009418c1159eaffc622f2..ace8d2678b18a890c3d64b2756623ce0f29be9c4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -67,6 +67,7 @@ static const struct ar9300_eeprom ar9300_default = { * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled + * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 3, @@ -129,9 +130,11 @@ static const struct ar9300_eeprom ar9300_default = { .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, - .futureModal = { /* [32] */ + .papdRateMaskHt20 = LE32(0x80c080), + .papdRateMaskHt40 = LE32(0x80c080), + .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + 0, 0, 0, 0, 0, 0, 0, 0 }, }, .calFreqPier2G = { @@ -326,9 +329,11 @@ static const struct ar9300_eeprom ar9300_default = { .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, + .papdRateMaskHt20 = LE32(0xf0e0e0), + .papdRateMaskHt40 = LE32(0xf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + 0, 0, 0, 0, 0, 0, 0, 0 }, }, .calFreqPier5G = { @@ -644,6 +649,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, return (pBase->featureEnable & 0x10) >> 4; case EEP_SWREG: return le32_to_cpu(pBase->swreg); + case EEP_PAPRD: + return !!(pBase->featureEnable & BIT(5)); default: return 0; } @@ -944,7 +951,7 @@ static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah, return 1; } -static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 23fb353c3bba502f3eeacf1eaa0fbbea6d19c715..3c533bb983c72859c9fbea4563070f387747506e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -234,7 +234,9 @@ struct ar9300_modal_eep_header { u8 txEndToRxOn; u8 txFrameToXpaOn; u8 thresh62; - u8 futureModal[32]; + __le32 papdRateMaskHt20; + __le32 papdRateMaskHt40; + u8 futureModal[24]; } __packed; struct ar9300_cal_data_per_freq_op_loop { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index b15309caf1da816418ea447d2f7152f3ac438622..064168909108005531e90e9ea657f51a43866794 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -16,7 +16,8 @@ #include "hw.h" #include "ar9003_mac.h" -#include "ar9003_initvals.h" +#include "ar9003_2p0_initvals.h" +#include "ar9003_2p2_initvals.h" /* General hardware code for the AR9003 hadware family */ @@ -31,12 +32,8 @@ static bool ar9003_hw_macversion_supported(u32 macversion) return false; } -/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */ -/* - * XXX: move TX/RX gain INI to its own init_mode_gain_regs after - * ensuring it does not affect hardware 
bring up - */ -static void ar9003_hw_init_mode_regs(struct ath_hw *ah) +/* AR9003 2.0 */ +static void ar9003_2p0_hw_init_mode_regs(struct ath_hw *ah) { /* mac */ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); @@ -106,27 +103,128 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 3); } +/* AR9003 2.2 */ +static void ar9003_2p2_hw_init_mode_regs(struct ath_hw *ah) +{ + /* mac */ + INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], + ar9300_2p2_mac_core, + ARRAY_SIZE(ar9300_2p2_mac_core), 2); + INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST], + ar9300_2p2_mac_postamble, + ARRAY_SIZE(ar9300_2p2_mac_postamble), 5); + + /* bb */ + INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE], + ar9300_2p2_baseband_core, + ARRAY_SIZE(ar9300_2p2_baseband_core), 2); + INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST], + ar9300_2p2_baseband_postamble, + ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5); + + /* radio */ + INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE], + ar9300_2p2_radio_core, + ARRAY_SIZE(ar9300_2p2_radio_core), 2); + INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], + ar9300_2p2_radio_postamble, + ARRAY_SIZE(ar9300_2p2_radio_postamble), 5); + + /* soc */ + INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE], + ar9300_2p2_soc_preamble, + ARRAY_SIZE(ar9300_2p2_soc_preamble), 2); + INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], + ar9300_2p2_soc_postamble, + ARRAY_SIZE(ar9300_2p2_soc_postamble), 5); + + /* rx/tx gain */ + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9300Common_rx_gain_table_2p2, + ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2); + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_lowest_ob_db_tx_gain_table_2p2, + ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2), + 5); + + /* Load PCIE SERDES settings from INI */ + + /* Awake Setting */ + + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9300PciePhy_pll_on_clkreq_disable_L1_2p2, + ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2), + 2); + + /* Sleep Setting */ + + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9300PciePhy_clkreq_enable_L1_2p2, + ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2), + 2); + + /* Fast clock modal settings */ + INIT_INI_ARRAY(&ah->iniModesAdditional, + ar9300Modes_fast_clock_2p2, + ARRAY_SIZE(ar9300Modes_fast_clock_2p2), + 3); +} + +/* + * The AR9003 family uses a new INI format (pre, core, post + * arrays per subsystem). 
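A side note on the INIT_INI_ARRAY() calls above: each initvals table is a flat array of rows whose first column is a register address and whose remaining columns hold values; the trailing 2/3/5 arguments give the column count, with the multi-column postambles presumably holding one value per channel mode. The tables are later replayed row by row, as the PCIe SERDES loop further down does with INI_RA(array, i, 0) and INI_RA(array, i, 1). Below is a minimal, self-contained sketch of that idea using hypothetical types; the real driver uses struct ar5416IniArray and the INIT_INI_ARRAY()/INI_RA() macros.

#include <stdint.h>

/*
 * Simplified stand-in for struct ar5416IniArray: a flat table of
 * { address, value, ... } rows.  Column counts of 2, 3 and 5 match
 * the trailing arguments passed to INIT_INI_ARRAY() above.
 */
struct demo_ini_array {
        const uint32_t *data;
        unsigned int rows;
        unsigned int columns;
};

static inline uint32_t demo_ini_cell(const struct demo_ini_array *t,
                                     unsigned int row, unsigned int col)
{
        return t->data[row * t->columns + col];   /* like INI_RA(t, row, col) */
}

/*
 * Replay one table: column 0 is the register address, value_col selects
 * which value column to program (single-value tables always use 1).
 */
static void demo_ini_program(const struct demo_ini_array *t,
                             unsigned int value_col,
                             void (*reg_write)(uint32_t addr, uint32_t val))
{
        unsigned int i;

        for (i = 0; i < t->rows; i++)
                reg_write(demo_ini_cell(t, i, 0),
                          demo_ini_cell(t, i, value_col));
}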
+ */ +static void ar9003_hw_init_mode_regs(struct ath_hw *ah) +{ + if (AR_SREV_9300_20(ah)) + ar9003_2p0_hw_init_mode_regs(ah); + else + ar9003_2p2_hw_init_mode_regs(ah); +} + static void ar9003_tx_gain_table_apply(struct ath_hw *ah) { switch (ar9003_hw_get_tx_gain_idx(ah)) { case 0: default: - INIT_INI_ARRAY(&ah->iniModesTxGain, - ar9300Modes_lowest_ob_db_tx_gain_table_2p0, - ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0), - 5); + if (AR_SREV_9300_20(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_lowest_ob_db_tx_gain_table_2p0, + ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0), + 5); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_lowest_ob_db_tx_gain_table_2p2, + ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2), + 5); break; case 1: - INIT_INI_ARRAY(&ah->iniModesTxGain, - ar9300Modes_high_ob_db_tx_gain_table_2p0, - ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0), - 5); + if (AR_SREV_9300_20(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_high_ob_db_tx_gain_table_2p0, + ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0), + 5); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_high_ob_db_tx_gain_table_2p2, + ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2), + 5); break; case 2: - INIT_INI_ARRAY(&ah->iniModesTxGain, - ar9300Modes_low_ob_db_tx_gain_table_2p0, - ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0), - 5); + if (AR_SREV_9300_20(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_low_ob_db_tx_gain_table_2p0, + ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0), + 5); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9300Modes_low_ob_db_tx_gain_table_2p2, + ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2), + 5); break; } } @@ -136,15 +234,28 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah) switch (ar9003_hw_get_rx_gain_idx(ah)) { case 0: default: - INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0, - ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), - 2); + if (AR_SREV_9300_20(ah)) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9300Common_rx_gain_table_2p0, + ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), + 2); + else + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9300Common_rx_gain_table_2p2, + ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), + 2); break; case 1: - INIT_INI_ARRAY(&ah->iniModesRxGain, - ar9300Common_wo_xlna_rx_gain_table_2p0, - ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0), - 2); + if (AR_SREV_9300_20(ah)) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9300Common_wo_xlna_rx_gain_table_2p0, + ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0), + 2); + else + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9300Common_wo_xlna_rx_gain_table_2p2, + ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2), + 2); break; } } @@ -184,6 +295,26 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah, /* Several PCIe massages to ensure proper behaviour */ if (ah->config.pcie_waen) REG_WRITE(ah, AR_WA, ah->config.pcie_waen); + else + REG_WRITE(ah, AR_WA, ah->WARegVal); + } + + /* + * Configire PCIE after Ini init. SERDES values now come from ini file + * This enables PCIe low power mode. + */ + if (ah->config.pcieSerDesWrite) { + unsigned int i; + struct ar5416IniArray *array; + + array = power_off ? 
&ah->iniPcieSerdes : + &ah->iniPcieSerdesLowPower; + + for (i = 0; i < array->ia_rows; i++) { + REG_WRITE(ah, + INI_RA(array, i, 0), + INI_RA(array, i, 1)); + } } } @@ -202,4 +333,6 @@ void ar9003_hw_attach_ops(struct ath_hw *ah) ar9003_hw_attach_phy_ops(ah); ar9003_hw_attach_calib_ops(ah); ar9003_hw_attach_mac_ops(ah); + + ath9k_hw_attach_ani_ops_new(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 37ba37481a47eea72df41a631e3b6252423b334e..5b995bee70ae4a931e6337c6723618edef4df725 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -90,6 +90,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) MAP_ISR_S2_CST); mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> MAP_ISR_S2_TSFOOR); + mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >> + MAP_ISR_S2_BB_WATCHDOG); if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { REG_WRITE(ah, AR_ISR_S2, isr2); @@ -167,6 +169,9 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) (void) REG_READ(ah, AR_ISR); } + + if (*masked & ATH9K_INT_BB_WATCHDOG) + ar9003_hw_bb_watchdog_read(ah); } if (sync_cause) { @@ -465,6 +470,14 @@ static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds, ads->ctl11 &= ~AR_VirtMoreFrag; } +void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains) +{ + struct ar9003_txc *ads = ds; + + ads->ctl12 |= SM(chains, AR_PAPRDChainMask); +} +EXPORT_SYMBOL(ar9003_hw_set_paprd_txdesc); + void ar9003_hw_attach_mac_ops(struct ath_hw *hw) { struct ath_hw_ops *ops = ath9k_hw_ops(hw); @@ -566,12 +579,39 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; if ((rxsp->status11 & AR_RxFrameOK) == 0) { + /* + * AR_CRCErr will bet set to true if we're on the last + * subframe and the AR_PostDelimCRCErr is caught. + * In a way this also gives us a guarantee that when + * (!(AR_CRCErr) && (AR_PostDelimCRCErr)) we cannot + * possibly be reviewing the last subframe. AR_CRCErr + * is the CRC of the actual data. + */ if (rxsp->status11 & AR_CRCErr) { rxs->rs_status |= ATH9K_RXERR_CRC; } else if (rxsp->status11 & AR_PHYErr) { - rxs->rs_status |= ATH9K_RXERR_PHY; phyerr = MS(rxsp->status11, AR_PHYErrCode); - rxs->rs_phyerr = phyerr; + /* + * If we reach a point here where AR_PostDelimCRCErr is + * true it implies we're *not* on the last subframe. In + * in that case that we know already that the CRC of + * the frame was OK, and MAC would send an ACK for that + * subframe, even if we did get a phy error of type + * ATH9K_PHYERR_OFDM_RESTART. This is only applicable + * to frame that are prior to the last subframe. + * The AR_PostDelimCRCErr is the CRC for the MPDU + * delimiter, which contains the 4 reserved bits, + * the MPDU length (12 bits), and follows the MPDU + * delimiter for an A-MPDU subframe (0x4E = 'N' ASCII). 
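For reference, the 4-byte MPDU delimiter that the comment describes carries four fields per the 802.11n A-MPDU format: 4 reserved bits, a 12-bit MPDU length, an 8-bit CRC over the reserved and length bits, and the 8-bit signature 0x4E ('N'). A rough sketch of unpacking it, assuming the delimiter is read as a little-endian 32-bit word; the struct and helper names are illustrative and do not appear in the driver.

#include <stdint.h>
#include <stdbool.h>

/* Illustrative decomposition of an A-MPDU MPDU delimiter word. */
struct demo_ampdu_delim {
        uint8_t  reserved;   /* bits  0-3  */
        uint16_t mpdu_len;   /* bits  4-15 */
        uint8_t  crc;        /* bits 16-23, CRC over the reserved+length bits */
        uint8_t  signature;  /* bits 24-31, always 0x4E ('N') */
};

static struct demo_ampdu_delim demo_unpack_delim(uint32_t raw)
{
        struct demo_ampdu_delim d = {
                .reserved  = raw & 0xf,
                .mpdu_len  = (raw >> 4) & 0xfff,
                .crc       = (raw >> 16) & 0xff,
                .signature = (raw >> 24) & 0xff,
        };

        return d;
}

/* AR_PostDelimCRCErr in the RX status refers to the CRC of this delimiter. */
static bool demo_delim_signature_ok(const struct demo_ampdu_delim *d)
{
        return d->signature == 0x4E;
}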
+ */ + if ((phyerr == ATH9K_PHYERR_OFDM_RESTART) && + (rxsp->status11 & AR_PostDelimCRCErr)) { + rxs->rs_phyerr = 0; + } else { + rxs->rs_status |= ATH9K_RXERR_PHY; + rxs->rs_phyerr = phyerr; + } + } else if (rxsp->status11 & AR_DecryptCRCErr) { rxs->rs_status |= ATH9K_RXERR_DECRYPT; } else if (rxsp->status11 & AR_MichaelErr) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h index f17558b1453930a1eeaff471e1f1009205c9f009..9f2cea70a840801dc1f767993a83b02fa4c29eb1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h @@ -33,13 +33,14 @@ #define AR_TxDescId_S 16 #define AR_TxPtrChkSum 0x0000ffff -#define AR_TxTid 0xf0000000 -#define AR_TxTid_S 28 - #define AR_LowRxChain 0x00004000 #define AR_Not_Sounding 0x20000000 +/* ctl 12 */ +#define AR_PAPRDChainMask 0x00000e00 +#define AR_PAPRDChainMask_S 9 + #define MAP_ISR_S2_CST 6 #define MAP_ISR_S2_GTT 6 #define MAP_ISR_S2_TIM 3 @@ -47,6 +48,7 @@ #define MAP_ISR_S2_DTIMSYNC 7 #define MAP_ISR_S2_DTIM 7 #define MAP_ISR_S2_TSFOOR 4 +#define MAP_ISR_S2_BB_WATCHDOG 6 #define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c new file mode 100644 index 0000000000000000000000000000000000000000..49e0c865ce5c8da2ffb12a0beb9c31cf0d1616e0 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2010 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hw.h" +#include "ar9003_phy.h" + +void ar9003_paprd_enable(struct ath_hw *ah, bool val) +{ + REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0, + AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1, + AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2, + AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val); +} +EXPORT_SYMBOL(ar9003_paprd_enable); + +static void ar9003_paprd_setup_single_table(struct ath_hw *ah) +{ + struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + struct ar9300_modal_eep_header *hdr; + const u32 ctrl0[3] = { + AR_PHY_PAPRD_CTRL0_B0, + AR_PHY_PAPRD_CTRL0_B1, + AR_PHY_PAPRD_CTRL0_B2 + }; + const u32 ctrl1[3] = { + AR_PHY_PAPRD_CTRL1_B0, + AR_PHY_PAPRD_CTRL1_B1, + AR_PHY_PAPRD_CTRL1_B2 + }; + u32 am_mask, ht40_mask; + int i; + + if (ah->curchan && IS_CHAN_5GHZ(ah->curchan)) + hdr = &eep->modalHeader5G; + else + hdr = &eep->modalHeader2G; + + am_mask = le32_to_cpu(hdr->papdRateMaskHt20); + ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40); + + REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask); + + for (i = 0; i < 3; i++) { + REG_RMW_FIELD(ah, ctrl0[i], + AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1); + REG_RMW_FIELD(ah, ctrl1[i], + AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE, 1); + REG_RMW_FIELD(ah, ctrl1[i], + AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE, 1); + REG_RMW_FIELD(ah, ctrl1[i], + AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0); + REG_RMW_FIELD(ah, ctrl1[i], + AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK, 181); + REG_RMW_FIELD(ah, ctrl1[i], + AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT, 361); + REG_RMW_FIELD(ah, ctrl1[i], + AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0); + REG_RMW_FIELD(ah, ctrl0[i], + AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH, 3); + } + + ar9003_paprd_enable(ah, false); + + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, + AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2, + AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, 147); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE, + -15); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1); + 
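A brief aside on the REG_RMW_FIELD() calls used throughout this block: each register field is described by a mask define plus a matching _S shift define, and the macro performs a read-modify-write that replaces only the masked bits. A self-contained sketch of that pattern (illustrative only, not the driver's actual macro):

#include <stdint.h>
#include <assert.h>

/* Clear the field selected by mask, then insert val shifted into place. */
static void demo_rmw_field(uint32_t *reg, uint32_t mask, unsigned int shift,
                           uint32_t val)
{
        *reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t reg = 0;

        /* Write 5 into the 3-bit field at bits [11:9] (mask 0x00000e00). */
        demo_rmw_field(&reg, 0x00000e00, 9, 5);
        assert(reg == 0x00000a00);
        return 0;
}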
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4, + AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4, + AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4, + AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES, + 100); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 261376); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_1_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 248079); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_2_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 233759); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_3_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 220464); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_4_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 208194); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_5_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 196949); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_6_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 185706); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0, + AR_PHY_PAPRD_PRE_POST_SCALING, 175487); +} + +static void ar9003_paprd_get_gain_table(struct ath_hw *ah) +{ + u32 *entry = ah->paprd_gain_table_entries; + u8 *index = ah->paprd_gain_table_index; + u32 reg = AR_PHY_TXGAIN_TABLE; + int i; + + memset(entry, 0, sizeof(ah->paprd_gain_table_entries)); + memset(index, 0, sizeof(ah->paprd_gain_table_index)); + + for (i = 0; i < 32; i++) { + entry[i] = REG_READ(ah, reg); + index[i] = (entry[i] >> 24) & 0xff; + reg += 4; + } +} + +static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain, + int target_power) +{ + int olpc_gain_delta = 0; + int alpha_therm, alpha_volt; + int therm_cal_value, volt_cal_value; + int therm_value, volt_value; + int thermal_gain_corr, voltage_gain_corr; + int desired_scale, desired_gain = 0; + u32 reg; + + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, + AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); + desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12, + AR_PHY_TPC_12_DESIRED_SCALE_HT40_5); + alpha_therm = REG_READ_FIELD(ah, AR_PHY_TPC_19, + AR_PHY_TPC_19_ALPHA_THERM); + alpha_volt = REG_READ_FIELD(ah, AR_PHY_TPC_19, + AR_PHY_TPC_19_ALPHA_VOLT); + therm_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18, + AR_PHY_TPC_18_THERM_CAL_VALUE); + volt_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18, + AR_PHY_TPC_18_VOLT_CAL_VALUE); + therm_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4, + AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE); + volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4, + AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE); + + if (chain == 0) + reg = AR_PHY_TPC_11_B0; + else if (chain == 1) + reg = AR_PHY_TPC_11_B1; + else + reg = AR_PHY_TPC_11_B2; + + olpc_gain_delta = REG_READ_FIELD(ah, reg, + AR_PHY_TPC_11_OLPC_GAIN_DELTA); + + if (olpc_gain_delta >= 128) + olpc_gain_delta = olpc_gain_delta - 256; + + thermal_gain_corr = (alpha_therm * (therm_value - therm_cal_value) + + (256 / 2)) / 256; + voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) + + (128 / 2)) / 128; + desired_gain = target_power - olpc_gain_delta - thermal_gain_corr - + voltage_gain_corr + desired_scale; + + return desired_gain; +} + +static void ar9003_tx_force_gain(struct ath_hw *ah, unsigned int gain_index) +{ + int selected_gain_entry, txbb1dbgain, txbb6dbgain, txmxrgain; + int padrvgnA, padrvgnB, padrvgnC, padrvgnD; + u32 *gain_table_entries = ah->paprd_gain_table_entries; + + selected_gain_entry = gain_table_entries[gain_index]; + txbb1dbgain = selected_gain_entry & 0x7; + txbb6dbgain = (selected_gain_entry >> 
3) & 0x3; + txmxrgain = (selected_gain_entry >> 5) & 0xf; + padrvgnA = (selected_gain_entry >> 9) & 0xf; + padrvgnB = (selected_gain_entry >> 13) & 0xf; + padrvgnC = (selected_gain_entry >> 17) & 0xf; + padrvgnD = (selected_gain_entry >> 21) & 0x3; + + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN, txbb1dbgain); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN, txbb6dbgain); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN, txmxrgain); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA, padrvgnA); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB, padrvgnB); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC, padrvgnC); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND, padrvgnD); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL, 0); + REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, + AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN, 0); + REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCED_DAC_GAIN, 0); + REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCE_DAC_GAIN, 0); +} + +static inline int find_expn(int num) +{ + return fls(num) - 1; +} + +static inline int find_proper_scale(int expn, int N) +{ + return (expn > N) ? expn - 10 : 0; +} + +#define NUM_BIN 23 + +static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) +{ + unsigned int thresh_accum_cnt; + int x_est[NUM_BIN + 1], Y[NUM_BIN + 1], theta[NUM_BIN + 1]; + int PA_in[NUM_BIN + 1]; + int B1_tmp[NUM_BIN + 1], B2_tmp[NUM_BIN + 1]; + unsigned int B1_abs_max, B2_abs_max; + int max_index, scale_factor; + int y_est[NUM_BIN + 1]; + int x_est_fxp1_nonlin, x_tilde[NUM_BIN + 1]; + unsigned int x_tilde_abs; + int G_fxp, Y_intercept, order_x_by_y, M, I, L, sum_y_sqr, sum_y_quad; + int Q_x, Q_B1, Q_B2, beta_raw, alpha_raw, scale_B; + int Q_scale_B, Q_beta, Q_alpha, alpha, beta, order_1, order_2; + int order1_5x, order2_3x, order1_5x_rem, order2_3x_rem; + int y5, y3, tmp; + int theta_low_bin = 0; + int i; + + /* disregard any bin that contains <= 16 samples */ + thresh_accum_cnt = 16; + scale_factor = 5; + max_index = 0; + memset(theta, 0, sizeof(theta)); + memset(x_est, 0, sizeof(x_est)); + memset(Y, 0, sizeof(Y)); + memset(y_est, 0, sizeof(y_est)); + memset(x_tilde, 0, sizeof(x_tilde)); + + for (i = 0; i < NUM_BIN; i++) { + s32 accum_cnt, accum_tx, accum_rx, accum_ang; + + /* number of samples */ + accum_cnt = data_L[i] & 0xffff; + + if (accum_cnt <= thresh_accum_cnt) + continue; + + /* sum(tx amplitude) */ + accum_tx = ((data_L[i] >> 16) & 0xffff) | + ((data_U[i] & 0x7ff) << 16); + + /* sum(rx amplitude distance to lower bin edge) */ + accum_rx = ((data_U[i] >> 11) & 0x1f) | + ((data_L[i + 23] & 0xffff) << 5); + + /* sum(angles) */ + accum_ang = ((data_L[i + 23] >> 16) & 0xffff) | + ((data_U[i + 23] & 0x7ff) << 16); + + accum_tx <<= scale_factor; + accum_rx <<= scale_factor; + x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >> + scale_factor; + + Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >> + scale_factor) + + (1 << scale_factor) * max_index + 16; + + if (accum_ang >= (1 << 26)) + accum_ang -= 1 << 27; + + theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) / + accum_cnt; + + max_index++; + } + + /* + * Find average theta of first 5 bin and all of those to same value. + * Curve is linear at that range. 
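On the find_expn()/find_proper_scale() helpers defined a little earlier: find_expn() is floor(log2(x)) computed via fls(), and find_proper_scale() turns that exponent into a right-shift amount once it exceeds the threshold N (callers here pass 10), which appears to be how the Q_* scale factors keep the fixed-point sums and products in this routine within range. A sketch with a portable stand-in for the kernel's fls():

#include <stdint.h>

/* Portable stand-in for the kernel's fls(): 1-based index of highest set bit. */
static int demo_fls(uint32_t x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

static int demo_find_expn(int num)
{
        return demo_fls((uint32_t)num) - 1;   /* floor(log2(num)); -1 for num == 0 */
}

static int demo_find_proper_scale(int expn, int N)
{
        return (expn > N) ? expn - 10 : 0;    /* same body as the driver's helper */
}

/*
 * Example: demo_find_expn(0x180) == 8, so demo_find_proper_scale(8, 10) == 0,
 * while demo_find_expn(0x20000) == 17 gives a scale of 7 (divide by 2^7).
 */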
+ */ + for (i = 1; i < 6; i++) + theta_low_bin += theta[i]; + + theta_low_bin = theta_low_bin / 5; + for (i = 1; i < 6; i++) + theta[i] = theta_low_bin; + + /* Set values at origin */ + theta[0] = theta_low_bin; + for (i = 0; i <= max_index; i++) + theta[i] -= theta_low_bin; + + x_est[0] = 0; + Y[0] = 0; + scale_factor = 8; + + /* low signal gain */ + if (x_est[6] == x_est[3]) + return false; + + G_fxp = + (((Y[6] - Y[3]) * 1 << scale_factor) + + (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]); + + Y_intercept = + (G_fxp * (x_est[0] - x_est[3]) + + (1 << scale_factor)) / (1 << scale_factor) + Y[3]; + + for (i = 0; i <= max_index; i++) + y_est[i] = Y[i] - Y_intercept; + + for (i = 0; i <= 3; i++) { + y_est[i] = i * 32; + + /* prevent division by zero */ + if (G_fxp == 0) + return false; + + x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp; + } + + x_est_fxp1_nonlin = + x_est[max_index] - ((1 << scale_factor) * y_est[max_index] + + G_fxp) / G_fxp; + + order_x_by_y = + (x_est_fxp1_nonlin + y_est[max_index]) / y_est[max_index]; + + if (order_x_by_y == 0) + M = 10; + else if (order_x_by_y == 1) + M = 9; + else + M = 8; + + I = (max_index > 15) ? 7 : max_index >> 1; + L = max_index - I; + scale_factor = 8; + sum_y_sqr = 0; + sum_y_quad = 0; + x_tilde_abs = 0; + + for (i = 0; i <= L; i++) { + unsigned int y_sqr; + unsigned int y_quad; + unsigned int tmp_abs; + + /* prevent division by zero */ + if (y_est[i + I] == 0) + return false; + + x_est_fxp1_nonlin = + x_est[i + I] - ((1 << scale_factor) * y_est[i + I] + + G_fxp) / G_fxp; + + x_tilde[i] = + (x_est_fxp1_nonlin * (1 << M) + y_est[i + I]) / y_est[i + + I]; + x_tilde[i] = + (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I]; + x_tilde[i] = + (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I]; + y_sqr = + (y_est[i + I] * y_est[i + I] + + (scale_factor * scale_factor)) / (scale_factor * + scale_factor); + tmp_abs = abs(x_tilde[i]); + if (tmp_abs > x_tilde_abs) + x_tilde_abs = tmp_abs; + + y_quad = y_sqr * y_sqr; + sum_y_sqr = sum_y_sqr + y_sqr; + sum_y_quad = sum_y_quad + y_quad; + B1_tmp[i] = y_sqr * (L + 1); + B2_tmp[i] = y_sqr; + } + + B1_abs_max = 0; + B2_abs_max = 0; + for (i = 0; i <= L; i++) { + int abs_val; + + B1_tmp[i] -= sum_y_sqr; + B2_tmp[i] = sum_y_quad - sum_y_sqr * B2_tmp[i]; + + abs_val = abs(B1_tmp[i]); + if (abs_val > B1_abs_max) + B1_abs_max = abs_val; + + abs_val = abs(B2_tmp[i]); + if (abs_val > B2_abs_max) + B2_abs_max = abs_val; + } + + Q_x = find_proper_scale(find_expn(x_tilde_abs), 10); + Q_B1 = find_proper_scale(find_expn(B1_abs_max), 10); + Q_B2 = find_proper_scale(find_expn(B2_abs_max), 10); + + beta_raw = 0; + alpha_raw = 0; + for (i = 0; i <= L; i++) { + x_tilde[i] = x_tilde[i] / (1 << Q_x); + B1_tmp[i] = B1_tmp[i] / (1 << Q_B1); + B2_tmp[i] = B2_tmp[i] / (1 << Q_B2); + beta_raw = beta_raw + B1_tmp[i] * x_tilde[i]; + alpha_raw = alpha_raw + B2_tmp[i] * x_tilde[i]; + } + + scale_B = + ((sum_y_quad / scale_factor) * (L + 1) - + (sum_y_sqr / scale_factor) * sum_y_sqr) * scale_factor; + + Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10); + scale_B = scale_B / (1 << Q_scale_B); + Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10); + Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10); + beta_raw = beta_raw / (1 << Q_beta); + alpha_raw = alpha_raw / (1 << Q_alpha); + alpha = (alpha_raw << 10) / scale_B; + beta = (beta_raw << 10) / scale_B; + order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B; + order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B; + order1_5x = order_1 / 5; + 
order2_3x = order_2 / 3; + order1_5x_rem = order_1 - 5 * order1_5x; + order2_3x_rem = order_2 - 3 * order2_3x; + + for (i = 0; i < PAPRD_TABLE_SZ; i++) { + tmp = i * 32; + y5 = ((beta * tmp) >> 6) >> order1_5x; + y5 = (y5 * tmp) >> order1_5x; + y5 = (y5 * tmp) >> order1_5x; + y5 = (y5 * tmp) >> order1_5x; + y5 = (y5 * tmp) >> order1_5x; + y5 = y5 >> order1_5x_rem; + y3 = (alpha * tmp) >> order2_3x; + y3 = (y3 * tmp) >> order2_3x; + y3 = (y3 * tmp) >> order2_3x; + y3 = y3 >> order2_3x_rem; + PA_in[i] = y5 + y3 + (256 * tmp) / G_fxp; + + if (i >= 2) { + tmp = PA_in[i] - PA_in[i - 1]; + if (tmp < 0) + PA_in[i] = + PA_in[i - 1] + (PA_in[i - 1] - + PA_in[i - 2]); + } + + PA_in[i] = (PA_in[i] < 1400) ? PA_in[i] : 1400; + } + + beta_raw = 0; + alpha_raw = 0; + + for (i = 0; i <= L; i++) { + int theta_tilde = + ((theta[i + I] << M) + y_est[i + I]) / y_est[i + I]; + theta_tilde = + ((theta_tilde << M) + y_est[i + I]) / y_est[i + I]; + theta_tilde = + ((theta_tilde << M) + y_est[i + I]) / y_est[i + I]; + beta_raw = beta_raw + B1_tmp[i] * theta_tilde; + alpha_raw = alpha_raw + B2_tmp[i] * theta_tilde; + } + + Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10); + Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10); + beta_raw = beta_raw / (1 << Q_beta); + alpha_raw = alpha_raw / (1 << Q_alpha); + + alpha = (alpha_raw << 10) / scale_B; + beta = (beta_raw << 10) / scale_B; + order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B + 5; + order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B + 5; + order1_5x = order_1 / 5; + order2_3x = order_2 / 3; + order1_5x_rem = order_1 - 5 * order1_5x; + order2_3x_rem = order_2 - 3 * order2_3x; + + for (i = 0; i < PAPRD_TABLE_SZ; i++) { + int PA_angle; + + /* pa_table[4] is calculated from PA_angle for i=5 */ + if (i == 4) + continue; + + tmp = i * 32; + if (beta > 0) + y5 = (((beta * tmp - 64) >> 6) - + (1 << order1_5x)) / (1 << order1_5x); + else + y5 = ((((beta * tmp - 64) >> 6) + + (1 << order1_5x)) / (1 << order1_5x)); + + y5 = (y5 * tmp) / (1 << order1_5x); + y5 = (y5 * tmp) / (1 << order1_5x); + y5 = (y5 * tmp) / (1 << order1_5x); + y5 = (y5 * tmp) / (1 << order1_5x); + y5 = y5 / (1 << order1_5x_rem); + + if (beta > 0) + y3 = (alpha * tmp - + (1 << order2_3x)) / (1 << order2_3x); + else + y3 = (alpha * tmp + + (1 << order2_3x)) / (1 << order2_3x); + y3 = (y3 * tmp) / (1 << order2_3x); + y3 = (y3 * tmp) / (1 << order2_3x); + y3 = y3 / (1 << order2_3x_rem); + + if (i < 4) { + PA_angle = 0; + } else { + PA_angle = y5 + y3; + if (PA_angle < -150) + PA_angle = -150; + else if (PA_angle > 150) + PA_angle = 150; + } + + pa_table[i] = ((PA_in[i] & 0x7ff) << 11) + (PA_angle & 0x7ff); + if (i == 5) { + PA_angle = (PA_angle + 2) >> 1; + pa_table[i - 1] = ((PA_in[i - 1] & 0x7ff) << 11) + + (PA_angle & 0x7ff); + } + } + + *gain = G_fxp; + return true; +} + +void ar9003_paprd_populate_single_table(struct ath_hw *ah, + struct ath9k_channel *chan, int chain) +{ + u32 *paprd_table_val = chan->pa_table[chain]; + u32 small_signal_gain = chan->small_signal_gain[chain]; + u32 training_power; + u32 reg = 0; + int i; + + training_power = + REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5, + AR_PHY_POWERTX_RATE5_POWERTXHT20_0); + training_power -= 4; + + if (chain == 0) + reg = AR_PHY_PAPRD_MEM_TAB_B0; + else if (chain == 1) + reg = AR_PHY_PAPRD_MEM_TAB_B1; + else if (chain == 2) + reg = AR_PHY_PAPRD_MEM_TAB_B2; + + for (i = 0; i < PAPRD_TABLE_SZ; i++) { + REG_WRITE(ah, reg, paprd_table_val[i]); + reg = reg + 4; + } + + if (chain == 0) + reg = AR_PHY_PA_GAIN123_B0; + else if 
(chain == 1) + reg = AR_PHY_PA_GAIN123_B1; + else + reg = AR_PHY_PA_GAIN123_B2; + + REG_RMW_FIELD(ah, reg, AR_PHY_PA_GAIN123_PA_GAIN1, small_signal_gain); + + REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B0, + AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL, + training_power); + + REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1, + AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL, + training_power); + + REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2, + AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL, + training_power); +} +EXPORT_SYMBOL(ar9003_paprd_populate_single_table); + +int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain) +{ + + unsigned int i, desired_gain, gain_index; + unsigned int train_power; + + train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5, + AR_PHY_POWERTX_RATE5_POWERTXHT20_0); + + train_power = train_power - 4; + + desired_gain = ar9003_get_desired_gain(ah, chain, train_power); + + gain_index = 0; + for (i = 0; i < 32; i++) { + if (ah->paprd_gain_table_index[i] >= desired_gain) + break; + gain_index++; + } + + ar9003_tx_force_gain(ah, gain_index); + + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, + AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); + + return 0; +} +EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); + +int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, + int chain) +{ + u16 *small_signal_gain = &chan->small_signal_gain[chain]; + u32 *pa_table = chan->pa_table[chain]; + u32 *data_L, *data_U; + int i, status = 0; + u32 *buf; + u32 reg; + + memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain])); + + buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + data_L = &buf[0]; + data_U = &buf[48]; + + REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY, + AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ); + + reg = AR_PHY_CHAN_INFO_TAB_0; + for (i = 0; i < 48; i++) + data_L[i] = REG_READ(ah, reg + (i << 2)); + + REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY, + AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ); + + for (i = 0; i < 48; i++) + data_U[i] = REG_READ(ah, reg + (i << 2)); + + if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain)) + status = -2; + + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, + AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); + + kfree(buf); + + return status; +} +EXPORT_SYMBOL(ar9003_paprd_create_curve); + +int ar9003_paprd_init_table(struct ath_hw *ah) +{ + ar9003_paprd_setup_single_table(ah); + ar9003_paprd_get_gain_table(ah); + return 0; +} +EXPORT_SYMBOL(ar9003_paprd_init_table); + +bool ar9003_paprd_is_done(struct ath_hw *ah) +{ + return !!REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, + AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); +} +EXPORT_SYMBOL(ar9003_paprd_is_done); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 80431a2f6dc1d941ef2376637a7cf32133192ff9..a753a431bb13f186b7587cb0b67c10e76699a7d2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -17,6 +17,28 @@ #include "hw.h" #include "ar9003_phy.h" +static const int firstep_table[] = +/* level: 0 1 2 3 4 5 6 7 8 */ + { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ + +static const int cycpwrThr1_table[] = +/* level: 0 1 2 3 4 5 6 7 8 */ + { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */ + +/* + * register values to turn OFDM weak signal detection OFF + */ +static const int m1ThreshLow_off = 127; +static const int m2ThreshLow_off = 127; +static const int m1Thresh_off = 127; +static const int m2Thresh_off = 127; +static const int 
m2CountThr_off = 31; +static const int m2CountThrLow_off = 63; +static const int m1ThreshLowExt_off = 127; +static const int m2ThreshLowExt_off = 127; +static const int m1ThreshExt_off = 127; +static const int m2ThreshExt_off = 127; + /** * ar9003_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure @@ -94,7 +116,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) } /** - * ar9003_hw_spur_mitigate - convert baseband spur frequency + * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency * @ah: atheros hardware structure * @chan: * @@ -521,15 +543,6 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah, u32 val = INI_RA(iniArr, i, column); REG_WRITE(ah, reg, val); - - /* - * Determine if this is a shift register value, and insert the - * configured delay if so. - */ - if (reg >= 0x16000 && reg < 0x17000 - && ah->config.analog_shiftreg) - udelay(100); - DO_DELAY(regWrites); } } @@ -732,71 +745,68 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, { struct ar5416AniState *aniState = ah->curani; struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_channel *chan = ah->curchan; + s32 value, value2; switch (cmd & ah->ani_function) { - case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ - u32 level = param; - - if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { - ath_print(common, ATH_DBG_ANI, - "level out of range (%u > %u)\n", - level, - (unsigned)ARRAY_SIZE(ah->totalSizeDesired)); - return false; - } - - REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, - AR_PHY_DESIRED_SZ_TOT_DES, - ah->totalSizeDesired[level]); - REG_RMW_FIELD(ah, AR_PHY_AGC, - AR_PHY_AGC_COARSE_LOW, - ah->coarse_low[level]); - REG_RMW_FIELD(ah, AR_PHY_AGC, - AR_PHY_AGC_COARSE_HIGH, - ah->coarse_high[level]); - REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, - AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]); - - if (level > aniState->noiseImmunityLevel) - ah->stats.ast_ani_niup++; - else if (level < aniState->noiseImmunityLevel) - ah->stats.ast_ani_nidown++; - aniState->noiseImmunityLevel = level; - break; - } case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ - const int m1ThreshLow[] = { 127, 50 }; - const int m2ThreshLow[] = { 127, 40 }; - const int m1Thresh[] = { 127, 0x4d }; - const int m2Thresh[] = { 127, 0x40 }; - const int m2CountThr[] = { 31, 16 }; - const int m2CountThrLow[] = { 63, 48 }; + /* + * on == 1 means ofdm weak signal detection is ON + * on == 1 is the default, for less noise immunity + * + * on == 0 means ofdm weak signal detection is OFF + * on == 0 means more noise imm + */ u32 on = param ? 1 : 0; + /* + * make register setting for default + * (weak sig detect ON) come from INI file + */ + int m1ThreshLow = on ? + aniState->iniDef.m1ThreshLow : m1ThreshLow_off; + int m2ThreshLow = on ? + aniState->iniDef.m2ThreshLow : m2ThreshLow_off; + int m1Thresh = on ? + aniState->iniDef.m1Thresh : m1Thresh_off; + int m2Thresh = on ? + aniState->iniDef.m2Thresh : m2Thresh_off; + int m2CountThr = on ? + aniState->iniDef.m2CountThr : m2CountThr_off; + int m2CountThrLow = on ? + aniState->iniDef.m2CountThrLow : m2CountThrLow_off; + int m1ThreshLowExt = on ? + aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off; + int m2ThreshLowExt = on ? + aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off; + int m1ThreshExt = on ? + aniState->iniDef.m1ThreshExt : m1ThreshExt_off; + int m2ThreshExt = on ? 
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off; REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M1_THRESH_LOW, - m1ThreshLow[on]); + m1ThreshLow); REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M2_THRESH_LOW, - m2ThreshLow[on]); + m2ThreshLow); REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]); + AR_PHY_SFCORR_M1_THRESH, m1Thresh); REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]); + AR_PHY_SFCORR_M2_THRESH, m2Thresh); REG_RMW_FIELD(ah, AR_PHY_SFCORR, - AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]); + AR_PHY_SFCORR_M2COUNT_THR, m2CountThr); REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, - m2CountThrLow[on]); + m2CountThrLow); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]); + AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]); + AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]); + AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt); REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, - AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]); + AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt); if (on) REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, @@ -806,6 +816,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); if (!on != aniState->ofdmWeakSigDetectOff) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: ofdm weak signal: %s=>%s\n", + chan->channel, + !aniState->ofdmWeakSigDetectOff ? + "on" : "off", + on ? "on" : "off"); if (on) ah->stats.ast_ani_ofdmon++; else @@ -814,64 +830,167 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, } break; } - case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{ - const int weakSigThrCck[] = { 8, 6 }; - u32 high = param ? 
1 : 0; - - REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, - AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK, - weakSigThrCck[high]); - if (high != aniState->cckWeakSigThreshold) { - if (high) - ah->stats.ast_ani_cckhigh++; - else - ah->stats.ast_ani_ccklow++; - aniState->cckWeakSigThreshold = high; - } - break; - } case ATH9K_ANI_FIRSTEP_LEVEL:{ - const int firstep[] = { 0, 4, 8 }; u32 level = param; - if (level >= ARRAY_SIZE(firstep)) { + if (level >= ARRAY_SIZE(firstep_table)) { ath_print(common, ATH_DBG_ANI, - "level out of range (%u > %u)\n", + "ATH9K_ANI_FIRSTEP_LEVEL: level " + "out of range (%u > %u)\n", level, - (unsigned) ARRAY_SIZE(firstep)); + (unsigned) ARRAY_SIZE(firstep_table)); return false; } + + /* + * make register setting relative to default + * from INI file & cap value + */ + value = firstep_table[level] - + firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + + aniState->iniDef.firstep; + if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) + value = ATH9K_SIG_FIRSTEP_SETTING_MIN; + if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX) + value = ATH9K_SIG_FIRSTEP_SETTING_MAX; REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, AR_PHY_FIND_SIG_FIRSTEP, - firstep[level]); - if (level > aniState->firstepLevel) - ah->stats.ast_ani_stepup++; - else if (level < aniState->firstepLevel) - ah->stats.ast_ani_stepdown++; - aniState->firstepLevel = level; + value); + /* + * we need to set first step low register too + * make register setting relative to default + * from INI file & cap value + */ + value2 = firstep_table[level] - + firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + + aniState->iniDef.firstepLow; + if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) + value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; + if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX) + value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX; + + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW, + AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2); + + if (level != aniState->firstepLevel) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "firstep[level]=%d ini=%d\n", + chan->channel, + aniState->firstepLevel, + level, + ATH9K_ANI_FIRSTEP_LVL_NEW, + value, + aniState->iniDef.firstep); + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "firstep_low[level]=%d ini=%d\n", + chan->channel, + aniState->firstepLevel, + level, + ATH9K_ANI_FIRSTEP_LVL_NEW, + value2, + aniState->iniDef.firstepLow); + if (level > aniState->firstepLevel) + ah->stats.ast_ani_stepup++; + else if (level < aniState->firstepLevel) + ah->stats.ast_ani_stepdown++; + aniState->firstepLevel = level; + } break; } case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ - const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 }; u32 level = param; - if (level >= ARRAY_SIZE(cycpwrThr1)) { + if (level >= ARRAY_SIZE(cycpwrThr1_table)) { ath_print(common, ATH_DBG_ANI, - "level out of range (%u > %u)\n", + "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level " + "out of range (%u > %u)\n", level, - (unsigned) ARRAY_SIZE(cycpwrThr1)); + (unsigned) ARRAY_SIZE(cycpwrThr1_table)); return false; } + /* + * make register setting relative to default + * from INI file & cap value + */ + value = cycpwrThr1_table[level] - + cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + + aniState->iniDef.cycpwrThr1; + if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) + value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; + if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX) + value = ATH9K_SIG_SPUR_IMM_SETTING_MAX; REG_RMW_FIELD(ah, AR_PHY_TIMING5, AR_PHY_TIMING5_CYCPWR_THR1, - cycpwrThr1[level]); - if (level > aniState->spurImmunityLevel) - ah->stats.ast_ani_spurup++; - else if (level < aniState->spurImmunityLevel) - 
ah->stats.ast_ani_spurdown++; - aniState->spurImmunityLevel = level; + value); + + /* + * set AR_PHY_EXT_CCA for extension channel + * make register setting relative to default + * from INI file & cap value + */ + value2 = cycpwrThr1_table[level] - + cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + + aniState->iniDef.cycpwrThr1Ext; + if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) + value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; + if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX) + value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX; + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, + AR_PHY_EXT_CYCPWR_THR1, value2); + + if (level != aniState->spurImmunityLevel) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "cycpwrThr1[level]=%d ini=%d\n", + chan->channel, + aniState->spurImmunityLevel, + level, + ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, + value, + aniState->iniDef.cycpwrThr1); + ath_print(common, ATH_DBG_ANI, + "** ch %d: level %d=>%d[def:%d] " + "cycpwrThr1Ext[level]=%d ini=%d\n", + chan->channel, + aniState->spurImmunityLevel, + level, + ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, + value2, + aniState->iniDef.cycpwrThr1Ext); + if (level > aniState->spurImmunityLevel) + ah->stats.ast_ani_spurup++; + else if (level < aniState->spurImmunityLevel) + ah->stats.ast_ani_spurdown++; + aniState->spurImmunityLevel = level; + } break; } + case ATH9K_ANI_MRC_CCK:{ + /* + * is_on == 1 means MRC CCK ON (default, less noise imm) + * is_on == 0 means MRC CCK is OFF (more noise imm) + */ + bool is_on = param ? 1 : 0; + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, + AR_PHY_MRC_CCK_ENABLE, is_on); + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, + AR_PHY_MRC_CCK_MUX_REG, is_on); + if (!is_on != aniState->mrcCCKOff) { + ath_print(common, ATH_DBG_ANI, + "** ch %d: MRC CCK: %s=>%s\n", + chan->channel, + !aniState->mrcCCKOff ? "on" : "off", + is_on ? "on" : "off"); + if (is_on) + ah->stats.ast_ani_ccklow++; + else + ah->stats.ast_ani_cckhigh++; + aniState->mrcCCKOff = !is_on; + } + break; + } case ATH9K_ANI_PRESENT: break; default: @@ -880,158 +999,126 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, return false; } - ath_print(common, ATH_DBG_ANI, "ANI parameters:\n"); ath_print(common, ATH_DBG_ANI, - "noiseImmunityLevel=%d, spurImmunityLevel=%d, " - "ofdmWeakSigDetectOff=%d\n", - aniState->noiseImmunityLevel, + "ANI parameters: SI=%d, ofdmWS=%s FS=%d " + "MRCcck=%s listenTime=%d CC=%d listen=%d " + "ofdmErrs=%d cckErrs=%d\n", aniState->spurImmunityLevel, - !aniState->ofdmWeakSigDetectOff); - ath_print(common, ATH_DBG_ANI, - "cckWeakSigThreshold=%d, " - "firstepLevel=%d, listenTime=%d\n", - aniState->cckWeakSigThreshold, + !aniState->ofdmWeakSigDetectOff ? "on" : "off", aniState->firstepLevel, - aniState->listenTime); - ath_print(common, ATH_DBG_ANI, - "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", - aniState->cycleCount, - aniState->ofdmPhyErrCount, - aniState->cckPhyErrCount); - + !aniState->mrcCCKOff ? 
"on" : "off", + aniState->listenTime, + aniState->cycleCount, + aniState->listenTime, + aniState->ofdmPhyErrCount, + aniState->cckPhyErrCount); return true; } -static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (*nf > ah->nf_2g_max) { - ath_print(common, ATH_DBG_CALIBRATE, - "2 GHz NF (%d) > MAX (%d), " - "correcting to MAX", - *nf, ah->nf_2g_max); - *nf = ah->nf_2g_max; - } else if (*nf < ah->nf_2g_min) { - ath_print(common, ATH_DBG_CALIBRATE, - "2 GHz NF (%d) < MIN (%d), " - "correcting to MIN", - *nf, ah->nf_2g_min); - *nf = ah->nf_2g_min; - } -} - -static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf) -{ - struct ath_common *common = ath9k_hw_common(ah); - - if (*nf > ah->nf_5g_max) { - ath_print(common, ATH_DBG_CALIBRATE, - "5 GHz NF (%d) > MAX (%d), " - "correcting to MAX", - *nf, ah->nf_5g_max); - *nf = ah->nf_5g_max; - } else if (*nf < ah->nf_5g_min) { - ath_print(common, ATH_DBG_CALIBRATE, - "5 GHz NF (%d) < MIN (%d), " - "correcting to MIN", - *nf, ah->nf_5g_min); - *nf = ah->nf_5g_min; - } -} - -static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) -{ - if (IS_CHAN_2GHZ(ah->curchan)) - ar9003_hw_nf_sanitize_2g(ah, nf); - else - ar9003_hw_nf_sanitize_5g(ah, nf); -} - static void ar9003_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { - struct ath_common *common = ath9k_hw_common(ah); int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ar9003_hw_nf_sanitize(ah, &nf); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 0] is %d\n", nf); - nfarray[0] = nf; + nfarray[0] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ar9003_hw_nf_sanitize(ah, &nf); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 1] is %d\n", nf); - nfarray[1] = nf; + nfarray[1] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ar9003_hw_nf_sanitize(ah, &nf); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ctl] [chain 2] is %d\n", nf); - nfarray[2] = nf; + nfarray[2] = sign_extend(nf, 9); + + if (!IS_CHAN_HT40(ah->curchan)) + return; nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ar9003_hw_nf_sanitize(ah, &nf); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 0] is %d\n", nf); - nfarray[3] = nf; + nfarray[3] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ar9003_hw_nf_sanitize(ah, &nf); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 1] is %d\n", nf); - nfarray[4] = nf; + nfarray[4] = sign_extend(nf, 9); nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR); - if (nf & 0x100) - nf = 0 - ((nf ^ 0x1ff) + 1); - ar9003_hw_nf_sanitize(ah, &nf); - ath_print(common, ATH_DBG_CALIBRATE, - "NF calibrated [ext] [chain 2] is %d\n", nf); - nfarray[5] = nf; + nfarray[5] = sign_extend(nf, 9); } -void ar9003_hw_set_nf_limits(struct ath_hw *ah) +static void ar9003_hw_set_nf_limits(struct ath_hw *ah) { - ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ; - ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ; - ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ; - ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ; + ah->nf_2g.max = 
AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ; + ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ; + ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ; + ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ; + ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ; + ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ; } /* - * Find out which of the RX chains are enabled + * Initialize the ANI register values with default (ini) values. + * This routine is called during a (full) hardware reset after + * all the registers are initialised from the INI. */ -static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah) +static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) { - u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK); - /* - * The bits [2:0] indicate the rx chain mask and are to be - * interpreted as follows: - * 00x => Only chain 0 is enabled - * 01x => Chain 1 and 0 enabled - * 1xx => Chain 2,1 and 0 enabled - */ - return chain & 0x7; + struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_channel *chan = ah->curchan; + struct ath9k_ani_default *iniDef; + int index; + u32 val; + + index = ath9k_hw_get_ani_channel_idx(ah, chan); + aniState = &ah->ani[index]; + ah->curani = aniState; + iniDef = &aniState->iniDef; + + ath_print(common, ATH_DBG_ANI, + "ver %d.%d opmode %u chan %d Mhz/0x%x\n", + ah->hw_version.macVersion, + ah->hw_version.macRev, + ah->opmode, + chan->channel, + chan->channelFlags); + + val = REG_READ(ah, AR_PHY_SFCORR); + iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH); + iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH); + iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR); + + val = REG_READ(ah, AR_PHY_SFCORR_LOW); + iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW); + iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW); + iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW); + + val = REG_READ(ah, AR_PHY_SFCORR_EXT); + iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH); + iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH); + iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW); + iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW); + iniDef->firstep = REG_READ_FIELD(ah, + AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRSTEP); + iniDef->firstepLow = REG_READ_FIELD(ah, + AR_PHY_FIND_SIG_LOW, + AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW); + iniDef->cycpwrThr1 = REG_READ_FIELD(ah, + AR_PHY_TIMING5, + AR_PHY_TIMING5_CYCPWR_THR1); + iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah, + AR_PHY_EXT_CCA, + AR_PHY_EXT_CYCPWR_THR1); + + /* these levels just got reset to defaults by the INI */ + aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; + aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; + aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; + aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK; + + aniState->cycleCount = 0; } -static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) +void ar9003_hw_attach_phy_ops(struct ath_hw *ah) { - struct ath9k_nfcal_hist *h; - unsigned i, j; - int32_t val; + struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); const u32 ar9300_cca_regs[6] = { AR_PHY_CCA_0, AR_PHY_CCA_1, @@ -1040,95 +1127,143 @@ static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) AR_PHY_EXT_CCA_1, AR_PHY_EXT_CCA_2, }; - u8 chainmask, rx_chain_status; - struct ath_common *common = ath9k_hw_common(ah); - rx_chain_status = ar9003_hw_get_rx_chainmask(ah); + priv_ops->rf_set_freq = ar9003_hw_set_channel; + 
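ar9003_hw_attach_phy_ops() here follows the usual ath9k convention of filling a per-family table of function pointers (struct ath_hw_private_ops) so the common code can invoke PHY routines without knowing which chip generation is present. A reduced sketch of that pattern with made-up names and only two hooks:

#include <stdio.h>

struct demo_hw;   /* opaque chip context, stand-in for struct ath_hw */

/* Minimal vtable, analogous to struct ath_hw_private_ops. */
struct demo_phy_ops {
        int  (*rf_set_freq)(struct demo_hw *ah, int freq_mhz);
        void (*do_getnf)(struct demo_hw *ah, short nfarray[6]);
};

static int demo_ar9003_set_freq(struct demo_hw *ah, int freq_mhz)
{
        (void)ah;
        printf("program synthesizer for %d MHz\n", freq_mhz);
        return 0;
}

static void demo_ar9003_getnf(struct demo_hw *ah, short nfarray[6])
{
        (void)ah;
        nfarray[0] = -110;   /* pretend chain-0 noise floor reading */
}

/* Family-specific attach routine populates the hooks once at init time. */
static void demo_attach_phy_ops(struct demo_phy_ops *ops)
{
        ops->rf_set_freq = demo_ar9003_set_freq;
        ops->do_getnf = demo_ar9003_getnf;
}

int main(void)
{
        struct demo_phy_ops ops;
        short nf[6] = { 0 };

        demo_attach_phy_ops(&ops);
        ops.rf_set_freq(NULL, 2412);   /* context unused in this sketch */
        ops.do_getnf(NULL, nf);
        return nf[0] == -110 ? 0 : 1;
}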
priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate; + priv_ops->compute_pll_control = ar9003_hw_compute_pll_control; + priv_ops->set_channel_regs = ar9003_hw_set_channel_regs; + priv_ops->init_bb = ar9003_hw_init_bb; + priv_ops->process_ini = ar9003_hw_process_ini; + priv_ops->set_rfmode = ar9003_hw_set_rfmode; + priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive; + priv_ops->set_delta_slope = ar9003_hw_set_delta_slope; + priv_ops->rfbus_req = ar9003_hw_rfbus_req; + priv_ops->rfbus_done = ar9003_hw_rfbus_done; + priv_ops->enable_rfkill = ar9003_hw_enable_rfkill; + priv_ops->set_diversity = ar9003_hw_set_diversity; + priv_ops->ani_control = ar9003_hw_ani_control; + priv_ops->do_getnf = ar9003_hw_do_getnf; + priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs; - chainmask = 0x3F; - h = ah->nfCalHist; + ar9003_hw_set_nf_limits(ah); + memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs)); +} - for (i = 0; i < NUM_NF_READINGS; i++) { - if (chainmask & (1 << i)) { - val = REG_READ(ah, ar9300_cca_regs[i]); - val &= 0xFFFFFE00; - val |= (((u32) (h[i].privNF) << 1) & 0x1ff); - REG_WRITE(ah, ar9300_cca_regs[i], val); - } +void ar9003_hw_bb_watchdog_config(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms; + u32 val, idle_count; + + if (!idle_tmo_ms) { + /* disable IRQ, disable chip-reset for BB panic */ + REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2, + REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & + ~(AR_PHY_WATCHDOG_RST_ENABLE | + AR_PHY_WATCHDOG_IRQ_ENABLE)); + + /* disable watchdog in non-IDLE mode, disable in IDLE mode */ + REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1, + REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) & + ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE | + AR_PHY_WATCHDOG_IDLE_ENABLE)); + + ath_print(common, ATH_DBG_RESET, "Disabled BB Watchdog\n"); + return; } + /* enable IRQ, disable chip-reset for BB watchdog */ + val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK; + REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2, + (val | AR_PHY_WATCHDOG_IRQ_ENABLE) & + ~AR_PHY_WATCHDOG_RST_ENABLE); + + /* bound limit to 10 secs */ + if (idle_tmo_ms > 10000) + idle_tmo_ms = 10000; + /* - * Load software filtered NF value into baseband internal minCCApwr - * variable. + * The time unit for watchdog event is 2^15 44/88MHz cycles. + * + * For HT20 we have a time unit of 2^15/44 MHz = .74 ms per tick + * For HT40 we have a time unit of 2^15/88 MHz = .37 ms per tick + * + * Given we use fast clock now in 5 GHz, these time units should + * be common for both 2 GHz and 5 GHz. */ - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, - AR_PHY_AGC_CONTROL_ENABLE_NF); - REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, - AR_PHY_AGC_CONTROL_NO_UPDATE_NF); - REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + idle_count = (100 * idle_tmo_ms) / 74; + if (ah->curchan && IS_CHAN_HT40(ah->curchan)) + idle_count = (100 * idle_tmo_ms) / 37; /* - * Wait for load to complete, should be fast, a few 10s of us. - * The max delay was changed from an original 250us to 10000us - * since 250us often results in NF load timeout and causes deaf - * condition during stress testing 12/12/2009 + * enable watchdog in non-IDLE mode, disable in IDLE mode, + * set idle time-out. 
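As the comment above spells out, one watchdog tick is 2^15 baseband clock cycles: 32768 / 44 MHz is roughly 0.74 ms in HT20 and 32768 / 88 MHz roughly 0.37 ms in HT40, which is where the (100 * idle_tmo_ms) / 74 and / 37 expressions come from. The same conversion as a stand-alone sketch (hypothetical helper, not part of the driver):

#include <stdint.h>

/*
 * Convert a timeout in milliseconds into BB watchdog idle ticks.
 * One tick is 2^15 cycles of the 44 MHz (HT20) or 88 MHz (HT40)
 * baseband clock, i.e. about 0.74 ms or 0.37 ms, so this integer
 * approximation matches the form used in ar9003_hw_bb_watchdog_config().
 */
static uint32_t demo_wd_ms_to_ticks(uint32_t timeout_ms, int is_ht40)
{
        if (timeout_ms > 10000)       /* the driver bounds the limit to 10 s */
                timeout_ms = 10000;

        return is_ht40 ? (100 * timeout_ms) / 37
                       : (100 * timeout_ms) / 74;
}

/* Example: demo_wd_ms_to_ticks(1000, 0) == 1351 ticks, about 1.0 s in HT20. */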
*/ - for (j = 0; j < 1000; j++) { - if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & - AR_PHY_AGC_CONTROL_NF) == 0) - break; - udelay(10); - } + REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1, + AR_PHY_WATCHDOG_NON_IDLE_ENABLE | + AR_PHY_WATCHDOG_IDLE_MASK | + (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2))); + + ath_print(common, ATH_DBG_RESET, + "Enabled BB Watchdog timeout (%u ms)\n", + idle_tmo_ms); +} +void ar9003_hw_bb_watchdog_read(struct ath_hw *ah) +{ /* - * We timed out waiting for the noisefloor to load, probably due to an - * in-progress rx. Simply return here and allow the load plenty of time - * to complete before the next calibration interval. We need to avoid - * trying to load -50 (which happens below) while the previous load is - * still in progress as this can cause rx deafness. Instead by returning - * here, the baseband nf cal will just be capped by our present - * noisefloor until the next calibration timer. + * we want to avoid printing in ISR context so we save the + * watchdog status to be printed later in bottom half context. */ - if (j == 1000) { - ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf " - "to load: AR_PHY_AGC_CONTROL=0x%x\n", - REG_READ(ah, AR_PHY_AGC_CONTROL)); - return; - } + ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS); /* - * Restore maxCCAPower register parameter again so that we're not capped - * by the median we just loaded. This will be initial (and max) value - * of next noise floor calibration the baseband does. + * the watchdog timer should reset on status read but to be sure + * sure we write 0 to the watchdog status bit. */ - for (i = 0; i < NUM_NF_READINGS; i++) { - if (chainmask & (1 << i)) { - val = REG_READ(ah, ar9300_cca_regs[i]); - val &= 0xFFFFFE00; - val |= (((u32) (-50) << 1) & 0x1ff); - REG_WRITE(ah, ar9300_cca_regs[i], val); - } - } + REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS, + ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR); } -void ar9003_hw_attach_phy_ops(struct ath_hw *ah) +void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) { - struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); + struct ath_common *common = ath9k_hw_common(ah); + u32 rxc_pcnt = 0, rxf_pcnt = 0, txf_pcnt = 0, status; - priv_ops->rf_set_freq = ar9003_hw_set_channel; - priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate; - priv_ops->compute_pll_control = ar9003_hw_compute_pll_control; - priv_ops->set_channel_regs = ar9003_hw_set_channel_regs; - priv_ops->init_bb = ar9003_hw_init_bb; - priv_ops->process_ini = ar9003_hw_process_ini; - priv_ops->set_rfmode = ar9003_hw_set_rfmode; - priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive; - priv_ops->set_delta_slope = ar9003_hw_set_delta_slope; - priv_ops->rfbus_req = ar9003_hw_rfbus_req; - priv_ops->rfbus_done = ar9003_hw_rfbus_done; - priv_ops->enable_rfkill = ar9003_hw_enable_rfkill; - priv_ops->set_diversity = ar9003_hw_set_diversity; - priv_ops->ani_control = ar9003_hw_ani_control; - priv_ops->do_getnf = ar9003_hw_do_getnf; - priv_ops->loadnf = ar9003_hw_loadnf; + if (likely(!(common->debug_mask & ATH_DBG_RESET))) + return; + + status = ah->bb_watchdog_last_status; + ath_print(common, ATH_DBG_RESET, + "\n==== BB update: BB status=0x%08x ====\n", status); + ath_print(common, ATH_DBG_RESET, + "** BB state: wd=%u det=%u rdar=%u rOFDM=%d " + "rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n", + MS(status, AR_PHY_WATCHDOG_INFO), + MS(status, AR_PHY_WATCHDOG_DET_HANG), + MS(status, AR_PHY_WATCHDOG_RADAR_SM), + MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM), + MS(status, 
AR_PHY_WATCHDOG_RX_CCK_SM), + MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM), + MS(status, AR_PHY_WATCHDOG_TX_CCK_SM), + MS(status, AR_PHY_WATCHDOG_AGC_SM), + MS(status,AR_PHY_WATCHDOG_SRCH_SM)); + + ath_print(common, ATH_DBG_RESET, + "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n", + REG_READ(ah, AR_PHY_WATCHDOG_CTL_1), + REG_READ(ah, AR_PHY_WATCHDOG_CTL_2)); + ath_print(common, ATH_DBG_RESET, + "** BB mode: BB_gen_controls=0x%08x **\n", + REG_READ(ah, AR_PHY_GEN_CTRL)); + + if (ath9k_hw_GetMibCycleCountsPct(ah, &rxc_pcnt, &rxf_pcnt, &txf_pcnt)) + ath_print(common, ATH_DBG_RESET, + "** BB busy times: rx_clear=%d%%, " + "rx_frame=%d%%, tx_frame=%d%% **\n", + rxc_pcnt, rxf_pcnt, txf_pcnt); + + ath_print(common, ATH_DBG_RESET, + "==== BB update: done ====\n\n"); } +EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index f08cc8bda005c122fd4677c26a987eabe21c6bb9..3394dfe52b42052657208f46361e4a082b2954ce 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -149,6 +149,8 @@ #define AR_PHY_EXT_CCA_THRESH62_S 16 #define AR_PHY_EXT_MINCCA_PWR 0x01FF0000 #define AR_PHY_EXT_MINCCA_PWR_S 16 +#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L +#define AR_PHY_EXT_CYCPWR_THR1_S 9 #define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE #define AR_PHY_TIMING5_CYCPWR_THR1_S 1 #define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001 @@ -283,6 +285,12 @@ #define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00 #define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9 +#define AR_PHY_MRC_CCK_CTRL (AR_AGC_BASE + 0x1d0) +#define AR_PHY_MRC_CCK_ENABLE 0x00000001 +#define AR_PHY_MRC_CCK_ENABLE_S 0 +#define AR_PHY_MRC_CCK_MUX_REG 0x00000002 +#define AR_PHY_MRC_CCK_MUX_REG_S 1 + #define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200) #define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110 @@ -451,7 +459,11 @@ #define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) #define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c) -#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170) + +#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170) +#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008 +#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3 + #define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174) #define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178) #define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c) @@ -467,30 +479,86 @@ #define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0) #define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4) -#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204) -#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208) -#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c) -#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220) -#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c) -#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240) +#define AR_PHY_TPC_1 (AR_SM_BASE + 0x1f8) +#define AR_PHY_TPC_1_FORCED_DAC_GAIN 0x0000003e +#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S 1 +#define AR_PHY_TPC_1_FORCE_DAC_GAIN 0x00000001 +#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S 0 + +#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204) +#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208) +#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c) + +#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220) +#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) +#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220) +#define AR_PHY_TPC_11_OLPC_GAIN_DELTA 0x00ff0000 +#define AR_PHY_TPC_11_OLPC_GAIN_DELTA_S 16 + +#define AR_PHY_TPC_12 (AR_SM_BASE + 0x224) +#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5 0x3e000000 +#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5_S 25 + +#define AR_PHY_TPC_18 
(AR_SM_BASE + 0x23c) +#define AR_PHY_TPC_18_THERM_CAL_VALUE 0x000000ff +#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0 +#define AR_PHY_TPC_18_VOLT_CAL_VALUE 0x0000ff00 +#define AR_PHY_TPC_18_VOLT_CAL_VALUE_S 8 + +#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240) +#define AR_PHY_TPC_19_ALPHA_VOLT 0x001f0000 +#define AR_PHY_TPC_19_ALPHA_VOLT_S 16 +#define AR_PHY_TPC_19_ALPHA_THERM 0xff +#define AR_PHY_TPC_19_ALPHA_THERM_S 0 + +#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258) +#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN 0x00000001 +#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN_S 0 +#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN 0x0000000e +#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_S 1 +#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN 0x00000030 +#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_S 4 +#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN 0x000003c0 +#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN_S 6 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA 0x00003c00 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA_S 10 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB 0x0003c000 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB_S 14 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC 0x003c0000 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC_S 18 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND 0x00c00000 +#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND_S 22 +#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL 0x01000000 +#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL_S 24 -#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258) #define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280) +#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) + #define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448) #define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440) #define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c) #define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450) -#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0) -#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4) -#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8) -#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc) +#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0) +#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4) +#define AR_PHY_WATCHDOG_CTL_2 (AR_SM_BASE + 0x5c8) +#define AR_PHY_WATCHDOG_CTL (AR_SM_BASE + 0x5cc) #define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0) #define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4) #define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc) -#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248) + +#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248) +#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff +#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0 + +#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254) +#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff +#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0 +#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00 +#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8 + #define AR_PHY_65NM_CH0_SYNTH4 0x1608c #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002 @@ -660,17 +728,9 @@ #define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff #define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0 -#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff -#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0 -#define AR_PHY_TPC_19_ALPHA_THERM 0xff -#define AR_PHY_TPC_19_ALPHA_THERM_S 0 - #define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28 -#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff -#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0 - /* * Channel 1 Register Map */ @@ -812,35 +872,173 @@ #define 
AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i))) #define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i))) -#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001 -#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002 -#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000 -#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC - -#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002 -#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004 -#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9 - -#define AR_PHY_BB_WD_STATUS 0x00000007 -#define AR_PHY_BB_WD_STATUS_S 0 -#define AR_PHY_BB_WD_DET_HANG 0x00000008 -#define AR_PHY_BB_WD_DET_HANG_S 3 -#define AR_PHY_BB_WD_RADAR_SM 0x000000F0 -#define AR_PHY_BB_WD_RADAR_SM_S 4 -#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00 -#define AR_PHY_BB_WD_RX_OFDM_SM_S 8 -#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000 -#define AR_PHY_BB_WD_RX_CCK_SM_S 12 -#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000 -#define AR_PHY_BB_WD_TX_OFDM_SM_S 16 -#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000 -#define AR_PHY_BB_WD_TX_CCK_SM_S 20 -#define AR_PHY_BB_WD_AGC_SM 0x0F000000 -#define AR_PHY_BB_WD_AGC_SM_S 24 -#define AR_PHY_BB_WD_SRCH_SM 0xF0000000 -#define AR_PHY_BB_WD_SRCH_SM_S 28 - -#define AR_PHY_BB_WD_STATUS_CLR 0x00000008 +#define AR_PHY_WATCHDOG_NON_IDLE_ENABLE 0x00000001 +#define AR_PHY_WATCHDOG_IDLE_ENABLE 0x00000002 +#define AR_PHY_WATCHDOG_IDLE_MASK 0xFFFF0000 +#define AR_PHY_WATCHDOG_NON_IDLE_MASK 0x0000FFFC + +#define AR_PHY_WATCHDOG_RST_ENABLE 0x00000002 +#define AR_PHY_WATCHDOG_IRQ_ENABLE 0x00000004 +#define AR_PHY_WATCHDOG_CNTL2_MASK 0xFFFFFFF9 + +#define AR_PHY_WATCHDOG_INFO 0x00000007 +#define AR_PHY_WATCHDOG_INFO_S 0 +#define AR_PHY_WATCHDOG_DET_HANG 0x00000008 +#define AR_PHY_WATCHDOG_DET_HANG_S 3 +#define AR_PHY_WATCHDOG_RADAR_SM 0x000000F0 +#define AR_PHY_WATCHDOG_RADAR_SM_S 4 +#define AR_PHY_WATCHDOG_RX_OFDM_SM 0x00000F00 +#define AR_PHY_WATCHDOG_RX_OFDM_SM_S 8 +#define AR_PHY_WATCHDOG_RX_CCK_SM 0x0000F000 +#define AR_PHY_WATCHDOG_RX_CCK_SM_S 12 +#define AR_PHY_WATCHDOG_TX_OFDM_SM 0x000F0000 +#define AR_PHY_WATCHDOG_TX_OFDM_SM_S 16 +#define AR_PHY_WATCHDOG_TX_CCK_SM 0x00F00000 +#define AR_PHY_WATCHDOG_TX_CCK_SM_S 20 +#define AR_PHY_WATCHDOG_AGC_SM 0x0F000000 +#define AR_PHY_WATCHDOG_AGC_SM_S 24 +#define AR_PHY_WATCHDOG_SRCH_SM 0xF0000000 +#define AR_PHY_WATCHDOG_SRCH_SM_S 28 + +#define AR_PHY_WATCHDOG_STATUS_CLR 0x00000008 + +/* + * PAPRD registers + */ +#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64) + +#define AR_PHY_PAPRD_AM2AM (AR_CHAN_BASE + 0xe4) +#define AR_PHY_PAPRD_AM2AM_MASK 0x01ffffff +#define AR_PHY_PAPRD_AM2AM_MASK_S 0 + +#define AR_PHY_PAPRD_AM2PM (AR_CHAN_BASE + 0xe8) +#define AR_PHY_PAPRD_AM2PM_MASK 0x01ffffff +#define AR_PHY_PAPRD_AM2PM_MASK_S 0 + +#define AR_PHY_PAPRD_HT40 (AR_CHAN_BASE + 0xec) +#define AR_PHY_PAPRD_HT40_MASK 0x01ffffff +#define AR_PHY_PAPRD_HT40_MASK_S 0 + +#define AR_PHY_PAPRD_CTRL0_B0 (AR_CHAN_BASE + 0xf0) +#define AR_PHY_PAPRD_CTRL0_B1 (AR_CHAN1_BASE + 0xf0) +#define AR_PHY_PAPRD_CTRL0_B2 (AR_CHAN2_BASE + 0xf0) +#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE 0x00000001 +#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE_S 0 +#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK 0x00000002 +#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK_S 1 +#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH 0xf8000000 +#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH_S 27 + +#define AR_PHY_PAPRD_CTRL1_B0 (AR_CHAN_BASE + 0xf4) +#define AR_PHY_PAPRD_CTRL1_B1 (AR_CHAN1_BASE + 0xf4) +#define AR_PHY_PAPRD_CTRL1_B2 (AR_CHAN2_BASE + 0xf4) +#define 
AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA 0x00000001 +#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA_S 0 +#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE 0x00000002 +#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE_S 1 +#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE 0x00000004 +#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE_S 2 +#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL 0x000001f8 +#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_S 3 +#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK 0x0001fe00 +#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK_S 9 +#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000 +#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17 + +#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + 0x490) +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_S 1 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE 0x00000100 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_S 8 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE 0x00000200 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_S 9 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE 0x00000400 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_S 10 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE 0x00000800 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_S 11 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000 +#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12 + +#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + 0x494) +#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF +#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0 + +#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + 0x498) +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_S 6 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL 0x0001f000 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_S 12 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES 0x000e0000 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_S 17 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN 0x00f00000 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_S 20 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN 0x0f000000 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_S 24 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000 +#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29 + +#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + 0x49c) +#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000 +#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16 +#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000 +#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_S 12 +#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR 0x00000fff +#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_S 0 + +#define AR_PHY_PAPRD_PRE_POST_SCALE_0_B0 (AR_CHAN_BASE + 0x100) +#define AR_PHY_PAPRD_PRE_POST_SCALE_1_B0 (AR_CHAN_BASE + 0x104) +#define AR_PHY_PAPRD_PRE_POST_SCALE_2_B0 (AR_CHAN_BASE + 0x108) +#define 
AR_PHY_PAPRD_PRE_POST_SCALE_3_B0 (AR_CHAN_BASE + 0x10c) +#define AR_PHY_PAPRD_PRE_POST_SCALE_4_B0 (AR_CHAN_BASE + 0x110) +#define AR_PHY_PAPRD_PRE_POST_SCALE_5_B0 (AR_CHAN_BASE + 0x114) +#define AR_PHY_PAPRD_PRE_POST_SCALE_6_B0 (AR_CHAN_BASE + 0x118) +#define AR_PHY_PAPRD_PRE_POST_SCALE_7_B0 (AR_CHAN_BASE + 0x11c) +#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF +#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0 + +#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + 0x4a0) +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_S 1 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR 0x00000004 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_S 2 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE 0x00000008 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_S 3 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX 0x000001f0 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_S 4 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00 +#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9 + +#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + 0x4a4) +#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff +#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0 +#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000 +#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_S 16 +#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000 +#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21 + +#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + 0x4a8) +#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff +#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0 + +#define AR_PHY_PAPRD_MEM_TAB_B0 (AR_CHAN_BASE + 0x120) +#define AR_PHY_PAPRD_MEM_TAB_B1 (AR_CHAN1_BASE + 0x120) +#define AR_PHY_PAPRD_MEM_TAB_B2 (AR_CHAN2_BASE + 0x120) + +#define AR_PHY_PA_GAIN123_B0 (AR_CHAN_BASE + 0xf8) +#define AR_PHY_PA_GAIN123_B1 (AR_CHAN1_BASE + 0xf8) +#define AR_PHY_PA_GAIN123_B2 (AR_CHAN2_BASE + 0xf8) +#define AR_PHY_PA_GAIN123_PA_GAIN1 0x3FF +#define AR_PHY_PA_GAIN123_PA_GAIN1_S 0 + +#define AR_PHY_POWERTX_RATE5 (AR_SM_BASE + 0x1d0) +#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F +#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0 void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 5ea87736a6ae49e0fa5ffe764241b7f72722bb2f..998ae2c49ed27abb8cbe8a1fb8fe8b37adc0468c 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -20,6 +20,7 @@ #include #include #include +#include #include "debug.h" #include "common.h" @@ -136,6 +137,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_MAX_ANTENNA 3 #define ATH_RXBUF 512 #define ATH_TXBUF 512 +#define ATH_TXBUF_RESERVE 5 +#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE) #define ATH_TXMAXTRY 13 #define ATH_MGT_TXMAXTRY 4 @@ -192,6 +195,7 @@ enum ATH_AGGR_STATUS { #define ATH_TXFIFO_DEPTH 8 struct ath_txq { + int axq_class; u32 axq_qnum; u32 *axq_link; struct list_head axq_q; @@ -206,6 +210,71 @@ struct ath_txq { u8 txq_tailidx; }; +struct ath_atx_ac { + int sched; + int qnum; + struct list_head list; + struct list_head tid_q; +}; + +struct ath_buf_state { + int bfs_nframes; + u16 bfs_al; + u16 bfs_frmlen; + int bfs_seqno; + int bfs_tidno; + int 
bfs_retries; + u8 bf_type; + u8 bfs_paprd; + unsigned long bfs_paprd_timestamp; + u32 bfs_keyix; + enum ath9k_key_type bfs_keytype; +}; + +struct ath_buf { + struct list_head list; + struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or + an aggregate) */ + struct ath_buf *bf_next; /* next subframe in the aggregate */ + struct sk_buff *bf_mpdu; /* enclosing frame structure */ + void *bf_desc; /* virtual addr of desc */ + dma_addr_t bf_daddr; /* physical addr of desc */ + dma_addr_t bf_buf_addr; /* physical addr of data buffer */ + bool bf_stale; + bool bf_isnullfunc; + bool bf_tx_aborted; + u16 bf_flags; + struct ath_buf_state bf_state; + dma_addr_t bf_dmacontext; + struct ath_wiphy *aphy; +}; + +struct ath_atx_tid { + struct list_head list; + struct list_head buf_q; + struct ath_node *an; + struct ath_atx_ac *ac; + struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; + u16 seq_start; + u16 seq_next; + u16 baw_size; + int tidno; + int baw_head; /* first un-acked tx buffer */ + int baw_tail; /* next unused tx buffer slot */ + int sched; + int paused; + u8 state; +}; + +struct ath_node { + struct ath_common *common; + struct ath_atx_tid tid[WME_NUM_TID]; + struct ath_atx_ac ac[WME_NUM_AC]; + u16 maxampdu; + u8 mpdudensity; + int last_rssi; +}; + #define AGGR_CLEANUP BIT(1) #define AGGR_ADDBA_COMPLETE BIT(2) #define AGGR_ADDBA_PROGRESS BIT(3) @@ -214,6 +283,7 @@ struct ath_tx_control { struct ath_txq *txq; int if_id; enum ath9k_internal_frame_type frame_type; + u8 paprd; }; #define ATH_TX_ERROR 0x01 @@ -223,11 +293,12 @@ struct ath_tx_control { struct ath_tx { u16 seq_no; u32 txqsetup; - int hwq_map[ATH9K_WME_AC_VO+1]; + int hwq_map[WME_NUM_AC]; spinlock_t txbuflock; struct list_head txbuf; struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; struct ath_descdma txdma; + int pending_frames[WME_NUM_AC]; }; struct ath_rx_edma { @@ -267,7 +338,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an); void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); int ath_tx_init(struct ath_softc *sc, int nbufs); void ath_tx_cleanup(struct ath_softc *sc); -struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb); int ath_txq_update(struct ath_softc *sc, int qnum, struct ath9k_tx_queue_info *q); int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, @@ -351,10 +421,15 @@ int ath_beaconq_config(struct ath_softc *sc); #define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ #define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ -#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */ +#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */ +#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */ #define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ #define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ +#define ATH_PAPRD_TIMEOUT 100 /* msecs */ + +void ath_hw_check(struct work_struct *work); +void ath_paprd_calibrate(struct work_struct *work); void ath_ani_calibrate(unsigned long data); /**********/ @@ -487,6 +562,9 @@ struct ath_softc { spinlock_t sc_serial_rw; spinlock_t sc_pm_lock; struct mutex mutex; + struct work_struct paprd_work; + struct work_struct hw_check_work; + struct completion paprd_complete; u32 intrstatus; u32 sc_flags; /* SC_OP_* */ @@ -545,7 +623,6 @@ struct ath_wiphy { void ath9k_tasklet(unsigned long data); int ath_reset(struct ath_softc *sc, bool retry_tx); -int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); int ath_cabq_update(struct ath_softc *); @@ -556,13 +633,12 @@ static inline void 
ath_read_cachesize(struct ath_common *common, int *csz) extern struct ieee80211_ops ath9k_ops; extern int modparam_nohwcrypt; +extern int led_blink; irqreturn_t ath_isr(int irq, void *dev); int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, const struct ath_bus_ops *bus_ops); void ath9k_deinit_device(struct ath_softc *sc); -const char *ath_mac_bb_name(u32 mac_bb_version); -const char *ath_rf_name(u16 rf_version); void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, struct ath9k_channel *ichan); @@ -611,9 +687,7 @@ bool ath9k_all_wiphys_idle(struct ath_softc *sc); void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle); void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue); -void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); - -int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); +bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); void ath_start_rfkill_poll(struct ath_softc *sc); extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index f43d85a302c461e52080a8f8021e835d5874b8d3..4d4b22d52dfd230a7a61b3f0dd8b5b612c38af1f 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -38,8 +38,7 @@ int ath_beaconq_config(struct ath_softc *sc) qi.tqi_cwmax = 0; } else { /* Adhoc mode; important thing is to use 2x cwmin. */ - qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, - ATH9K_WME_AC_BE); + qnum = sc->tx.hwq_map[WME_AC_BE]; ath9k_hw_get_txq_props(ah, qnum, &qi_be); qi.tqi_aifs = qi_be.tqi_aifs; qi.tqi_cwmin = 4*qi_be.tqi_cwmin; diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 07b8fa6fb62f3813edfcae71c97c24d62d736a5f..139289e4e933dfba2345d74add499174274b0571 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -74,13 +74,8 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h, h[i].currIndex = 0; if (h[i].invalidNFcount > 0) { - if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE || - nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) { - h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX; - } else { - h[i].invalidNFcount--; - h[i].privNF = nfarray[i]; - } + h[i].invalidNFcount--; + h[i].privNF = nfarray[i]; } else { h[i].privNF = ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); @@ -172,6 +167,116 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah) REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); } +void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) +{ + struct ath9k_nfcal_hist *h; + unsigned i, j; + int32_t val; + u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; + struct ath_common *common = ath9k_hw_common(ah); + + h = ah->nfCalHist; + + for (i = 0; i < NUM_NF_READINGS; i++) { + if (chainmask & (1 << i)) { + val = REG_READ(ah, ah->nf_regs[i]); + val &= 0xFFFFFE00; + val |= (((u32) (h[i].privNF) << 1) & 0x1ff); + REG_WRITE(ah, ah->nf_regs[i], val); + } + } + + /* + * Load software filtered NF value into baseband internal minCCApwr + * variable. + */ + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_ENABLE_NF); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + + /* + * Wait for load to complete, should be fast, a few 10s of us. 
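	 * (Illustrative, not part of the original patch: the loop below polls
	 *  at most 1000 times with udelay(10), i.e. a ceiling of
	 *  1000 * 10 us = 10000 us.)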
+ * The max delay was changed from an original 250us to 10000us + * since 250us often results in NF load timeout and causes deaf + * condition during stress testing 12/12/2009 + */ + for (j = 0; j < 1000; j++) { + if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & + AR_PHY_AGC_CONTROL_NF) == 0) + break; + udelay(10); + } + + /* + * We timed out waiting for the noisefloor to load, probably due to an + * in-progress rx. Simply return here and allow the load plenty of time + * to complete before the next calibration interval. We need to avoid + * trying to load -50 (which happens below) while the previous load is + * still in progress as this can cause rx deafness. Instead by returning + * here, the baseband nf cal will just be capped by our present + * noisefloor until the next calibration timer. + */ + if (j == 1000) { + ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf " + "to load: AR_PHY_AGC_CONTROL=0x%x\n", + REG_READ(ah, AR_PHY_AGC_CONTROL)); + return; + } + + /* + * Restore maxCCAPower register parameter again so that we're not capped + * by the median we just loaded. This will be initial (and max) value + * of next noise floor calibration the baseband does. + */ + ENABLE_REGWRITE_BUFFER(ah); + for (i = 0; i < NUM_NF_READINGS; i++) { + if (chainmask & (1 << i)) { + val = REG_READ(ah, ah->nf_regs[i]); + val &= 0xFFFFFE00; + val |= (((u32) (-50) << 1) & 0x1ff); + REG_WRITE(ah, ah->nf_regs[i], val); + } + } + REGWRITE_BUFFER_FLUSH(ah); + DISABLE_REGWRITE_BUFFER(ah); +} + + +static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath_nf_limits *limit; + int i; + + if (IS_CHAN_2GHZ(ah->curchan)) + limit = &ah->nf_2g; + else + limit = &ah->nf_5g; + + for (i = 0; i < NUM_NF_READINGS; i++) { + if (!nf[i]) + continue; + + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [%s] [chain %d] is %d\n", + (i >= 3 ? 
"ext" : "ctl"), i % 3, nf[i]); + + if (nf[i] > limit->max) { + ath_print(common, ATH_DBG_CALIBRATE, + "NF[%d] (%d) > MAX (%d), correcting to MAX", + i, nf[i], limit->max); + nf[i] = limit->max; + } else if (nf[i] < limit->min) { + ath_print(common, ATH_DBG_CALIBRATE, + "NF[%d] (%d) < MIN (%d), correcting to NOM", + i, nf[i], limit->min); + nf[i] = limit->nominal; + } + } +} + int16_t ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -190,6 +295,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah, return chan->rawNoiseFloor; } else { ath9k_hw_do_getnf(ah, nfarray); + ath9k_hw_nf_sanitize(ah, nfarray); nf = nfarray[0]; if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) && nf > nfThresh) { @@ -211,25 +317,21 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah, void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah) { + struct ath_nf_limits *limit; int i, j; - s16 noise_floor; - - if (AR_SREV_9280(ah)) - noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE; - else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) - noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE; - else if (AR_SREV_9287(ah)) - noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE; + + if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan)) + limit = &ah->nf_2g; else - noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE; + limit = &ah->nf_5g; for (i = 0; i < NUM_NF_READINGS; i++) { ah->nfCalHist[i].currIndex = 0; - ah->nfCalHist[i].privNF = noise_floor; + ah->nfCalHist[i].privNF = limit->nominal; ah->nfCalHist[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH; for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { - ah->nfCalHist[i].nfCalBuffer[j] = noise_floor; + ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal; } } } diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h index 24538bdb9126c9239402206275bdbefddb1a4451..cd60d09cdda7646ad7c9db67a9aa3cb293649dae 100644 --- a/drivers/net/wireless/ath/ath9k/calib.h +++ b/drivers/net/wireless/ath/ath9k/calib.h @@ -19,12 +19,6 @@ #include "hw.h" -#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85 -#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112 -#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118 -#define AR_PHY_CCA_MAX_AR9287_GOOD_VALUE -118 -#define AR_PHY_CCA_MAX_HIGH_VALUE -62 -#define AR_PHY_CCA_MIN_BAD_VALUE -140 #define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3 #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 @@ -115,6 +109,7 @@ struct ath9k_pacal_info{ bool ath9k_hw_reset_calvalid(struct ath_hw *ah); void ath9k_hw_start_nfcal(struct ath_hw *ah); +void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); int16_t ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index 7707341cd0d3a70bbdee237fe8eccdf3eaf95447..c86f7d3593ab48a3c1c1248443f0f2f599a327f8 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -27,270 +27,6 @@ MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); -/* Common RX processing */ - -/* Assumes you've already done the endian to CPU conversion */ -static bool ath9k_rx_accept(struct ath_common *common, - struct sk_buff *skb, - struct ieee80211_rx_status *rxs, - struct ath_rx_status *rx_stats, - bool *decrypt_error) -{ - struct ath_hw *ah = common->ah; - struct ieee80211_hdr *hdr; - __le16 fc; - - hdr = (struct ieee80211_hdr *) skb->data; - fc = hdr->frame_control; - - if 
(!rx_stats->rs_datalen) - return false; - /* - * rs_status follows rs_datalen so if rs_datalen is too large - * we can take a hint that hardware corrupted it, so ignore - * those frames. - */ - if (rx_stats->rs_datalen > common->rx_bufsize) - return false; - - /* - * rs_more indicates chained descriptors which can be used - * to link buffers together for a sort of scatter-gather - * operation. - * reject the frame, we don't support scatter-gather yet and - * the frame is probably corrupt anyway - */ - if (rx_stats->rs_more) - return false; - - /* - * The rx_stats->rs_status will not be set until the end of the - * chained descriptors so it can be ignored if rs_more is set. The - * rs_more will be false at the last element of the chained - * descriptors. - */ - if (rx_stats->rs_status != 0) { - if (rx_stats->rs_status & ATH9K_RXERR_CRC) - rxs->flag |= RX_FLAG_FAILED_FCS_CRC; - if (rx_stats->rs_status & ATH9K_RXERR_PHY) - return false; - - if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { - *decrypt_error = true; - } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { - if (ieee80211_is_ctl(fc)) - /* - * Sometimes, we get invalid - * MIC failures on valid control frames. - * Remove these mic errors. - */ - rx_stats->rs_status &= ~ATH9K_RXERR_MIC; - else - rxs->flag |= RX_FLAG_MMIC_ERROR; - } - /* - * Reject error frames with the exception of - * decryption and MIC failures. For monitor mode, - * we also ignore the CRC error. - */ - if (ah->opmode == NL80211_IFTYPE_MONITOR) { - if (rx_stats->rs_status & - ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | - ATH9K_RXERR_CRC)) - return false; - } else { - if (rx_stats->rs_status & - ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { - return false; - } - } - } - return true; -} - -static int ath9k_process_rate(struct ath_common *common, - struct ieee80211_hw *hw, - struct ath_rx_status *rx_stats, - struct ieee80211_rx_status *rxs, - struct sk_buff *skb) -{ - struct ieee80211_supported_band *sband; - enum ieee80211_band band; - unsigned int i = 0; - - band = hw->conf.channel->band; - sband = hw->wiphy->bands[band]; - - if (rx_stats->rs_rate & 0x80) { - /* HT rate */ - rxs->flag |= RX_FLAG_HT; - if (rx_stats->rs_flags & ATH9K_RX_2040) - rxs->flag |= RX_FLAG_40MHZ; - if (rx_stats->rs_flags & ATH9K_RX_GI) - rxs->flag |= RX_FLAG_SHORT_GI; - rxs->rate_idx = rx_stats->rs_rate & 0x7f; - return 0; - } - - for (i = 0; i < sband->n_bitrates; i++) { - if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { - rxs->rate_idx = i; - return 0; - } - if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { - rxs->flag |= RX_FLAG_SHORTPRE; - rxs->rate_idx = i; - return 0; - } - } - - /* - * No valid hardware bitrate found -- we should not get here - * because hardware has already validated this frame as OK. - */ - ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " - "0x%02x using 1 Mbit\n", rx_stats->rs_rate); - if ((common->debug_mask & ATH_DBG_XMIT)) - print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len); - - return -EINVAL; -} - -static void ath9k_process_rssi(struct ath_common *common, - struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ath_rx_status *rx_stats) -{ - struct ath_hw *ah = common->ah; - struct ieee80211_sta *sta; - struct ieee80211_hdr *hdr; - struct ath_node *an; - int last_rssi = ATH_RSSI_DUMMY_MARKER; - __le16 fc; - - hdr = (struct ieee80211_hdr *)skb->data; - fc = hdr->frame_control; - - rcu_read_lock(); - /* - * XXX: use ieee80211_find_sta! 
This requires quite a bit of work - * under the current ath9k virtual wiphy implementation as we have - * no way of tying a vif to wiphy. Typically vifs are attached to - * at least one sdata of a wiphy on mac80211 but with ath9k virtual - * wiphy you'd have to iterate over every wiphy and each sdata. - */ - sta = ieee80211_find_sta_by_hw(hw, hdr->addr2); - if (sta) { - an = (struct ath_node *) sta->drv_priv; - if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && - !rx_stats->rs_moreaggr) - ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi); - last_rssi = an->last_rssi; - } - rcu_read_unlock(); - - if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) - rx_stats->rs_rssi = ATH_EP_RND(last_rssi, - ATH_RSSI_EP_MULTIPLIER); - if (rx_stats->rs_rssi < 0) - rx_stats->rs_rssi = 0; - - /* Update Beacon RSSI, this is used by ANI. */ - if (ieee80211_is_beacon(fc)) - ah->stats.avgbrssi = rx_stats->rs_rssi; -} - -/* - * For Decrypt or Demic errors, we only mark packet status here and always push - * up the frame up to let mac80211 handle the actual error case, be it no - * decryption key or real decryption error. This let us keep statistics there. - */ -int ath9k_cmn_rx_skb_preprocess(struct ath_common *common, - struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ath_rx_status *rx_stats, - struct ieee80211_rx_status *rx_status, - bool *decrypt_error) -{ - struct ath_hw *ah = common->ah; - - memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); - - /* - * everything but the rate is checked here, the rate check is done - * separately to avoid doing two lookups for a rate for each frame. - */ - if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error)) - return -EINVAL; - - ath9k_process_rssi(common, hw, skb, rx_stats); - - if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb)) - return -EINVAL; - - rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp); - rx_status->band = hw->conf.channel->band; - rx_status->freq = hw->conf.channel->center_freq; - rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; - rx_status->antenna = rx_stats->rs_antenna; - rx_status->flag |= RX_FLAG_TSFT; - - return 0; -} -EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess); - -void ath9k_cmn_rx_skb_postprocess(struct ath_common *common, - struct sk_buff *skb, - struct ath_rx_status *rx_stats, - struct ieee80211_rx_status *rxs, - bool decrypt_error) -{ - struct ath_hw *ah = common->ah; - struct ieee80211_hdr *hdr; - int hdrlen, padpos, padsize; - u8 keyix; - __le16 fc; - - /* see if any padding is done by the hw and remove it */ - hdr = (struct ieee80211_hdr *) skb->data; - hdrlen = ieee80211_get_hdrlen_from_skb(skb); - fc = hdr->frame_control; - padpos = ath9k_cmn_padpos(hdr->frame_control); - - /* The MAC header is padded to have 32-bit boundary if the - * packet payload is non-zero. The general calculation for - * padsize would take into account odd header lengths: - * padsize = (4 - padpos % 4) % 4; However, since only - * even-length headers are used, padding can only be 0 or 2 - * bytes and we can optimize this a bit. In addition, we must - * not try to remove padding from short control frames that do - * not have payload. 
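	 * (Illustrative check, not part of the original patch: for a QoS data
	 *  header, padpos = 26, so (4 - 26 % 4) % 4 = 2 and 26 & 3 = 2; for a
	 *  non-QoS data header, padpos = 24 and both expressions give 0.)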
*/ - padsize = padpos & 3; - if (padsize && skb->len>=padpos+padsize+FCS_LEN) { - memmove(skb->data + padsize, skb->data, padpos); - skb_pull(skb, padsize); - } - - keyix = rx_stats->rs_keyix; - - if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && - ieee80211_has_protected(fc)) { - rxs->flag |= RX_FLAG_DECRYPTED; - } else if (ieee80211_has_protected(fc) - && !decrypt_error && skb->len >= hdrlen + 4) { - keyix = skb->data[hdrlen + 3] >> 6; - - if (test_bit(keyix, common->keymap)) - rxs->flag |= RX_FLAG_DECRYPTED; - } - if (ah->sw_mgmt_crypto && - (rxs->flag & RX_FLAG_DECRYPTED) && - ieee80211_is_mgmt(fc)) - /* Use software decrypt for management frames. */ - rxs->flag &= ~RX_FLAG_DECRYPTED; -} -EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess); - int ath9k_cmn_padpos(__le16 frame_control) { int padpos = 24; @@ -475,10 +211,14 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_common *common) return -1; } -static int ath_reserve_key_cache_slot(struct ath_common *common) +static int ath_reserve_key_cache_slot(struct ath_common *common, + enum ieee80211_key_alg alg) { int i; + if (alg == ALG_TKIP) + return ath_reserve_key_cache_slot_tkip(common); + /* First, try to find slots that would not be available for TKIP. */ if (common->splitmic) { for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) { @@ -547,6 +287,7 @@ int ath9k_cmn_key_config(struct ath_common *common, struct ath_hw *ah = common->ah; struct ath9k_keyval hk; const u8 *mac = NULL; + u8 gmac[ETH_ALEN]; int ret = 0; int idx; @@ -570,9 +311,27 @@ int ath9k_cmn_key_config(struct ath_common *common, memcpy(hk.kv_val, key->key, key->keylen); if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { - /* For now, use the default keys for broadcast keys. This may - * need to change with virtual interfaces. 
*/ - idx = key->keyidx; + switch (vif->type) { + case NL80211_IFTYPE_AP: + memcpy(gmac, vif->addr, ETH_ALEN); + gmac[0] |= 0x01; + mac = gmac; + idx = ath_reserve_key_cache_slot(common, key->alg); + break; + case NL80211_IFTYPE_ADHOC: + if (!sta) { + idx = key->keyidx; + break; + } + memcpy(gmac, sta->addr, ETH_ALEN); + gmac[0] |= 0x01; + mac = gmac; + idx = ath_reserve_key_cache_slot(common, key->alg); + break; + default: + idx = key->keyidx; + break; + } } else if (key->keyidx) { if (WARN_ON(!sta)) return -EOPNOTSUPP; @@ -589,14 +348,12 @@ int ath9k_cmn_key_config(struct ath_common *common, return -EOPNOTSUPP; mac = sta->addr; - if (key->alg == ALG_TKIP) - idx = ath_reserve_key_cache_slot_tkip(common); - else - idx = ath_reserve_key_cache_slot(common); - if (idx < 0) - return -ENOSPC; /* no free key cache entries */ + idx = ath_reserve_key_cache_slot(common, key->alg); } + if (idx < 0) + return -ENOSPC; /* no free key cache entries */ + if (key->alg == ALG_TKIP) ret = ath_setkey_tkip(common, idx, key->key, &hk, mac, vif->type == NL80211_IFTYPE_AP); @@ -644,6 +401,19 @@ void ath9k_cmn_key_delete(struct ath_common *common, } EXPORT_SYMBOL(ath9k_cmn_key_delete); +int ath9k_cmn_count_streams(unsigned int chainmask, int max) +{ + int streams = 0; + + do { + if (++streams == max) + break; + } while ((chainmask = chainmask & (chainmask - 1))); + + return streams; +} +EXPORT_SYMBOL(ath9k_cmn_count_streams); + static int __init ath9k_cmn_init(void) { return 0; diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index e08f7e5a26e0e90072a36c5ac46a311907e7a8a3..97809d39c73fc81f1eb59ea14879a1d9618fb404 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -52,82 +52,6 @@ #define ATH_EP_RND(x, mul) \ ((((x)%(mul)) >= ((mul)/2)) ? 
((x) + ((mul) - 1)) / (mul) : (x)/(mul)) -struct ath_atx_ac { - int sched; - int qnum; - struct list_head list; - struct list_head tid_q; -}; - -struct ath_buf_state { - int bfs_nframes; - u16 bfs_al; - u16 bfs_frmlen; - int bfs_seqno; - int bfs_tidno; - int bfs_retries; - u8 bf_type; - u32 bfs_keyix; - enum ath9k_key_type bfs_keytype; -}; - -struct ath_buf { - struct list_head list; - struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or - an aggregate) */ - struct ath_buf *bf_next; /* next subframe in the aggregate */ - struct sk_buff *bf_mpdu; /* enclosing frame structure */ - void *bf_desc; /* virtual addr of desc */ - dma_addr_t bf_daddr; /* physical addr of desc */ - dma_addr_t bf_buf_addr; /* physical addr of data buffer */ - bool bf_stale; - bool bf_isnullfunc; - bool bf_tx_aborted; - u16 bf_flags; - struct ath_buf_state bf_state; - dma_addr_t bf_dmacontext; - struct ath_wiphy *aphy; -}; - -struct ath_atx_tid { - struct list_head list; - struct list_head buf_q; - struct ath_node *an; - struct ath_atx_ac *ac; - struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; - u16 seq_start; - u16 seq_next; - u16 baw_size; - int tidno; - int baw_head; /* first un-acked tx buffer */ - int baw_tail; /* next unused tx buffer slot */ - int sched; - int paused; - u8 state; -}; - -struct ath_node { - struct ath_common *common; - struct ath_atx_tid tid[WME_NUM_TID]; - struct ath_atx_ac ac[WME_NUM_AC]; - u16 maxampdu; - u8 mpdudensity; - int last_rssi; -}; - -int ath9k_cmn_rx_skb_preprocess(struct ath_common *common, - struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ath_rx_status *rx_stats, - struct ieee80211_rx_status *rx_status, - bool *decrypt_error); - -void ath9k_cmn_rx_skb_postprocess(struct ath_common *common, - struct sk_buff *skb, - struct ath_rx_status *rx_stats, - struct ieee80211_rx_status *rxs, - bool decrypt_error); - int ath9k_cmn_padpos(__le16 frame_control); int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, @@ -140,3 +64,4 @@ int ath9k_cmn_key_config(struct ath_common *common, struct ieee80211_key_conf *key); void ath9k_cmn_key_delete(struct ath_common *common, struct ieee80211_key_conf *key); +int ath9k_cmn_count_streams(unsigned int chainmask, int max); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 29898f8d1893606627abdeb601966dc213cc61b8..54aae931424e69aa5cecc534ee0b8f03303383c7 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -42,7 +42,7 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, char buf[32]; unsigned int len; - len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask); + len = sprintf(buf, "0x%08x\n", common->debug_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -57,7 +57,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf, len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) - return -EINVAL; + return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &mask)) @@ -86,7 +86,7 @@ static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf, char buf[32]; unsigned int len; - len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask); + len = sprintf(buf, "0x%08x\n", common->tx_chainmask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -101,7 +101,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use len = 
min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) - return -EINVAL; + return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &mask)) @@ -128,7 +128,7 @@ static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf, char buf[32]; unsigned int len; - len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask); + len = sprintf(buf, "0x%08x\n", common->rx_chainmask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -143,7 +143,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) - return -EINVAL; + return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, &mask)) @@ -176,7 +176,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf, buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL); if (!buf) - return 0; + return -ENOMEM; ath9k_ps_wakeup(sc); @@ -248,6 +248,9 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf, ath9k_ps_restore(sc); + if (len > DMA_BUF_LEN) + len = DMA_BUF_LEN; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; @@ -269,6 +272,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) sc->debug.stats.istats.rxlp++; if (status & ATH9K_INT_RXHP) sc->debug.stats.istats.rxhp++; + if (status & ATH9K_INT_BB_WATCHDOG) + sc->debug.stats.istats.bb_watchdog++; } else { if (status & ATH9K_INT_RX) sc->debug.stats.istats.rxok++; @@ -319,6 +324,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "WATCHDOG", + sc->debug.stats.istats.bb_watchdog); } else { len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); @@ -358,6 +366,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, len += snprintf(buf + len, sizeof(buf) - len, "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -397,11 +408,10 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, if (sc->cur_rate_table == NULL) return 0; - max = 80 + sc->cur_rate_table->rate_cnt * 1024; - buf = kmalloc(max + 1, GFP_KERNEL); + max = 80 + sc->cur_rate_table->rate_cnt * 1024 + 1; + buf = kmalloc(max, GFP_KERNEL); if (buf == NULL) - return 0; - buf[max] = 0; + return -ENOMEM; len += sprintf(buf, "%6s %6s %6s " "%10s %10s %10s %10s\n", @@ -443,6 +453,9 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, stats->per); } + if (len > max) + len = max; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; @@ -505,6 +518,9 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, len += snprintf(buf + len, sizeof(buf) - len, "addrmask: %pM\n", addr); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -614,10 +630,10 @@ static const struct file_operations fops_wiphy = { do { \ len += snprintf(buf + len, size - len, \ "%s%13u%11u%10u%10u\n", str, \ - sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \ - sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \ - 
sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \ - sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \ + sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \ + sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \ + sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \ + sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \ } while(0) static ssize_t read_file_xmit(struct file *file, char __user *user_buf, @@ -630,7 +646,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) - return 0; + return -ENOMEM; len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); @@ -648,6 +664,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, PR("DATA Underrun: ", data_underrun); PR("DELIM Underrun: ", delim_underrun); + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); @@ -700,7 +719,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) - return 0; + return -ENOMEM; len += snprintf(buf + len, size - len, "%18s : %10u\n", "CRC ERR", @@ -751,6 +770,9 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL); + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); @@ -802,7 +824,7 @@ static ssize_t read_file_regidx(struct file *file, char __user *user_buf, char buf[32]; unsigned int len; - len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx); + len = sprintf(buf, "0x%08x\n", sc->debug.regidx); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -816,7 +838,7 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf, len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) - return -EINVAL; + return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, ®idx)) @@ -843,7 +865,7 @@ static ssize_t read_file_regval(struct file *file, char __user *user_buf, u32 regval; regval = REG_READ_D(ah, sc->debug.regidx); - len = snprintf(buf, sizeof(buf), "0x%08x\n", regval); + len = sprintf(buf, "0x%08x\n", regval); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -858,7 +880,7 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf, len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) - return -EINVAL; + return -EFAULT; buf[len] = '\0'; if (strict_strtoul(buf, 0, ®val)) @@ -934,6 +956,10 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, sc, &fops_regval)) goto err; + if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca)) + goto err; + sc->debug.regidx = 0; return 0; err: diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 5147b8709e103bf0811390b9c0989f8485b9c361..5d21704e87fff40c078382e1010f4b1fc529efcf 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -53,6 +53,7 @@ struct ath_buf; * @cabend: RX End of CAB traffic * @dtimsync: DTIM sync lossage * @dtim: RX Beacon with DTIM + * @bb_watchdog: Baseband watchdog */ struct ath_interrupt_stats { u32 total; @@ -76,6 +77,7 @@ struct ath_interrupt_stats { u32 cabend; u32 dtimsync; u32 dtim; + u32 
bb_watchdog; }; struct ath_rc_stats { diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index ca8704a9d7ac6208bedec668b790e2c55f149001..1266333f586d8f574cf8fcda7086f036f3d56403 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -24,6 +24,14 @@ static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); } +void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) +{ + REG_WRITE(ah, reg, val); + + if (ah->config.analog_shiftreg) + udelay(100); +} + void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, u32 shift, u32 val) { @@ -250,6 +258,27 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, return twiceMaxEdgePower; } +void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + + switch (ar5416_get_ntxchains(ah->txchainmask)) { + case 1: + break; + case 2: + regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; + break; + case 3: + regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; + break; + default: + ath_print(common, ATH_DBG_EEPROM, + "Invalid chainmask configuration\n"); + break; + } +} + int ath9k_hw_eeprom_init(struct ath_hw *ah) { int status; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 21354c15a9a9b1181fd2eaa6d7933c5706019b47..8750c558c221658302c18fabff9a25f83906b730 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -263,7 +263,8 @@ enum eeprom_param { EEP_PWR_TABLE_OFFSET, EEP_DRIVE_STRENGTH, EEP_INTERNAL_REGULATOR, - EEP_SWREG + EEP_SWREG, + EEP_PAPRD, }; enum ar5416_rates { @@ -669,7 +670,7 @@ struct eeprom_ops { int (*get_eeprom_ver)(struct ath_hw *hw); int (*get_eeprom_rev)(struct ath_hw *hw); u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); - u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, + u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, struct ath9k_channel *chan); void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); @@ -679,6 +680,7 @@ struct eeprom_ops { u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); }; +void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val); void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, u32 shift, u32 val); int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, @@ -704,6 +706,7 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah, u16 numRates, bool isHt40Target); u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, bool is2GHz, int num_band_edges); +void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah); int ath9k_hw_eeprom_init(struct ath_hw *ah); #define ar5416_get_ntxchains(_txchainmask) \ diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 41a77d1bd43981a4e39a3b40bae55c2a1ae63664..9cccd12e8f2131aa176740b7cc9e940d6e3a582f 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -222,7 +222,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_data_per_freq_4k *pRawDataSet, u8 *bChans, u16 availPiers, - u16 tPdGainOverlap, 
int16_t *pMinCalPower, + u16 tPdGainOverlap, u16 *pPdGainBoundaries, u8 *pPDADCValues, u16 numXpdGains) { @@ -249,6 +249,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah, struct chan_centers centers; #define PD_GAIN_BOUNDARY_DEFAULT 58; + memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS); ath9k_hw_get_channel_centers(ah, chan, ¢ers); for (numPiers = 0; numPiers < availPiers; numPiers++) { @@ -307,8 +308,6 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah, } } - *pMinCalPower = (int16_t)(minPwrT4[0] / 2); - k = 0; for (i = 0; i < numXpdGains; i++) { @@ -398,7 +397,6 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; u16 gainBoundaries[AR5416_EEP4K_PD_GAINS_IN_MASK]; u16 numPiers, i, j; - int16_t tMinCalPower; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; u32 reg32, regOffset, regChainOffset; @@ -451,7 +449,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan, pRawDataset, pCalBChans, numPiers, pdGainOverlap_t2, - &tMinCalPower, gainBoundaries, + gainBoundaries, pdadcValues, numXpdGain); ENABLE_REGWRITE_BUFFER(ah); @@ -1149,13 +1147,13 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, } } -static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; - return pModal->antCtrlCommon & 0xFFFF; + return pModal->antCtrlCommon; } static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index b471db5fb82d80d1b51564ad497b67e375a497f9..4a52cf03808b1aa207acb02fdd1f0394c38b73fa 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -17,17 +17,19 @@ #include "hw.h" #include "ar9002_phy.h" -static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) +#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16)) + +static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) { return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF; } -static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah) +static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah) { return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; } -static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah) +static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct ath_common *common = ath9k_hw_common(ah); @@ -40,20 +42,20 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah) "Reading from EEPROM, not flash\n"); } - for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16); - addr++) { - if (!ath9k_hw_nvram_read(common, - addr + eep_start_loc, eep_data)) { + for (addr = 0; addr < NUM_EEP_WORDS; addr++) { + if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, + eep_data)) { ath_print(common, ATH_DBG_EEPROM, "Unable to read eeprom region\n"); return false; } eep_data++; } + return true; } -static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) +static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) { u32 sum = 0, el, integer; u16 temp, word, magic, magic2, *eepdata; @@ -63,8 +65,8 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) struct ath_common *common = 
ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { - if (!ath9k_hw_nvram_read(common, - AR5416_EEPROM_MAGIC_OFFSET, &magic)) { + if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, + &magic)) { ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n"); return false; @@ -72,6 +74,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) ath_print(common, ATH_DBG_EEPROM, "Read Magic = 0x%04X\n", magic); + if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); @@ -79,9 +82,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) need_swap = true; eepdata = (u16 *)(&ah->eeprom); - for (addr = 0; - addr < sizeof(struct ar9287_eeprom) / sizeof(u16); - addr++) { + for (addr = 0; addr < NUM_EEP_WORDS; addr++) { temp = swab16(*eepdata); *eepdata = temp; eepdata++; @@ -89,13 +90,14 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) } else { ath_print(common, ATH_DBG_FATAL, "Invalid EEPROM Magic. " - "endianness mismatch.\n"); + "Endianness mismatch.\n"); return -EINVAL; } } } - ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? - "True" : "False"); + + ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", + need_swap ? "True" : "False"); if (need_swap) el = swab16(ah->eeprom.map9287.baseEepHeader.length); @@ -108,6 +110,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) el = el / sizeof(u16); eepdata = (u16 *)(&ah->eeprom); + for (i = 0; i < el; i++) sum ^= *eepdata++; @@ -161,7 +164,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) return 0; } -static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah, +static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; @@ -170,6 +173,7 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah, u16 ver_minor; ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK; + switch (param) { case EEP_NFTHRESH_2: return pModal->noiseFloorThreshCh[0]; @@ -214,29 +218,29 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah, } } - -static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, - struct ath9k_channel *chan, - struct cal_data_per_freq_ar9287 *pRawDataSet, - u8 *bChans, u16 availPiers, - u16 tPdGainOverlap, int16_t *pMinCalPower, - u16 *pPdGainBoundaries, u8 *pPDADCValues, - u16 numXpdGains) +static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_data_per_freq_ar9287 *pRawDataSet, + u8 *bChans, u16 availPiers, + u16 tPdGainOverlap, + u16 *pPdGainBoundaries, + u8 *pPDADCValues, + u16 numXpdGains) { -#define TMP_VAL_VPD_TABLE \ +#define TMP_VAL_VPD_TABLE \ ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep)); - int i, j, k; - int16_t ss; - u16 idxL = 0, idxR = 0, numPiers; - u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; - u8 minPwrT4[AR9287_NUM_PD_GAINS]; - u8 maxPwrT4[AR9287_NUM_PD_GAINS]; - int16_t vpdStep; - int16_t tmpVal; - u16 sizeCurrVpdTable, maxIndex, tgtIndex; - bool match; - int16_t minDelta = 0; + int i, j, k; + int16_t ss; + u16 idxL = 0, idxR = 0, numPiers; + u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; + u8 minPwrT4[AR9287_NUM_PD_GAINS]; + u8 maxPwrT4[AR9287_NUM_PD_GAINS]; + int16_t vpdStep; + int16_t tmpVal; + u16 sizeCurrVpdTable, maxIndex, tgtIndex; + bool match; + int16_t minDelta = 0; struct chan_centers centers; static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; @@ -245,6 +249,7 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, static u8 
vpdTableI[AR5416_EEP4K_NUM_PD_GAINS] [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS); ath9k_hw_get_channel_centers(ah, chan, ¢ers); for (numPiers = 0; numPiers < availPiers; numPiers++) { @@ -253,18 +258,18 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, } match = ath9k_hw_get_lower_upper_index( - (u8)FREQ2FBIN(centers.synth_center, - IS_CHAN_2GHZ(chan)), bChans, numPiers, - &idxL, &idxR); + (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), + bChans, numPiers, &idxL, &idxR); if (match) { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0]; maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], - pRawDataSet[idxL].pwrPdg[i], - pRawDataSet[idxL].vpdPdg[i], - AR9287_PD_GAIN_ICEPTS, vpdTableI[i]); + pRawDataSet[idxL].pwrPdg[i], + pRawDataSet[idxL].vpdPdg[i], + AR9287_PD_GAIN_ICEPTS, + vpdTableI[i]); } } else { for (i = 0; i < numXpdGains; i++) { @@ -275,61 +280,58 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, minPwrT4[i] = max(pPwrL[0], pPwrR[0]); - maxPwrT4[i] = - min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1], - pPwrR[AR9287_PD_GAIN_ICEPTS - 1]); + maxPwrT4[i] = min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1], + pPwrR[AR9287_PD_GAIN_ICEPTS - 1]); ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], - pPwrL, pVpdL, - AR9287_PD_GAIN_ICEPTS, - vpdTableL[i]); + pPwrL, pVpdL, + AR9287_PD_GAIN_ICEPTS, + vpdTableL[i]); ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], - pPwrR, pVpdR, - AR9287_PD_GAIN_ICEPTS, - vpdTableR[i]); + pPwrR, pVpdR, + AR9287_PD_GAIN_ICEPTS, + vpdTableR[i]); for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { - vpdTableI[i][j] = - (u8)(ath9k_hw_interpolate((u16) - FREQ2FBIN(centers. synth_center, - IS_CHAN_2GHZ(chan)), - bChans[idxL], bChans[idxR], - vpdTableL[i][j], vpdTableR[i][j])); + vpdTableI[i][j] = (u8)(ath9k_hw_interpolate( + (u16)FREQ2FBIN(centers. synth_center, + IS_CHAN_2GHZ(chan)), + bChans[idxL], bChans[idxR], + vpdTableL[i][j], vpdTableR[i][j])); } } } - *pMinCalPower = (int16_t)(minPwrT4[0] / 2); k = 0; + for (i = 0; i < numXpdGains; i++) { if (i == (numXpdGains - 1)) - pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2); + pPdGainBoundaries[i] = + (u16)(maxPwrT4[i] / 2); else - pPdGainBoundaries[i] = (u16)((maxPwrT4[i] + - minPwrT4[i+1]) / 4); + pPdGainBoundaries[i] = + (u16)((maxPwrT4[i] + minPwrT4[i+1]) / 4); pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER, - pPdGainBoundaries[i]); + pPdGainBoundaries[i]); - if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) { - minDelta = pPdGainBoundaries[0] - 23; - pPdGainBoundaries[0] = 23; - } else - minDelta = 0; + minDelta = 0; if (i == 0) { if (AR_SREV_9280_10_OR_LATER(ah)) ss = (int16_t)(0 - (minPwrT4[i] / 2)); else ss = 0; - } else + } else { ss = (int16_t)((pPdGainBoundaries[i-1] - - (minPwrT4[i] / 2)) - + (minPwrT4[i] / 2)) - tPdGainOverlap + 1 + minDelta); + } vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); @@ -348,12 +350,13 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - vpdTableI[i][sizeCurrVpdTable - 2]); vpdStep = (int16_t)((vpdStep < 1) ? 
1 : vpdStep); + if (tgtIndex > maxIndex) { while ((ss <= tgtIndex) && (k < (AR9287_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t) TMP_VAL_VPD_TABLE; - pPDADCValues[k++] = (u8)((tmpVal > 255) ? - 255 : tmpVal); + pPDADCValues[k++] = + (u8)((tmpVal > 255) ? 255 : tmpVal); ss++; } } @@ -375,10 +378,9 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop, - u8 *pCalChans, u16 availPiers, - int8_t *pPwr) + u8 *pCalChans, u16 availPiers, int8_t *pPwr) { - u16 idxL = 0, idxR = 0, numPiers; + u16 idxL = 0, idxR = 0, numPiers; bool match; struct chan_centers centers; @@ -390,15 +392,14 @@ static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah, } match = ath9k_hw_get_lower_upper_index( - (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), - pCalChans, numPiers, - &idxL, &idxR); + (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), + pCalChans, numPiers, &idxL, &idxR); if (match) { *pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0]; } else { *pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] + - (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2; + (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2; } } @@ -409,16 +410,22 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah, u32 tmpVal; u32 a; + /* Enable OLPC for chain 0 */ + tmpVal = REG_READ(ah, 0xa270); tmpVal = tmpVal & 0xFCFFFFFF; tmpVal = tmpVal | (0x3 << 24); REG_WRITE(ah, 0xa270, tmpVal); + /* Enable OLPC for chain 1 */ + tmpVal = REG_READ(ah, 0xb270); tmpVal = tmpVal & 0xFCFFFFFF; tmpVal = tmpVal | (0x3 << 24); REG_WRITE(ah, 0xb270, tmpVal); + /* Write the OLPC ref power for chain 0 */ + if (chain == 0) { tmpVal = REG_READ(ah, 0xa398); tmpVal = tmpVal & 0xff00ffff; @@ -427,6 +434,8 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah, REG_WRITE(ah, 0xa398, tmpVal); } + /* Write the OLPC ref power for chain 1 */ + if (chain == 1) { tmpVal = REG_READ(ah, 0xb398); tmpVal = tmpVal & 0xff00ffff; @@ -436,28 +445,28 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah, } } -static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah, +static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah, struct ath9k_channel *chan, int16_t *pTxPowerIndexOffset) { - struct ath_common *common = ath9k_hw_common(ah); struct cal_data_per_freq_ar9287 *pRawDataset; struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop; - u8 *pCalBChans = NULL; + u8 *pCalBChans = NULL; u16 pdGainOverlap_t2; - u8 pdadcValues[AR9287_NUM_PDADC_VALUES]; + u8 pdadcValues[AR9287_NUM_PDADC_VALUES]; u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK]; u16 numPiers = 0, i, j; - int16_t tMinCalPower; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0}; - u32 reg32, regOffset, regChainOffset; - int16_t modalIdx, diff = 0; + u32 reg32, regOffset, regChainOffset, regval; + int16_t modalIdx, diff = 0; struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; + modalIdx = IS_CHAN_2GHZ(chan) ? 
1 : 0; xpdMask = pEepData->modalHeader.xpdGain; + if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= - AR9287_EEP_MINOR_VER_2) + AR9287_EEP_MINOR_VER_2) pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; else pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), @@ -466,15 +475,16 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah, if (IS_CHAN_2GHZ(chan)) { pCalBChans = pEepData->calFreqPier2G; numPiers = AR9287_NUM_2G_CAL_PIERS; - if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { pRawDatasetOpenLoop = - (struct cal_data_op_loop_ar9287 *) - pEepData->calPierData2G[0]; + (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[0]; ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0]; } } numXpdGain = 0; + + /* Calculate the value of xpdgains from the xpdGain Mask */ for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) { if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) { if (numXpdGain >= AR9287_NUM_PD_GAINS) @@ -496,99 +506,79 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah, for (i = 0; i < AR9287_MAX_CHAINS; i++) { regChainOffset = i * 0x1000; + if (pEepData->baseEepHeader.txMask & (1 << i)) { - pRawDatasetOpenLoop = (struct cal_data_op_loop_ar9287 *) - pEepData->calPierData2G[i]; - if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + pRawDatasetOpenLoop = + (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[i]; + + if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { int8_t txPower; ar9287_eeprom_get_tx_gain_index(ah, chan, - pRawDatasetOpenLoop, - pCalBChans, numPiers, - &txPower); + pRawDatasetOpenLoop, + pCalBChans, numPiers, + &txPower); ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i); } else { pRawDataset = (struct cal_data_per_freq_ar9287 *) pEepData->calPierData2G[i]; - ath9k_hw_get_AR9287_gain_boundaries_pdadcs( - ah, chan, pRawDataset, - pCalBChans, numPiers, - pdGainOverlap_t2, - &tMinCalPower, gainBoundaries, - pdadcValues, numXpdGain); + + ath9k_hw_get_ar9287_gain_boundaries_pdadcs(ah, chan, + pRawDataset, + pCalBChans, numPiers, + pdGainOverlap_t2, + gainBoundaries, + pdadcValues, + numXpdGain); } if (i == 0) { - if (!ath9k_hw_AR9287_get_eeprom( - ah, EEP_OL_PWRCTRL)) { - REG_WRITE(ah, AR_PHY_TPCRG5 + - regChainOffset, - SM(pdGainOverlap_t2, - AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | - SM(gainBoundaries[0], - AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) - | SM(gainBoundaries[1], - AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) - | SM(gainBoundaries[2], - AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) - | SM(gainBoundaries[3], - AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); + if (!ath9k_hw_ar9287_get_eeprom(ah, + EEP_OL_PWRCTRL)) { + + regval = SM(pdGainOverlap_t2, + AR_PHY_TPCRG5_PD_GAIN_OVERLAP) + | SM(gainBoundaries[0], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) + | SM(gainBoundaries[1], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) + | SM(gainBoundaries[2], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) + | SM(gainBoundaries[3], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4); + + REG_WRITE(ah, + AR_PHY_TPCRG5 + regChainOffset, + regval); } } if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB != - pEepData->baseEepHeader.pwrTableOffset) { - diff = (u16) - (pEepData->baseEepHeader.pwrTableOffset - - (int32_t)AR9287_PWR_TABLE_OFFSET_DB); + pEepData->baseEepHeader.pwrTableOffset) { + diff = (u16)(pEepData->baseEepHeader.pwrTableOffset - + (int32_t)AR9287_PWR_TABLE_OFFSET_DB); diff *= 2; - for (j = 0; - j < ((u16)AR9287_NUM_PDADC_VALUES-diff); - j++) + for (j = 0; j < ((u16)AR9287_NUM_PDADC_VALUES-diff); j++) pdadcValues[j] = pdadcValues[j+diff]; for (j 
= (u16)(AR9287_NUM_PDADC_VALUES-diff); j < AR9287_NUM_PDADC_VALUES; j++) pdadcValues[j] = - pdadcValues[ - AR9287_NUM_PDADC_VALUES-diff]; + pdadcValues[AR9287_NUM_PDADC_VALUES-diff]; } - if (!ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { - regOffset = AR_PHY_BASE + (672 << 2) + - regChainOffset; - for (j = 0; j < 32; j++) { - reg32 = ((pdadcValues[4*j + 0] - & 0xFF) << 0) | - ((pdadcValues[4*j + 1] - & 0xFF) << 8) | - ((pdadcValues[4*j + 2] - & 0xFF) << 16) | - ((pdadcValues[4*j + 3] - & 0xFF) << 24) ; - REG_WRITE(ah, regOffset, reg32); + if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + regOffset = AR_PHY_BASE + + (672 << 2) + regChainOffset; - ath_print(common, ATH_DBG_EEPROM, - "PDADC (%d,%4x): %4.4x " - "%8.8x\n", - i, regChainOffset, regOffset, - reg32); - - ath_print(common, ATH_DBG_EEPROM, - "PDADC: Chain %d | " - "PDADC %3d Value %3d | " - "PDADC %3d Value %3d | " - "PDADC %3d Value %3d | " - "PDADC %3d Value %3d |\n", - i, 4 * j, pdadcValues[4 * j], - 4 * j + 1, - pdadcValues[4 * j + 1], - 4 * j + 2, - pdadcValues[4 * j + 2], - 4 * j + 3, - pdadcValues[4 * j + 3]); + for (j = 0; j < 32; j++) { + reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0) + | ((pdadcValues[4*j + 1] & 0xFF) << 8) + | ((pdadcValues[4*j + 2] & 0xFF) << 16) + | ((pdadcValues[4*j + 3] & 0xFF) << 24); + REG_WRITE(ah, regOffset, reg32); regOffset += 4; } } @@ -598,30 +588,45 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah, *pTxPowerIndexOffset = 0; } -static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, - struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, - u16 AntennaReduction, u16 twiceMaxRegulatoryPower, - u16 powerLimit) +static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, + struct ath9k_channel *chan, + int16_t *ratesArray, + u16 cfgCtl, + u16 AntennaReduction, + u16 twiceMaxRegulatoryPower, + u16 powerLimit) { +#define CMP_CTL \ + (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ + pEepData->ctlIndex[i]) + +#define CMP_NO_CTL \ + (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ + ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) + #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; static const u16 tpScaleReductionTable[5] = { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; int i; - int16_t twiceLargestAntenna; + int16_t twiceLargestAntenna; struct cal_ctl_data_ar9287 *rep; struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} }, targetPowerCck = {0, {0, 0, 0, 0} }; struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} }, targetPowerCckExt = {0, {0, 0, 0, 0} }; - struct cal_target_power_ht targetPowerHt20, + struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {0, {0, 0, 0, 0} }; u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; - u16 ctlModesFor11g[] = - {CTL_11B, CTL_11G, CTL_2GHT20, - CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40}; + u16 ctlModesFor11g[] = {CTL_11B, + CTL_11G, + CTL_2GHT20, + CTL_11B_EXT, + CTL_11G_EXT, + CTL_2GHT40}; u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq; struct chan_centers centers; int tx_chainmask; @@ -631,19 +636,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, ath9k_hw_get_channel_centers(ah, chan, ¢ers); + /* Compute TxPower reduction due to Antenna Gain */ twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0], pEepData->modalHeader.antennaGainCh[1]); + 
twiceLargestAntenna = (int16_t)min((AntennaReduction) - + twiceLargestAntenna, 0); - twiceLargestAntenna = (int16_t)min((AntennaReduction) - - twiceLargestAntenna, 0); - + /* + * scaledPower is the minimum of the user input power level + * and the regulatory allowed power level. + */ maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) maxRegAllowedPower -= (tpScaleReductionTable[(regulatory->tp_scale)] * 2); scaledPower = min(powerLimit, maxRegAllowedPower); + /* + * Reduce scaled Power by number of chains active + * to get the per chain tx power level. + */ switch (ar5416_get_ntxchains(tx_chainmask)) { case 1: break; @@ -656,9 +670,14 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, } scaledPower = max((u16)0, scaledPower); + /* + * Get TX power from EEPROM. + */ if (IS_CHAN_2GHZ(chan)) { + /* CTL_11B, CTL_11G, CTL_2GHT20 */ numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; + pCtlMode = ctlModesFor11g; ath9k_hw_get_legacy_target_powers(ah, chan, @@ -675,6 +694,7 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, &targetPowerHt20, 8, false); if (IS_CHAN_HT40(chan)) { + /* All 2G CTLs */ numCtlModes = ARRAY_SIZE(ctlModesFor11g); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT40, @@ -692,8 +712,9 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { - bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || - (pCtlMode[ctlMode] == CTL_2GHT40); + bool isHt40CtlMode = + (pCtlMode[ctlMode] == CTL_2GHT40) ? true : false; + if (isHt40CtlMode) freq = centers.synth_center; else if (pCtlMode[ctlMode] & EXT_ADDITIVE) @@ -701,31 +722,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, else freq = centers.ctl_center; - if (ah->eep_ops->get_eeprom_ver(ah) == 14 && - ah->eep_ops->get_eeprom_rev(ah) <= 2) - twiceMaxEdgePower = AR5416_MAX_RATE_POWER; - + /* Walk through the CTL indices stored in EEPROM */ for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { - if ((((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - pEepData->ctlIndex[i]) || - (((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ((pEepData->ctlIndex[i] & - CTL_MODE_M) | SD_NO_CTL))) { + struct cal_ctl_edges *pRdEdgesPower; + /* + * Compare test group from regulatory channel list + * with test mode from pCtlMode list + */ + if (CMP_CTL || CMP_NO_CTL) { rep = &(pEepData->ctlData[i]); - twiceMinEdgePower = ath9k_hw_get_max_edge_power( - freq, - rep->ctlEdges[ar5416_get_ntxchains( - tx_chainmask) - 1], - IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES); - - if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) - twiceMaxEdgePower = min( - twiceMaxEdgePower, - twiceMinEdgePower); - else { + pRdEdgesPower = + rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1]; + + twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq, + pRdEdgesPower, + IS_CHAN_2GHZ(chan), + AR5416_NUM_BAND_EDGES); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { + twiceMaxEdgePower = min(twiceMaxEdgePower, + twiceMinEdgePower); + } else { twiceMaxEdgePower = twiceMinEdgePower; break; } @@ -734,55 +752,48 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + /* Apply ctl mode to correct target power set */ switch (pCtlMode[ctlMode]) { case CTL_11B: - for (i = 0; - i < ARRAY_SIZE(targetPowerCck.tPow2x); - i++) { - 
targetPowerCck.tPow2x[i] = (u8)min( - (u16)targetPowerCck.tPow2x[i], - minCtlPower); + for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { + targetPowerCck.tPow2x[i] = + (u8)min((u16)targetPowerCck.tPow2x[i], + minCtlPower); } break; case CTL_11A: case CTL_11G: - for (i = 0; - i < ARRAY_SIZE(targetPowerOfdm.tPow2x); - i++) { - targetPowerOfdm.tPow2x[i] = (u8)min( - (u16)targetPowerOfdm.tPow2x[i], - minCtlPower); + for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { + targetPowerOfdm.tPow2x[i] = + (u8)min((u16)targetPowerOfdm.tPow2x[i], + minCtlPower); } break; case CTL_5GHT20: case CTL_2GHT20: - for (i = 0; - i < ARRAY_SIZE(targetPowerHt20.tPow2x); - i++) { - targetPowerHt20.tPow2x[i] = (u8)min( - (u16)targetPowerHt20.tPow2x[i], - minCtlPower); + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { + targetPowerHt20.tPow2x[i] = + (u8)min((u16)targetPowerHt20.tPow2x[i], + minCtlPower); } break; case CTL_11B_EXT: - targetPowerCckExt.tPow2x[0] = (u8)min( - (u16)targetPowerCckExt.tPow2x[0], - minCtlPower); + targetPowerCckExt.tPow2x[0] = + (u8)min((u16)targetPowerCckExt.tPow2x[0], + minCtlPower); break; case CTL_11A_EXT: case CTL_11G_EXT: - targetPowerOfdmExt.tPow2x[0] = (u8)min( - (u16)targetPowerOfdmExt.tPow2x[0], - minCtlPower); + targetPowerOfdmExt.tPow2x[0] = + (u8)min((u16)targetPowerOfdmExt.tPow2x[0], + minCtlPower); break; case CTL_5GHT40: case CTL_2GHT40: - for (i = 0; - i < ARRAY_SIZE(targetPowerHt40.tPow2x); - i++) { - targetPowerHt40.tPow2x[i] = (u8)min( - (u16)targetPowerHt40.tPow2x[i], - minCtlPower); + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { + targetPowerHt40.tPow2x[i] = + (u8)min((u16)targetPowerHt40.tPow2x[i], + minCtlPower); } break; default: @@ -790,12 +801,13 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, } } + /* Now set the rates array */ + ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = ratesArray[rate18mb] = - ratesArray[rate24mb] = - targetPowerOfdm.tPow2x[0]; + ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0]; ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; @@ -807,12 +819,12 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, if (IS_CHAN_2GHZ(chan)) { ratesArray[rate1l] = targetPowerCck.tPow2x[0]; - ratesArray[rate2s] = ratesArray[rate2l] = - targetPowerCck.tPow2x[1]; - ratesArray[rate5_5s] = ratesArray[rate5_5l] = - targetPowerCck.tPow2x[2]; - ratesArray[rate11s] = ratesArray[rate11l] = - targetPowerCck.tPow2x[3]; + ratesArray[rate2s] = + ratesArray[rate2l] = targetPowerCck.tPow2x[1]; + ratesArray[rate5_5s] = + ratesArray[rate5_5l] = targetPowerCck.tPow2x[2]; + ratesArray[rate11s] = + ratesArray[rate11l] = targetPowerCck.tPow2x[3]; } if (IS_CHAN_HT40(chan)) { for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) @@ -821,28 +833,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; + if (IS_CHAN_2GHZ(chan)) ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; } +#undef CMP_CTL +#undef CMP_NO_CTL #undef REDUCE_SCALED_POWER_BY_TWO_CHAIN #undef REDUCE_SCALED_POWER_BY_THREE_CHAIN } -static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, +static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 twiceMaxRegulatoryPower, u8 powerLimit) { -#define 
INCREASE_MAXPOW_BY_TWO_CHAIN 6 -#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 - struct ath_common *common = ath9k_hw_common(ah); struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader; int16_t ratesArray[Ar5416RateSize]; - int16_t txPowerIndexOffset = 0; + int16_t txPowerIndexOffset = 0; u8 ht40PowerIncForPdadc = 2; int i; @@ -852,13 +864,13 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, AR9287_EEP_MINOR_VER_2) ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; - ath9k_hw_set_AR9287_power_per_rate_table(ah, chan, + ath9k_hw_set_ar9287_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, twiceAntennaReduction, twiceMaxRegulatoryPower, powerLimit); - ath9k_hw_set_AR9287_power_cal_table(ah, chan, &txPowerIndexOffset); + ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset); for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); @@ -871,6 +883,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; } + /* OFDM power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, ATH9K_POW_SM(ratesArray[rate18mb], 24) | ATH9K_POW_SM(ratesArray[rate12mb], 16) @@ -883,6 +896,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, | ATH9K_POW_SM(ratesArray[rate36mb], 8) | ATH9K_POW_SM(ratesArray[rate24mb], 0)); + /* CCK power per rate */ if (IS_CHAN_2GHZ(chan)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, ATH9K_POW_SM(ratesArray[rate2s], 24) @@ -896,6 +910,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); } + /* HT20 power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, ATH9K_POW_SM(ratesArray[rateHt20_3], 24) | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) @@ -908,8 +923,9 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); + /* HT40 power per rate */ if (IS_CHAN_HT40(chan)) { - if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, ATH9K_POW_SM(ratesArray[rateHt40_3], 24) | ATH9K_POW_SM(ratesArray[rateHt40_2], 16) @@ -943,6 +959,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, ht40PowerIncForPdadc, 0)); } + /* Dup/Ext power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) | ATH9K_POW_SM(ratesArray[rateExtCck], 16) @@ -960,37 +977,20 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2; else regulatory->max_power_level = ratesArray[i]; - - switch (ar5416_get_ntxchains(ah->txchainmask)) { - case 1: - break; - case 2: - regulatory->max_power_level += - INCREASE_MAXPOW_BY_TWO_CHAIN; - break; - case 3: - regulatory->max_power_level += - INCREASE_MAXPOW_BY_THREE_CHAIN; - break; - default: - ath_print(common, ATH_DBG_EEPROM, - "Invalid chainmask configuration\n"); - break; - } } -static void ath9k_hw_AR9287_set_addac(struct ath_hw *ah, +static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah, struct ath9k_channel *chan) { } -static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah, +static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct modal_eep_ar9287_header *pModal = &eep->modalHeader; u16 
antWrites[AR9287_ANT_16S]; - u32 regChainOffset; + u32 regChainOffset, regval; u8 txRxAttenLocal; int i, j, offset_num; @@ -1077,42 +1077,37 @@ static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB1, - AR9287_AN_RF2G3_DB1_S, pModal->db1); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB2, - AR9287_AN_RF2G3_DB2_S, pModal->db2); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, - AR9287_AN_RF2G3_OB_CCK, - AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, - AR9287_AN_RF2G3_OB_PSK, - AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, - AR9287_AN_RF2G3_OB_QAM, - AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, - AR9287_AN_RF2G3_OB_PAL_OFF, - AR9287_AN_RF2G3_OB_PAL_OFF_S, - pModal->ob_pal_off); - - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, - AR9287_AN_RF2G3_DB1, AR9287_AN_RF2G3_DB1_S, - pModal->db1); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, AR9287_AN_RF2G3_DB2, - AR9287_AN_RF2G3_DB2_S, pModal->db2); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, - AR9287_AN_RF2G3_OB_CCK, - AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, - AR9287_AN_RF2G3_OB_PSK, - AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, - AR9287_AN_RF2G3_OB_QAM, - AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam); - ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, - AR9287_AN_RF2G3_OB_PAL_OFF, - AR9287_AN_RF2G3_OB_PAL_OFF_S, - pModal->ob_pal_off); + regval = REG_READ(ah, AR9287_AN_RF2G3_CH0); + regval &= ~(AR9287_AN_RF2G3_DB1 | + AR9287_AN_RF2G3_DB2 | + AR9287_AN_RF2G3_OB_CCK | + AR9287_AN_RF2G3_OB_PSK | + AR9287_AN_RF2G3_OB_QAM | + AR9287_AN_RF2G3_OB_PAL_OFF); + regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) | + SM(pModal->db2, AR9287_AN_RF2G3_DB2) | + SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) | + SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) | + SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) | + SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF)); + + ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH0, regval); + + regval = REG_READ(ah, AR9287_AN_RF2G3_CH1); + regval &= ~(AR9287_AN_RF2G3_DB1 | + AR9287_AN_RF2G3_DB2 | + AR9287_AN_RF2G3_OB_CCK | + AR9287_AN_RF2G3_OB_PSK | + AR9287_AN_RF2G3_OB_QAM | + AR9287_AN_RF2G3_OB_PAL_OFF); + regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) | + SM(pModal->db2, AR9287_AN_RF2G3_DB2) | + SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) | + SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) | + SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) | + SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF)); + + ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH1, regval); REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); @@ -1125,26 +1120,27 @@ static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah, pModal->xpaBiasLvl); } -static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah, +static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah, enum ieee80211_band freq_band) { return 1; } -static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_ar9287_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct modal_eep_ar9287_header *pModal = 
&eep->modalHeader; - return pModal->antCtrlCommon & 0xFFFF; + return pModal->antCtrlCommon; } -static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah, +static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { #define EEP_MAP9287_SPURCHAN \ (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan) + struct ath_common *common = ath9k_hw_common(ah); u16 spur_val = AR_NO_SPUR; @@ -1171,15 +1167,15 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah, } const struct eeprom_ops eep_ar9287_ops = { - .check_eeprom = ath9k_hw_AR9287_check_eeprom, - .get_eeprom = ath9k_hw_AR9287_get_eeprom, - .fill_eeprom = ath9k_hw_AR9287_fill_eeprom, - .get_eeprom_ver = ath9k_hw_AR9287_get_eeprom_ver, - .get_eeprom_rev = ath9k_hw_AR9287_get_eeprom_rev, - .get_num_ant_config = ath9k_hw_AR9287_get_num_ant_config, - .get_eeprom_antenna_cfg = ath9k_hw_AR9287_get_eeprom_antenna_cfg, - .set_board_values = ath9k_hw_AR9287_set_board_values, - .set_addac = ath9k_hw_AR9287_set_addac, - .set_txpower = ath9k_hw_AR9287_set_txpower, - .get_spur_channel = ath9k_hw_AR9287_get_spur_channel + .check_eeprom = ath9k_hw_ar9287_check_eeprom, + .get_eeprom = ath9k_hw_ar9287_get_eeprom, + .fill_eeprom = ath9k_hw_ar9287_fill_eeprom, + .get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver, + .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev, + .get_num_ant_config = ath9k_hw_ar9287_get_num_ant_config, + .get_eeprom_antenna_cfg = ath9k_hw_ar9287_get_eeprom_antenna_cfg, + .set_board_values = ath9k_hw_ar9287_set_board_values, + .set_addac = ath9k_hw_ar9287_set_addac, + .set_txpower = ath9k_hw_ar9287_set_txpower, + .get_spur_channel = ath9k_hw_ar9287_get_spur_channel }; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 7e1ed78d0e64b0ec63006a8ef751a253628da249..afa2b73ddbdde865bb91491f283dd7eceb6cdf49 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -593,7 +593,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah, struct ath9k_channel *chan, struct cal_data_per_freq *pRawDataSet, u8 *bChans, u16 availPiers, - u16 tPdGainOverlap, int16_t *pMinCalPower, + u16 tPdGainOverlap, u16 *pPdGainBoundaries, u8 *pPDADCValues, u16 numXpdGains) { @@ -617,6 +617,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah, int16_t minDelta = 0; struct chan_centers centers; + memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS); ath9k_hw_get_channel_centers(ah, chan, ¢ers); for (numPiers = 0; numPiers < availPiers; numPiers++) { @@ -674,8 +675,6 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah, } } - *pMinCalPower = (int16_t)(minPwrT4[0] / 2); - k = 0; for (i = 0; i < numXpdGains; i++) { @@ -729,7 +728,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah, vpdTableI[i][sizeCurrVpdTable - 2]); vpdStep = (int16_t)((vpdStep < 1) ? 
1 : vpdStep); - if (tgtIndex > maxIndex) { + if (tgtIndex >= maxIndex) { while ((ss <= tgtIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] + @@ -837,7 +836,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; u16 numPiers, i, j; - int16_t tMinCalPower, diff = 0; + int16_t diff = 0; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 }; u32 reg32, regOffset, regChainOffset; @@ -922,7 +921,6 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, chan, pRawDataset, pCalBChans, numPiers, pdGainOverlap_t2, - &tMinCalPower, gainBoundaries, pdadcValues, numXpdGain); @@ -1437,14 +1435,14 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, return num_ant_config; } -static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct modal_eep_header *pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); - return pModal->antCtrlCommon & 0xFFFF; + return pModal->antCtrlCommon; } static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index 0ee75e79fe35a38f9e0aa4c72c3570922fb17bf7..3a8ee999da5dc111414b6aebf284930dff75cae9 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -76,7 +76,8 @@ static void ath_led_brightness(struct led_classdev *led_cdev, case LED_FULL: if (led->led_type == ATH_LED_ASSOC) { sc->sc_flags |= SC_OP_LED_ASSOCIATED; - ieee80211_queue_delayed_work(sc->hw, + if (led_blink) + ieee80211_queue_delayed_work(sc->hw, &sc->ath_led_blink_work, 0); } else if (led->led_type == ATH_LED_RADIO) { ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); @@ -143,7 +144,8 @@ void ath_init_leds(struct ath_softc *sc) /* LED off, active low */ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); - INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); + if (led_blink) + INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); trigger = ieee80211_get_radio_led_name(sc->hw); snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), @@ -180,7 +182,8 @@ void ath_init_leds(struct ath_softc *sc) return; fail: - cancel_delayed_work_sync(&sc->ath_led_blink_work); + if (led_blink) + cancel_delayed_work_sync(&sc->ath_led_blink_work); ath_deinit_leds(sc); } diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 23c15aa9fbd5167c51e80601eb5213c0dd144283..61c1bee3f26a1159b6ac869e621d1573e0250f5c 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -16,12 +16,27 @@ #include "htc.h" -#define ATH9K_FW_USB_DEV(devid, fw) \ - { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw } +/* identify firmware images */ +#define FIRMWARE_AR7010 "ar7010.fw" +#define FIRMWARE_AR7010_1_1 "ar7010_1_1.fw" +#define FIRMWARE_AR9271 "ar9271.fw" + +MODULE_FIRMWARE(FIRMWARE_AR7010); +MODULE_FIRMWARE(FIRMWARE_AR7010_1_1); +MODULE_FIRMWARE(FIRMWARE_AR9271); static struct usb_device_id ath9k_hif_usb_ids[] = { - ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"), - ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"), + { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ + { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ + { USB_DEVICE(0x0cf3, 
0x7010) }, /* Atheros */ + { USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */ + { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ + { USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */ + { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ + { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ + { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ + { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ + { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ { }, }; @@ -760,6 +775,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) size_t len = hif_dev->firmware->size; u32 addr = AR9271_FIRMWARE; u8 *buf = kzalloc(4096, GFP_KERNEL); + u32 firm_offset; if (!buf) return -ENOMEM; @@ -783,32 +799,37 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) } kfree(buf); + if (hif_dev->device_id == 0x7010) + firm_offset = AR7010_FIRMWARE_TEXT; + else + firm_offset = AR9271_FIRMWARE_TEXT; + /* * Issue FW download complete command to firmware. */ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0), FIRMWARE_DOWNLOAD_COMP, 0x40 | USB_DIR_OUT, - AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ); + firm_offset >> 8, 0, NULL, 0, HZ); if (err) return -EIO; dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n", - "ar9271.fw", (unsigned long) hif_dev->firmware->size); + hif_dev->fw_name, (unsigned long) hif_dev->firmware->size); return 0; } -static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, - const char *fw_name) +static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) { int ret; /* Request firmware */ - ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev); + ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name, + &hif_dev->udev->dev); if (ret) { dev_err(&hif_dev->udev->dev, - "ath9k_htc: Firmware - %s not found\n", fw_name); + "ath9k_htc: Firmware - %s not found\n", hif_dev->fw_name); goto err_fw_req; } @@ -824,7 +845,8 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, ret = ath9k_hif_usb_download_fw(hif_dev); if (ret) { dev_err(&hif_dev->udev->dev, - "ath9k_htc: Firmware - %s download failed\n", fw_name); + "ath9k_htc: Firmware - %s download failed\n", + hif_dev->fw_name); goto err_fw_download; } @@ -851,7 +873,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, { struct usb_device *udev = interface_to_usbdev(interface); struct hif_device_usb *hif_dev; - const char *fw_name = (const char *) id->driver_info; int ret = 0; hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL); @@ -876,7 +897,27 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, goto err_htc_hw_alloc; } - ret = ath9k_hif_usb_dev_init(hif_dev, fw_name); + /* Find out which firmware to load */ + + switch(hif_dev->device_id) { + case 0x7010: + case 0x9018: + if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) + hif_dev->fw_name = FIRMWARE_AR7010_1_1; + else + hif_dev->fw_name = FIRMWARE_AR7010; + break; + default: + hif_dev->fw_name = FIRMWARE_AR9271; + break; + } + + if (!hif_dev->fw_name) { + dev_err(&udev->dev, "Can't determine firmware !\n"); + goto err_htc_hw_alloc; + } + + ret = ath9k_hif_usb_dev_init(hif_dev); if (ret) { ret = -EINVAL; goto err_hif_init_usb; @@ -911,12 +952,10 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev) void *buf; int ret; - buf = kmalloc(4, GFP_KERNEL); + buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL); if (!buf) return; - memcpy(buf, &reboot_cmd, 4); - ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE), buf, 4, NULL, HZ); if (ret) 
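[Editor's note: illustrative sketch, not part of the patch.] The hif_usb.c hunk above drops the per-entry driver_info firmware string and instead selects the firmware image in ath9k_hif_usb_probe() by switching on the USB product ID: 0x7010 and 0x9018 devices get the AR7010 image, with bcdDevice 0x0202 selecting the 1.1 build, and everything else falls back to the AR9271 image. The standalone helper below only restates that selection for reference; the function name pick_fw_name() and its freestanding form are assumptions for illustration and do not exist in the driver.

	/*
	 * Sketch of the firmware selection added to ath9k_hif_usb_probe().
	 * FIRMWARE_AR7010, FIRMWARE_AR7010_1_1 and FIRMWARE_AR9271 are the
	 * image names defined at the top of the hif_usb.c hunk above.
	 */
	static const char *pick_fw_name(u16 device_id, u16 bcd_device)
	{
		switch (device_id) {
		case 0x7010:
		case 0x9018:
			/* AR7010-class devices; rev 0x0202 needs the 1.1 build */
			if (bcd_device == 0x0202)
				return FIRMWARE_AR7010_1_1;	/* "ar7010_1_1.fw" */
			return FIRMWARE_AR7010;			/* "ar7010.fw" */
		default:
			return FIRMWARE_AR9271;			/* "ar9271.fw" */
		}
	}

The chosen name is then stored in hif_dev->fw_name (new field in struct hif_device_usb, see the hif_usb.h hunk below) so that ath9k_hif_usb_dev_init() and the download-complete path can report and use it without passing the string around.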
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h index 0aca49b6fcb68d33745dfeb3edcba3333c381b4d..2daf97b11c08bf02b78d1d85e4d4c85745d1f41b 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h @@ -19,6 +19,7 @@ #define AR9271_FIRMWARE 0x501000 #define AR9271_FIRMWARE_TEXT 0x903000 +#define AR7010_FIRMWARE_TEXT 0x906000 #define FIRMWARE_DOWNLOAD 0x30 #define FIRMWARE_DOWNLOAD_COMP 0x31 @@ -90,6 +91,7 @@ struct hif_device_usb { struct usb_anchor regout_submitted; struct usb_anchor rx_submitted; struct sk_buff *remain_skb; + const char *fw_name; int rx_remain_len; int rx_pkt_len; int rx_transfer_len; diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index c251603ab032f3668acaa9e5f8ce5bac63b1082f..3756400e6bf95658903a1675cd85efd65d4222c4 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -223,15 +223,6 @@ struct ath9k_htc_sta { enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID]; }; -struct ath9k_htc_aggr_work { - u16 tid; - u8 sta_addr[ETH_ALEN]; - struct ieee80211_hw *hw; - struct ieee80211_vif *vif; - enum ieee80211_ampdu_mlme_action action; - struct mutex mutex; -}; - #define ATH9K_HTC_RXBUF 256 #define HTC_RX_FRAME_HEADER_SIZE 40 @@ -257,12 +248,15 @@ struct ath9k_htc_tx_ctl { #define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) #define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++) +#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) + struct ath_tx_stats { u32 buf_queued; u32 buf_completed; u32 skb_queued; u32 skb_completed; u32 skb_dropped; + u32 queue_stats[WME_NUM_AC]; }; struct ath_rx_stats { @@ -286,11 +280,14 @@ struct ath9k_debug { #define TX_STAT_INC(c) do { } while (0) #define RX_STAT_INC(c) do { } while (0) +#define TX_QSTAT_INC(c) do { } while (0) + #endif /* CONFIG_ATH9K_HTC_DEBUGFS */ #define ATH_LED_PIN_DEF 1 #define ATH_LED_PIN_9287 8 #define ATH_LED_PIN_9271 15 +#define ATH_LED_PIN_7010 12 #define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */ #define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */ @@ -326,11 +323,10 @@ struct htc_beacon_config { #define OP_LED_ON BIT(4) #define OP_PREAMBLE_SHORT BIT(5) #define OP_PROTECT_ENABLE BIT(6) -#define OP_TXAGGR BIT(7) -#define OP_ASSOCIATED BIT(8) -#define OP_ENABLE_BEACON BIT(9) -#define OP_LED_DEINIT BIT(10) -#define OP_UNPLUGGED BIT(11) +#define OP_ASSOCIATED BIT(7) +#define OP_ENABLE_BEACON BIT(8) +#define OP_LED_DEINIT BIT(9) +#define OP_UNPLUGGED BIT(10) struct ath9k_htc_priv { struct device *dev; @@ -371,8 +367,6 @@ struct ath9k_htc_priv { struct ath9k_htc_rx rx; struct tasklet_struct tx_tasklet; struct sk_buff_head tx_queue; - struct ath9k_htc_aggr_work aggr_work; - struct delayed_work ath9k_aggr_work; struct delayed_work ath9k_ani_work; struct work_struct ps_work; @@ -390,13 +384,14 @@ struct ath9k_htc_priv { int led_off_duration; int led_on_cnt; int led_off_cnt; - int hwq_map[ATH9K_WME_AC_VO+1]; + + int beaconq; + int cabq; + int hwq_map[WME_NUM_AC]; #ifdef CONFIG_ATH9K_HTC_DEBUGFS struct ath9k_debug debug; #endif - struct ath9k_htc_target_rate tgt_rate; - struct mutex mutex; }; @@ -405,6 +400,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) common->bus_ops->read_cachesize(common, csz); } +void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv); void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif); void ath9k_htc_swba(struct 
ath9k_htc_priv *priv, u8 beacon_pending); @@ -424,8 +420,8 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv); void ath9k_tx_tasklet(unsigned long data); int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb); void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); -bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, - enum ath9k_tx_queue_subtype qtype); +bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype); +int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv); int get_hw_qnum(u16 queue, int *hwq_map); int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum, struct ath9k_tx_queue_info *qinfo); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index c10c7d002eb7dcd615e21b0d9967f0b18f4c8ebf..bd1506e69105d77ad77396e4cef62da89fb75ced 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -222,6 +222,29 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending) spin_unlock_bh(&priv->beacon_lock); } +/* Currently, only for IBSS */ +void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv) +{ + struct ath_hw *ah = priv->ah; + struct ath9k_tx_queue_info qi, qi_be; + int qnum = priv->hwq_map[WME_AC_BE]; + + memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); + memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info)); + + ath9k_hw_get_txq_props(ah, qnum, &qi_be); + + qi.tqi_aifs = qi_be.tqi_aifs; + qi.tqi_cwmin = 4*qi_be.tqi_cwmin; + qi.tqi_cwmax = qi_be.tqi_cwmax; + + if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Unable to update beacon queue %u!\n", qnum); + } else { + ath9k_hw_resettxqueue(ah, priv->beaconq); + } +} void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index dc015077a8d908ce17db5bc10e1cdb8a80d725b8..148b43317fdb1d6b78502ff42d208b197be28c33 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -34,6 +34,13 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); .max_power = 20, \ } +#define CHAN5G(_freq, _idx) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 20, \ +} + static struct ieee80211_channel ath9k_2ghz_channels[] = { CHAN2G(2412, 0), /* Channel 1 */ CHAN2G(2417, 1), /* Channel 2 */ @@ -51,6 +58,37 @@ static struct ieee80211_channel ath9k_2ghz_channels[] = { CHAN2G(2484, 13), /* Channel 14 */ }; +static struct ieee80211_channel ath9k_5ghz_channels[] = { + /* _We_ call this UNII 1 */ + CHAN5G(5180, 14), /* Channel 36 */ + CHAN5G(5200, 15), /* Channel 40 */ + CHAN5G(5220, 16), /* Channel 44 */ + CHAN5G(5240, 17), /* Channel 48 */ + /* _We_ call this UNII 2 */ + CHAN5G(5260, 18), /* Channel 52 */ + CHAN5G(5280, 19), /* Channel 56 */ + CHAN5G(5300, 20), /* Channel 60 */ + CHAN5G(5320, 21), /* Channel 64 */ + /* _We_ call this "Middle band" */ + CHAN5G(5500, 22), /* Channel 100 */ + CHAN5G(5520, 23), /* Channel 104 */ + CHAN5G(5540, 24), /* Channel 108 */ + CHAN5G(5560, 25), /* Channel 112 */ + CHAN5G(5580, 26), /* Channel 116 */ + CHAN5G(5600, 27), /* Channel 120 */ + CHAN5G(5620, 28), /* Channel 124 */ + CHAN5G(5640, 29), /* Channel 128 */ + CHAN5G(5660, 30), /* Channel 132 */ + CHAN5G(5680, 31), /* Channel 136 */ + CHAN5G(5700, 32), /* Channel 140 */ + /* _We_ call this UNII 3 */ + CHAN5G(5745, 
33), /* Channel 149 */ + CHAN5G(5765, 34), /* Channel 153 */ + CHAN5G(5785, 35), /* Channel 157 */ + CHAN5G(5805, 36), /* Channel 161 */ + CHAN5G(5825, 37), /* Channel 165 */ +}; + /* Atheros hardware rate code addition for short premble */ #define SHPCHECK(__hw_rate, __flags) \ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0) @@ -141,7 +179,7 @@ static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv, return htc_connect_service(priv->htc, &req, ep_id); } -static int ath9k_init_htc_services(struct ath9k_htc_priv *priv) +static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid) { int ret; @@ -199,10 +237,28 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv) if (ret) goto err; + /* + * Setup required credits before initializing HTC. + * This is a bit hacky, but, since queuing is done in + * the HIF layer, shouldn't matter much. + */ + + switch(devid) { + case 0x7010: + case 0x9018: + priv->htc->credits = 45; + break; + default: + priv->htc->credits = 33; + } + ret = htc_init(priv->htc); if (ret) goto err; + dev_info(priv->dev, "ath9k_htc: HTC initialized with %d credits\n", + priv->htc->credits); + return 0; err: @@ -398,17 +454,43 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = { static void setup_ht_cap(struct ath9k_htc_priv *priv, struct ieee80211_sta_ht_cap *ht_info) { + struct ath_common *common = ath9k_hw_common(priv->ah); + u8 tx_streams, rx_streams; + int i; + ht_info->ht_supported = true; ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_SM_PS | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40; + if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + + ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); - ht_info->mcs.rx_mask[0] = 0xff; + + /* ath9k_htc supports only 1 or 2 stream devices */ + tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2); + rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2); + + ath_print(common, ATH_DBG_CONFIG, + "TX streams %d, RX streams: %d\n", + tx_streams, rx_streams); + + if (tx_streams != rx_streams) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_streams - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } + + for (i = 0; i < rx_streams; i++) + ht_info->mcs.rx_mask[i] = 0xff; + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; } @@ -420,23 +502,37 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv) for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++) priv->hwq_map[i] = -1; - if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) { + priv->beaconq = ath9k_hw_beaconq_setup(priv->ah); + if (priv->beaconq == -1) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup BEACON xmit queue\n"); + goto err; + } + + priv->cabq = ath9k_htc_cabq_setup(priv); + if (priv->cabq == -1) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup CAB xmit queue\n"); + goto err; + } + + if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for BE traffic\n"); goto err; } - if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) { + if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for BK traffic\n"); goto err; } - if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) { + if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) { 
ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for VI traffic\n"); goto err; } - if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) { + if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for VO traffic\n"); goto err; @@ -468,36 +564,6 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv) */ for (i = 0; i < common->keymax; i++) ath9k_hw_keyreset(priv->ah, (u16) i); - - if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER, - ATH9K_CIPHER_TKIP, NULL)) { - /* - * Whether we should enable h/w TKIP MIC. - * XXX: if we don't support WME TKIP MIC, then we wouldn't - * report WMM capable, so it's always safe to turn on - * TKIP MIC in this case. - */ - ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL); - } - - /* - * Check whether the separate key cache entries - * are required to handle both tx+rx MIC keys. - * With split mic keys the number of stations is limited - * to 27 otherwise 59. - */ - if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER, - ATH9K_CIPHER_TKIP, NULL) - && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER, - ATH9K_CIPHER_MIC, NULL) - && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT, - 0, NULL)) - common->splitmic = 1; - - /* turn on mcast key search if possible */ - if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) - (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, - 1, 1, NULL); } static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv) @@ -512,6 +578,17 @@ static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv) priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates = ARRAY_SIZE(ath9k_legacy_rates); } + + if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) { + priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels; + priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; + priv->sbands[IEEE80211_BAND_5GHZ].n_channels = + ARRAY_SIZE(ath9k_5ghz_channels); + priv->sbands[IEEE80211_BAND_5GHZ].bitrates = + ath9k_legacy_rates + 4; + priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates = + ARRAY_SIZE(ath9k_legacy_rates) - 4; + } } static void ath9k_init_misc(struct ath9k_htc_priv *priv) @@ -524,7 +601,6 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv) if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); - priv->op_flags |= OP_TXAGGR; priv->ah->opmode = NL80211_IFTYPE_STATION; } @@ -556,14 +632,12 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid) spin_lock_init(&priv->beacon_lock); spin_lock_init(&priv->tx_lock); mutex_init(&priv->mutex); - mutex_init(&priv->aggr_work.mutex); mutex_init(&priv->htc_pm_lock); tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet, (unsigned long)priv); tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet, (unsigned long)priv); tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv); - INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work); INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work); INIT_WORK(&priv->ps_work, ath9k_ps_work); @@ -643,11 +717,17 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->sbands[IEEE80211_BAND_2GHZ]; + if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->sbands[IEEE80211_BAND_5GHZ]; if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) { if (test_bit(ATH9K_MODE_11G, 
priv->ah->caps.wireless_modes)) setup_ht_cap(priv, &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap); + if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) + setup_ht_cap(priv, + &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap); } SET_IEEE80211_PERM_ADDR(hw, common->macaddr); @@ -747,7 +827,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, goto err_free; } - ret = ath9k_init_htc_services(priv); + ret = ath9k_init_htc_services(priv, devid); if (ret) goto err_init; @@ -790,7 +870,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle) if (ret) return ret; - ret = ath9k_init_htc_services(htc_handle->drv_priv); + ret = ath9k_init_htc_services(htc_handle->drv_priv, + htc_handle->drv_priv->ah->hw_version.devid); return ret; } #endif diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9d371c18eb41c2da10e88daf2033259b6ad1657f..cf9bcc67ade2ea0f8c8a34b1d605c5e91cda2772 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -27,13 +27,11 @@ static struct dentry *ath9k_debugfs_root; static void ath_update_txpow(struct ath9k_htc_priv *priv) { struct ath_hw *ah = priv->ah; - u32 txpow; if (priv->curtxpow != priv->txpowlimit) { ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit); /* read back in case value is clamped */ - ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); - priv->curtxpow = txpow; + priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit; } } @@ -325,142 +323,129 @@ static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) tcap.flags_ext = 0x80601000; tcap.ampdu_limit = 0xffff0000; tcap.ampdu_subframes = 20; - tcap.tx_chainmask_legacy = 1; + tcap.tx_chainmask_legacy = priv->ah->caps.tx_chainmask; tcap.protmode = 1; - tcap.tx_chainmask = 1; + tcap.tx_chainmask = priv->ah->caps.tx_chainmask; WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap); return ret; } -static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, + struct ieee80211_sta *sta, + struct ath9k_htc_target_rate *trate) { - struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv; struct ieee80211_supported_band *sband; - struct ath9k_htc_target_rate trate; u32 caps = 0; - u8 cmd_rsp; - int i, j, ret; + int i, j; - memset(&trate, 0, sizeof(trate)); - - /* Only 2GHz is supported */ - sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band]; for (i = 0, j = 0; i < sband->n_bitrates; i++) { if (sta->supp_rates[sband->band] & BIT(i)) { - priv->tgt_rate.rates.legacy_rates.rs_rates[j] + trate->rates.legacy_rates.rs_rates[j] = (sband->bitrates[i].bitrate * 2) / 10; j++; } } - priv->tgt_rate.rates.legacy_rates.rs_nrates = j; + trate->rates.legacy_rates.rs_nrates = j; if (sta->ht_cap.ht_supported) { for (i = 0, j = 0; i < 77; i++) { if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8))) - priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i; + trate->rates.ht_rates.rs_rates[j++] = i; if (j == ATH_HTC_RATE_MAX) break; } - priv->tgt_rate.rates.ht_rates.rs_nrates = j; + trate->rates.ht_rates.rs_nrates = j; caps = WLAN_RC_HT_FLAG; + if (sta->ht_cap.mcs.rx_mask[1]) + caps |= WLAN_RC_DS_FLAG; if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) caps |= WLAN_RC_40_FLAG; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + if (conf_is_ht40(&priv->hw->conf) && + 
(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + caps |= WLAN_RC_SGI_FLAG; + else if (conf_is_ht20(&priv->hw->conf) && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)) caps |= WLAN_RC_SGI_FLAG; - } - priv->tgt_rate.sta_index = ista->index; - priv->tgt_rate.isnew = 1; - trate = priv->tgt_rate; - priv->tgt_rate.capflags = cpu_to_be32(caps); - trate.capflags = cpu_to_be32(caps); + trate->sta_index = ista->index; + trate->isnew = 1; + trate->capflags = cpu_to_be32(caps); +} - WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate); +static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv, + struct ath9k_htc_target_rate *trate) +{ + struct ath_common *common = ath9k_hw_common(priv->ah); + int ret; + u8 cmd_rsp; + + WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate); if (ret) { ath_print(common, ATH_DBG_FATAL, "Unable to initialize Rate information on target\n"); - return ret; } - ath_print(common, ATH_DBG_CONFIG, - "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps); - return 0; + return ret; } -static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40) +static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv, + struct ieee80211_sta *sta) { - struct ath9k_htc_priv *priv = hw->priv; - struct ieee80211_conf *conf = &hw->conf; - - if (!conf_is_ht(conf)) - return false; - - if (!(priv->op_flags & OP_ASSOCIATED) || - (priv->op_flags & OP_SCANNING)) - return false; + struct ath_common *common = ath9k_hw_common(priv->ah); + struct ath9k_htc_target_rate trate; + int ret; - if (conf_is_ht40(conf)) { - if (priv->ah->curchan->chanmode & - (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) { - return false; - } else { - *cw40 = true; - return true; - } - } else { /* ht20 */ - if (priv->ah->curchan->chanmode & CHANNEL_HT20) - return false; - else - return true; - } + memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); + ath9k_htc_setup_rate(priv, sta, &trate); + ret = ath9k_htc_send_rate_cmd(priv, &trate); + if (!ret) + ath_print(common, ATH_DBG_CONFIG, + "Updated target sta: %pM, rate caps: 0x%X\n", + sta->addr, be32_to_cpu(trate.capflags)); } -static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40) +static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) { - struct ath9k_htc_target_rate trate; struct ath_common *common = ath9k_hw_common(priv->ah); + struct ath9k_htc_target_rate trate; + struct ieee80211_sta *sta; int ret; - u32 caps = be32_to_cpu(priv->tgt_rate.capflags); - u8 cmd_rsp; - - memset(&trate, 0, sizeof(trate)); - - trate = priv->tgt_rate; - - if (is_cw40) - caps |= WLAN_RC_40_FLAG; - else - caps &= ~WLAN_RC_40_FLAG; - priv->tgt_rate.capflags = cpu_to_be32(caps); - trate.capflags = cpu_to_be32(caps); + memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); - WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate); - if (ret) { - ath_print(common, ATH_DBG_FATAL, - "Unable to update Rate information on target\n"); + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!sta) { + rcu_read_unlock(); return; } + ath9k_htc_setup_rate(priv, sta, &trate); + rcu_read_unlock(); - ath_print(common, ATH_DBG_CONFIG, "Rate control updated with " - "caps:0x%x on target\n", priv->tgt_rate.capflags); + ret = ath9k_htc_send_rate_cmd(priv, &trate); + if (!ret) + ath_print(common, ATH_DBG_CONFIG, + "Updated target sta: %pM, rate caps: 0x%X\n", + bss_conf->bssid, be32_to_cpu(trate.capflags)); } -static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv, - struct ieee80211_vif *vif, - u8 *sta_addr, u8 tid, bool 
oper) +static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_ampdu_mlme_action action, + u16 tid) { struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_aggr aggr; - struct ieee80211_sta *sta = NULL; struct ath9k_htc_sta *ista; int ret = 0; u8 cmd_rsp; @@ -469,72 +454,28 @@ static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv, return -EINVAL; memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr)); - - rcu_read_lock(); - - /* Check if we are able to retrieve the station */ - sta = ieee80211_find_sta(vif, sta_addr); - if (!sta) { - rcu_read_unlock(); - return -EINVAL; - } - ista = (struct ath9k_htc_sta *) sta->drv_priv; - if (oper) - ista->tid_state[tid] = AGGR_START; - else - ista->tid_state[tid] = AGGR_STOP; - aggr.sta_index = ista->index; - - rcu_read_unlock(); - - aggr.tidno = tid; - aggr.aggr_enable = oper; + aggr.tidno = tid & 0xf; + aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? true : false; WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); if (ret) ath_print(common, ATH_DBG_CONFIG, "Unable to %s TX aggregation for (%pM, %d)\n", - (oper) ? "start" : "stop", sta->addr, tid); + (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid); else ath_print(common, ATH_DBG_CONFIG, - "%s aggregation for (%pM, %d)\n", - (oper) ? "Starting" : "Stopping", sta->addr, tid); - - return ret; -} + "%s TX aggregation for (%pM, %d)\n", + (aggr.aggr_enable) ? "Starting" : "Stopping", + sta->addr, tid); -void ath9k_htc_aggr_work(struct work_struct *work) -{ - int ret = 0; - struct ath9k_htc_priv *priv = - container_of(work, struct ath9k_htc_priv, - ath9k_aggr_work.work); - struct ath9k_htc_aggr_work *wk = &priv->aggr_work; - - mutex_lock(&wk->mutex); - - switch (wk->action) { - case IEEE80211_AMPDU_TX_START: - ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr, - wk->tid, true); - if (!ret) - ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr, - wk->tid); - break; - case IEEE80211_AMPDU_TX_STOP: - ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr, - wk->tid, false); - ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid); - break; - default: - ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL, - "Unknown AMPDU action\n"); - } + spin_lock_bh(&priv->tx_lock); + ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? 
AGGR_START : AGGR_STOP; + spin_unlock_bh(&priv->tx_lock); - mutex_unlock(&wk->mutex); + return ret; } /*********/ @@ -552,8 +493,7 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file) static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath9k_htc_priv *priv = - (struct ath9k_htc_priv *) file->private_data; + struct ath9k_htc_priv *priv = file->private_data; struct ath9k_htc_target_stats cmd_rsp; char buf[512]; unsigned int len = 0; @@ -584,6 +524,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, len += snprintf(buf + len, sizeof(buf) - len, "%19s : %10u\n", "TX Rate", priv->debug.txrate); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -596,8 +539,7 @@ static const struct file_operations fops_tgt_stats = { static ssize_t read_file_xmit(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath9k_htc_priv *priv = - (struct ath9k_htc_priv *) file->private_data; + struct ath9k_htc_priv *priv = file->private_data; char buf[512]; unsigned int len = 0; @@ -617,6 +559,22 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, "%20s : %10u\n", "SKBs dropped", priv->debug.tx_stats.skb_dropped); + len += snprintf(buf + len, sizeof(buf) - len, + "%20s : %10u\n", "BE queued", + priv->debug.tx_stats.queue_stats[WME_AC_BE]); + len += snprintf(buf + len, sizeof(buf) - len, + "%20s : %10u\n", "BK queued", + priv->debug.tx_stats.queue_stats[WME_AC_BK]); + len += snprintf(buf + len, sizeof(buf) - len, + "%20s : %10u\n", "VI queued", + priv->debug.tx_stats.queue_stats[WME_AC_VI]); + len += snprintf(buf + len, sizeof(buf) - len, + "%20s : %10u\n", "VO queued", + priv->debug.tx_stats.queue_stats[WME_AC_VO]); + + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -629,8 +587,7 @@ static const struct file_operations fops_xmit = { static ssize_t read_file_recv(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct ath9k_htc_priv *priv = - (struct ath9k_htc_priv *) file->private_data; + struct ath9k_htc_priv *priv = file->private_data; char buf[512]; unsigned int len = 0; @@ -644,6 +601,9 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, "%20s : %10u\n", "SKBs Dropped", priv->debug.rx_stats.skb_dropped); + if (len > sizeof(buf)) + len = sizeof(buf); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -978,6 +938,8 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv) priv->ah->led_pin = ATH_LED_PIN_9287; else if (AR_SREV_9271(priv->ah)) priv->ah->led_pin = ATH_LED_PIN_9271; + else if (AR_DEVID_7010(priv->ah)) + priv->ah->led_pin = ATH_LED_PIN_7010; else priv->ah->led_pin = ATH_LED_PIN_DEF; @@ -1054,6 +1016,95 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv) wiphy_rfkill_start_polling(priv->hw->wiphy); } +static void ath9k_htc_radio_enable(struct ieee80211_hw *hw) +{ + struct ath9k_htc_priv *priv = hw->priv; + struct ath_hw *ah = priv->ah; + struct ath_common *common = ath9k_hw_common(ah); + int ret; + u8 cmd_rsp; + + if (!ah->curchan) + ah->curchan = ath9k_cmn_get_curchannel(hw, ah); + + /* Reset the HW */ + ret = ath9k_hw_reset(ah, ah->curchan, false); + if (ret) { + ath_print(common, ATH_DBG_FATAL, + "Unable to reset hardware; reset status %d " + "(freq %u MHz)\n", ret, ah->curchan->channel); + } + + ath_update_txpow(priv); + + /* Start RX */ 
+ WMI_CMD(WMI_START_RECV_CMDID); + ath9k_host_rx_init(priv); + + /* Start TX */ + htc_start(priv->htc); + spin_lock_bh(&priv->tx_lock); + priv->tx_queues_stop = false; + spin_unlock_bh(&priv->tx_lock); + ieee80211_wake_queues(hw); + + WMI_CMD(WMI_ENABLE_INTR_CMDID); + + /* Enable LED */ + ath9k_hw_cfg_output(ah, ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(ah, ah->led_pin, 0); +} + +static void ath9k_htc_radio_disable(struct ieee80211_hw *hw) +{ + struct ath9k_htc_priv *priv = hw->priv; + struct ath_hw *ah = priv->ah; + struct ath_common *common = ath9k_hw_common(ah); + int ret; + u8 cmd_rsp; + + ath9k_htc_ps_wakeup(priv); + + /* Disable LED */ + ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + + WMI_CMD(WMI_DISABLE_INTR_CMDID); + + /* Stop TX */ + ieee80211_stop_queues(hw); + htc_stop(priv->htc); + WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); + skb_queue_purge(&priv->tx_queue); + + /* Stop RX */ + WMI_CMD(WMI_STOP_RECV_CMDID); + + /* + * The MIB counters have to be disabled here, + * since the target doesn't do it. + */ + ath9k_hw_disable_mib_counters(ah); + + if (!ah->curchan) + ah->curchan = ath9k_cmn_get_curchannel(hw, ah); + + /* Reset the HW */ + ret = ath9k_hw_reset(ah, ah->curchan, false); + if (ret) { + ath_print(common, ATH_DBG_FATAL, + "Unable to reset hardware; reset status %d " + "(freq %u MHz)\n", ret, ah->curchan->channel); + } + + /* Disable the PHY */ + ath9k_hw_phy_disable(ah); + + ath9k_htc_ps_restore(priv); + ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); +} + /**********************/ /* mac80211 Callbacks */ /**********************/ @@ -1099,7 +1150,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) return 0; } -static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led) +static int ath9k_htc_start(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; @@ -1111,10 +1162,16 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led) __be16 htc_mode; u8 cmd_rsp; + mutex_lock(&priv->mutex); + ath_print(common, ATH_DBG_CONFIG, "Starting driver with initial channel: %d MHz\n", curchan->center_freq); + /* Ensure that HW is awake before flushing RX */ + ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); + WMI_CMD(WMI_FLUSH_RECV_CMDID); + /* setup initial channel */ init_channel = ath9k_cmn_get_curchannel(hw, ah); @@ -1127,6 +1184,7 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led) ath_print(common, ATH_DBG_FATAL, "Unable to reset hardware; reset status %d " "(freq %u MHz)\n", ret, curchan->center_freq); + mutex_unlock(&priv->mutex); return ret; } @@ -1147,31 +1205,14 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led) priv->tx_queues_stop = false; spin_unlock_bh(&priv->tx_lock); - if (led) { - /* Enable LED */ - ath9k_hw_cfg_output(ah, ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - ath9k_hw_set_gpio(ah, ah->led_pin, 0); - } - ieee80211_wake_queues(hw); - return ret; -} - -static int ath9k_htc_start(struct ieee80211_hw *hw) -{ - struct ath9k_htc_priv *priv = hw->priv; - int ret = 0; - - mutex_lock(&priv->mutex); - ret = ath9k_htc_radio_enable(hw, false); mutex_unlock(&priv->mutex); return ret; } -static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led) +static void ath9k_htc_stop(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; @@ -1179,21 +1220,17 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led) int ret = 0; u8 cmd_rsp; + 
mutex_lock(&priv->mutex); + if (priv->op_flags & OP_INVALID) { ath_print(common, ATH_DBG_ANY, "Device not present\n"); + mutex_unlock(&priv->mutex); return; } - if (led) { - /* Disable LED */ - ath9k_hw_set_gpio(ah, ah->led_pin, 1); - ath9k_hw_cfg_gpio_input(ah, ah->led_pin); - } - /* Cancel all the running timers/work .. */ cancel_work_sync(&priv->ps_work); cancel_delayed_work_sync(&priv->ath9k_ani_work); - cancel_delayed_work_sync(&priv->ath9k_aggr_work); cancel_delayed_work_sync(&priv->ath9k_led_blink_work); ath9k_led_stop_brightness(priv); @@ -1202,12 +1239,6 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led) WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); - ath9k_hw_phy_disable(ah); - ath9k_hw_disable(ah); - ath9k_hw_configpcipowersave(ah, 1, 1); - ath9k_htc_ps_restore(priv); - ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); - skb_queue_purge(&priv->tx_queue); /* Remove monitor interface here */ @@ -1220,21 +1251,18 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led) "Monitor interface removed\n"); } + ath9k_hw_phy_disable(ah); + ath9k_hw_disable(ah); + ath9k_hw_configpcipowersave(ah, 1, 1); + ath9k_htc_ps_restore(priv); + ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); + priv->op_flags |= OP_INVALID; ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); -} - -static void ath9k_htc_stop(struct ieee80211_hw *hw) -{ - struct ath9k_htc_priv *priv = hw->priv; - - mutex_lock(&priv->mutex); - ath9k_htc_radio_disable(hw, false); mutex_unlock(&priv->mutex); } - static int ath9k_htc_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1302,6 +1330,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, out: ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); + return ret; } @@ -1318,6 +1347,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, vif->addr, ETH_ALEN); @@ -1328,6 +1358,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath9k_htc_remove_station(priv, vif, NULL); priv->vif = NULL; + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } @@ -1343,30 +1374,27 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) bool enable_radio = false; bool idle = !!(conf->flags & IEEE80211_CONF_IDLE); + mutex_lock(&priv->htc_pm_lock); if (!idle && priv->ps_idle) enable_radio = true; - priv->ps_idle = idle; + mutex_unlock(&priv->htc_pm_lock); if (enable_radio) { - ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); - ath9k_htc_radio_enable(hw, true); ath_print(common, ATH_DBG_CONFIG, "not-idle: enabling radio\n"); + ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); + ath9k_htc_radio_enable(hw); } } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *curchan = hw->conf.channel; int pos = curchan->hw_value; - bool is_cw40 = false; ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", curchan->center_freq); - if (check_rc_update(hw, &is_cw40)) - ath9k_htc_rc_update(priv, is_cw40); - ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]); if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { @@ -1399,14 +1427,21 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) } } - if (priv->ps_idle) { + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + mutex_lock(&priv->htc_pm_lock); + if (!priv->ps_idle) { + 
mutex_unlock(&priv->htc_pm_lock); + goto out; + } + mutex_unlock(&priv->htc_pm_lock); + ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); - ath9k_htc_radio_disable(hw, true); + ath9k_htc_radio_disable(hw); } +out: mutex_unlock(&priv->mutex); - return 0; } @@ -1428,8 +1463,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, u32 rfilt; mutex_lock(&priv->mutex); - ath9k_htc_ps_wakeup(priv); + changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; @@ -1444,30 +1479,38 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, mutex_unlock(&priv->mutex); } -static void ath9k_htc_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) +static int ath9k_htc_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct ath9k_htc_priv *priv = hw->priv; int ret; mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); + ret = ath9k_htc_add_station(priv, vif, sta); + if (!ret) + ath9k_htc_init_rate(priv, sta); + ath9k_htc_ps_restore(priv); + mutex_unlock(&priv->mutex); - switch (cmd) { - case STA_NOTIFY_ADD: - ret = ath9k_htc_add_station(priv, vif, sta); - if (!ret) - ath9k_htc_init_rate(priv, vif, sta); - break; - case STA_NOTIFY_REMOVE: - ath9k_htc_remove_station(priv, vif, sta); - break; - default: - break; - } + return ret; +} + +static int ath9k_htc_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath9k_htc_priv *priv = hw->priv; + int ret; + mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); + ret = ath9k_htc_remove_station(priv, vif, sta); + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); + + return ret; } static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue, @@ -1482,6 +1525,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue, return 0; mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); @@ -1499,9 +1543,16 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue, params->cw_max, params->txop); ret = ath_htc_txq_update(priv, qnum, &qi); - if (ret) + if (ret) { ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); + goto out; + } + if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) && + (qnum == priv->hwq_map[WME_AC_BE])) + ath9k_htc_beaconq_config(priv); +out: + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return ret; @@ -1574,7 +1625,6 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, ath_start_ani(priv); } else { priv->op_flags &= ~OP_ASSOCIATED; - cancel_work_sync(&priv->ps_work); cancel_delayed_work_sync(&priv->ath9k_ani_work); } } @@ -1631,6 +1681,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, ath9k_hw_init_global_settings(ah); } + if (changed & BSS_CHANGED_HT) + ath9k_htc_update_rate(priv, vif, bss_conf); + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } @@ -1641,7 +1694,9 @@ static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw) u64 tsf; mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); tsf = ath9k_hw_gettsf64(priv->ah); + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); return tsf; @@ -1652,7 +1707,9 @@ static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf) struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); ath9k_hw_settsf64(priv->ah, tsf); + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } @@ -1660,11 +1717,11 @@ static void 
ath9k_htc_reset_tsf(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; - ath9k_htc_ps_wakeup(priv); mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); ath9k_hw_reset_tsf(priv->ah); - mutex_unlock(&priv->mutex); ath9k_htc_ps_restore(priv); + mutex_unlock(&priv->mutex); } static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, @@ -1674,8 +1731,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, u16 tid, u16 *ssn) { struct ath9k_htc_priv *priv = hw->priv; - struct ath9k_htc_aggr_work *work = &priv->aggr_work; struct ath9k_htc_sta *ista; + int ret = 0; switch (action) { case IEEE80211_AMPDU_RX_START: @@ -1683,26 +1740,26 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_RX_STOP: break; case IEEE80211_AMPDU_TX_START: + ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid); + if (!ret) + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; case IEEE80211_AMPDU_TX_STOP: - if (!(priv->op_flags & OP_TXAGGR)) - return -ENOTSUPP; - memcpy(work->sta_addr, sta->addr, ETH_ALEN); - work->hw = hw; - work->vif = vif; - work->action = action; - work->tid = tid; - ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0); + ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ista = (struct ath9k_htc_sta *) sta->drv_priv; + spin_lock_bh(&priv->tx_lock); ista->tid_state[tid] = AGGR_OPERATIONAL; + spin_unlock_bh(&priv->tx_lock); break; default: ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL, "Unknown AMPDU action\n"); } - return 0; + return ret; } static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) @@ -1722,8 +1779,8 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw) { struct ath9k_htc_priv *priv = hw->priv; - ath9k_htc_ps_wakeup(priv); mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); spin_lock_bh(&priv->beacon_lock); priv->op_flags &= ~OP_SCANNING; spin_unlock_bh(&priv->beacon_lock); @@ -1731,8 +1788,8 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw) if (priv->op_flags & OP_ASSOCIATED) ath9k_htc_beacon_config(priv, priv->vif); ath_start_ani(priv); - mutex_unlock(&priv->mutex); ath9k_htc_ps_restore(priv); + mutex_unlock(&priv->mutex); } static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) @@ -1746,8 +1803,10 @@ static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw, struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); priv->ah->coverage_class = coverage_class; ath9k_hw_init_global_settings(priv->ah); + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } @@ -1759,7 +1818,8 @@ struct ieee80211_ops ath9k_htc_ops = { .remove_interface = ath9k_htc_remove_interface, .config = ath9k_htc_config, .configure_filter = ath9k_htc_configure_filter, - .sta_notify = ath9k_htc_sta_notify, + .sta_add = ath9k_htc_sta_add, + .sta_remove = ath9k_htc_sta_remove, .conf_tx = ath9k_htc_conf_tx, .bss_info_changed = ath9k_htc_bss_info_changed, .set_key = ath9k_htc_set_key, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 2571b443ac82102c126baf428b4277f9725eacdc..bd0b4acc3ece542d15413a1409421c1e538b73da 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -20,19 +20,29 @@ /* TX */ /******/ +#define ATH9K_HTC_INIT_TXQ(subtype) do { \ + qi.tqi_subtype = subtype; \ + qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \ + 
qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \ + qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \ + qi.tqi_physCompBuf = 0; \ + qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | \ + TXQ_FLAG_TXDESCINT_ENABLE; \ + } while (0) + int get_hw_qnum(u16 queue, int *hwq_map) { switch (queue) { case 0: - return hwq_map[ATH9K_WME_AC_VO]; + return hwq_map[WME_AC_VO]; case 1: - return hwq_map[ATH9K_WME_AC_VI]; + return hwq_map[WME_AC_VI]; case 2: - return hwq_map[ATH9K_WME_AC_BE]; + return hwq_map[WME_AC_BE]; case 3: - return hwq_map[ATH9K_WME_AC_BK]; + return hwq_map[WME_AC_BK]; default: - return hwq_map[ATH9K_WME_AC_BE]; + return hwq_map[WME_AC_BE]; } } @@ -71,7 +81,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) struct ath9k_htc_vif *avp; struct ath9k_htc_tx_ctl tx_ctl; enum htc_endpoint_id epid; - u16 qnum, hw_qnum; + u16 qnum; __le16 fc; u8 *tx_fhdr; u8 sta_idx; @@ -131,20 +141,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr)); qnum = skb_get_queue_mapping(skb); - hw_qnum = get_hw_qnum(qnum, priv->hwq_map); - switch (hw_qnum) { + switch (qnum) { case 0: - epid = priv->data_be_ep; + TX_QSTAT_INC(WME_AC_VO); + epid = priv->data_vo_ep; break; - case 2: + case 1: + TX_QSTAT_INC(WME_AC_VI); epid = priv->data_vi_ep; break; - case 3: - epid = priv->data_vo_ep; + case 2: + TX_QSTAT_INC(WME_AC_BE); + epid = priv->data_be_ep; break; - case 1: + case 3: default: + TX_QSTAT_INC(WME_AC_BK); epid = priv->data_bk_ep; break; } @@ -174,6 +187,19 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) return htc_send(priv->htc, skb, epid, &tx_ctl); } +static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv, + struct ath9k_htc_sta *ista, u8 tid) +{ + bool ret = false; + + spin_lock_bh(&priv->tx_lock); + if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP)) + ret = true; + spin_unlock_bh(&priv->tx_lock); + + return ret; +} + void ath9k_tx_tasklet(unsigned long data) { struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; @@ -203,8 +229,7 @@ void ath9k_tx_tasklet(unsigned long data) /* Check if we need to start aggregation */ if (sta && conf_is_ht(&priv->hw->conf) && - (priv->op_flags & OP_TXAGGR) - && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { + !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { if (ieee80211_is_data_qos(fc)) { u8 *qc, tid; struct ath9k_htc_sta *ista; @@ -213,10 +238,11 @@ void ath9k_tx_tasklet(unsigned long data) tid = qc[0] & 0xf; ista = (struct ath9k_htc_sta *)sta->drv_priv; - if ((tid < ATH9K_HTC_MAX_TID) && - ista->tid_state[tid] == AGGR_STOP) { + if (ath9k_htc_check_tx_aggr(priv, ista, tid)) { ieee80211_start_tx_ba_session(sta, tid); + spin_lock_bh(&priv->tx_lock); ista->tid_state[tid] = AGGR_PROGRESS; + spin_unlock_bh(&priv->tx_lock); } } } @@ -284,8 +310,7 @@ void ath9k_tx_cleanup(struct ath9k_htc_priv *priv) } -bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, - enum ath9k_tx_queue_subtype subtype) +bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype) { struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); @@ -293,13 +318,7 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int qnum; memset(&qi, 0, sizeof(qi)); - - qi.tqi_subtype = subtype; - qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; - qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; - qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; - qi.tqi_physCompBuf = 0; - qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE; + ATH9K_HTC_INIT_TXQ(subtype); qnum = 
ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi); if (qnum == -1) @@ -317,6 +336,16 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, return true; } +int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv) +{ + struct ath9k_tx_queue_info qi; + + memset(&qi, 0, sizeof(qi)); + ATH9K_HTC_INIT_TXQ(0); + + return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi); +} + /******/ /* RX */ /******/ @@ -387,9 +416,6 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv) /* configure operational mode */ ath9k_hw_setopmode(ah); - /* Handle any link-level address change. */ - ath9k_hw_setmac(ah, common->macaddr); - /* calculate and install multicast filter */ mfilt[0] = mfilt[1] = ~0; ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); @@ -399,7 +425,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv) { ath9k_hw_rxena(priv->ah); ath9k_htc_opmode_init(priv); - ath9k_hw_startpcureceive(priv->ah); + ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING)); priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER; } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 064397fd738e5572dd2f68dea5f4221b48cfb01b..705c0f342e1c0342ea4000c5ea747ececf31dfce 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -89,7 +89,6 @@ static void htc_process_target_rdy(struct htc_target *target, struct htc_endpoint *endpoint; struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf; - target->credits = be16_to_cpu(htc_ready_msg->credits); target->credit_size = be16_to_cpu(htc_ready_msg->credit_size); endpoint = &target->endpoint[ENDPOINT0]; @@ -159,7 +158,7 @@ static int htc_config_pipe_credits(struct htc_target *target) cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID); cp_msg->pipe_id = USB_WLAN_TX_PIPE; - cp_msg->credits = 28; + cp_msg->credits = target->credits; target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS; diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index 624422a8169e92614327da827d3252892204b220..ffecbadaea4a592450da48c3e849b9bdd1961b07 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -128,6 +128,17 @@ static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds, ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf); } +static inline void ath9k_hw_procmibevent(struct ath_hw *ah) +{ + ath9k_hw_ops(ah)->ani_proc_mib_event(ah); +} + +static inline void ath9k_hw_ani_monitor(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + ath9k_hw_ops(ah)->ani_monitor(ah, chan); +} + /* Private hardware call ops */ /* PHY ops */ @@ -253,12 +264,6 @@ static inline void ath9k_hw_do_getnf(struct ath_hw *ah, ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray); } -static inline void ath9k_hw_loadnf(struct ath_hw *ah, - struct ath9k_channel *chan) -{ - ath9k_hw_private_ops(ah)->loadnf(ah, chan); -} - static inline bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -277,4 +282,9 @@ static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah, return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType); } +static inline void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) +{ + ath9k_hw_private_ops(ah)->ani_reset(ah, is_scanning); +} + #endif /* ATH9K_HW_OPS_H */ diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index c33f17dbe6f17e978191e76ac43bffbbc0978ffe..8d291ccf5c88fcc923af7bceab1ee18eb681741c 100644 --- 
a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -23,11 +23,6 @@ #include "rc.h" #include "ar9003_mac.h" -#define ATH9K_CLOCK_RATE_CCK 22 -#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 -#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 -#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44 - static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); MODULE_AUTHOR("Atheros Communications"); @@ -80,6 +75,15 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah); } +static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah) +{ + /* You will not have this callback if using the old ANI */ + if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs) + return; + + ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah); +} + /********************/ /* Helper Functions */ /********************/ @@ -371,13 +375,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.ofdm_trig_high = 500; ah->config.cck_trig_high = 200; ah->config.cck_trig_low = 100; - - /* - * For now ANI is disabled for AR9003, it is still - * being tested. - */ - if (!AR_SREV_9300_20_OR_LATER(ah)) - ah->config.enable_ani = 1; + ah->config.enable_ani = true; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { ah->config.spurchans[i][0] = AR_NO_SPUR; @@ -390,12 +388,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.ht_enable = 0; ah->config.rx_intr_mitigation = true; - - /* - * Tx IQ Calibration (ah->config.tx_iq_calibration) is only - * used by AR9003, but it is showing reliability issues. - * It will take a while to fix so this is currently disabled. - */ + ah->config.pcieSerDesWrite = true; /* * We need this for PCI devices only (Cardbus, PCI, miniPCI) @@ -433,7 +426,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) ah->ah_flags = AH_USE_EEPROM; ah->atim_window = 0; - ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE; + ah->sta_id1_defaults = + AR_STA_ID1_CRPT_MIC_ENABLE | + AR_STA_ID1_MCAST_KSRCH; ah->beacon_interval = 100; ah->enable_32kHz_clock = DONT_USE_32KHZ; ah->slottime = (u32) -1; @@ -537,7 +532,8 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || - (AR_SREV_9280(ah) && !ah->is_pciexpress)) { + ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) && + !ah->is_pciexpress)) { ah->config.serialize_regmode = SER_REG_MODE_ON; } else { @@ -571,28 +567,19 @@ static int __ath9k_hw_init(struct ath_hw *ah) ah->ani_function = ATH9K_ANI_ALL; if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; + if (!AR_SREV_9300_20_OR_LATER(ah)) + ah->ani_function &= ~ATH9K_ANI_MRC_CCK; ath9k_hw_init_mode_regs(ah); /* - * Configire PCIE after Ini init. SERDES values now come from ini file - * This enables PCIe low power mode. + * Read back AR_WA into a permanent copy and set bits 14 and 17. + * We need to do this to avoid RMW of this register. We cannot + * read the reg when chip is asleep. */ - if (AR_SREV_9300_20_OR_LATER(ah)) { - u32 regval; - unsigned int i; - - /* Set Bits 16 and 17 in the AR_WA register. 
*/ - regval = REG_READ(ah, AR_WA); - regval |= 0x00030000; - REG_WRITE(ah, AR_WA, regval); - - for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) { - REG_WRITE(ah, - INI_RA(&ah->iniPcieSerdesLowPower, i, 0), - INI_RA(&ah->iniPcieSerdesLowPower, i, 1)); - } - } + ah->WARegVal = REG_READ(ah, AR_WA); + ah->WARegVal |= (AR_WA_D3_L1_DISABLE | + AR_WA_ASPM_TIMER_BASED_DISABLE); if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, 0, 0); @@ -623,10 +610,8 @@ static int __ath9k_hw_init(struct ath_hw *ah) else ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); - if (AR_SREV_9300_20_OR_LATER(ah)) - ar9003_hw_set_nf_limits(ah); - ath9k_init_nfcal_hist_buffer(ah); + ah->bb_watchdog_timeout_ms = 25; common->state = ATH_HW_INITIALIZED; @@ -1012,6 +997,11 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) ENABLE_REGWRITE_BUFFER(ah); + if (AR_SREV_9300_20_OR_LATER(ah)) { + REG_WRITE(ah, AR_WA, ah->WARegVal); + udelay(10); + } + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); @@ -1066,6 +1056,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) { ENABLE_REGWRITE_BUFFER(ah); + if (AR_SREV_9300_20_OR_LATER(ah)) { + REG_WRITE(ah, AR_WA, ah->WARegVal); + udelay(10); + } + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); @@ -1073,6 +1068,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) REG_WRITE(ah, AR_RC, AR_RC_AHB); REG_WRITE(ah, AR_RTC_RESET, 0); + udelay(2); REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); @@ -1102,6 +1098,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) { + if (AR_SREV_9300_20_OR_LATER(ah)) { + REG_WRITE(ah, AR_WA, ah->WARegVal); + udelay(10); + } + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); @@ -1232,9 +1233,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (!ah->chip_fullsleep) { ath9k_hw_abortpcurecv(ah); - if (!ath9k_hw_stopdmarecv(ah)) + if (!ath9k_hw_stopdmarecv(ah)) { ath_print(common, ATH_DBG_XMIT, "Failed to stop receive dma\n"); + bChannelChange = false; + } } if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) @@ -1265,7 +1268,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; /* For chips on which RTC reset is done, save TSF before it gets cleared */ - if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + if (AR_SREV_9100(ah) || + (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))) tsf = ath9k_hw_gettsf64(ah); saveLedState = REG_READ(ah, AR_CFG_LED) & @@ -1297,16 +1301,30 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, } /* Restore TSF */ - if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + if (tsf) ath9k_hw_settsf64(ah, tsf); if (AR_SREV_9280_10_OR_LATER(ah)) REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); + if (!AR_SREV_9300_20_OR_LATER(ah)) + ar9002_hw_enable_async_fifo(ah); + r = ath9k_hw_process_ini(ah, chan); if (r) return r; + /* + * Some AR91xx SoC devices frequently fail to accept TSF writes + * right after the chip reset. 
When that happens, write a new + * value after the initvals have been applied, with an offset + * based on measured time difference + */ + if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) { + tsf += 1500; + ath9k_hw_settsf64(ah, tsf); + } + /* Setup MFP options for CCMP */ if (AR_SREV_9280_20_OR_LATER(ah)) { /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt @@ -1367,6 +1385,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_resettxqueue(ah, i); ath9k_hw_init_interrupt_masks(ah, ah->opmode); + ath9k_hw_ani_cache_ini_regs(ah); ath9k_hw_init_qos(ah); if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) @@ -1375,7 +1394,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_init_global_settings(ah); if (!AR_SREV_9300_20_OR_LATER(ah)) { - ar9002_hw_enable_async_fifo(ah); + ar9002_hw_update_async_fifo(ah); ar9002_hw_enable_wep_aggregation(ah); } @@ -1426,9 +1445,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); } } else { - /* Configure AR9271 target WLAN */ - if (AR_SREV_9271(ah)) - REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB); + if (common->bus_ops->ath_bus_type == ATH_USB) { + /* Configure AR9271 target WLAN */ + if (AR_SREV_9271(ah)) + REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB); + else + REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); + } #ifdef __BIG_ENDIAN else REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); @@ -1441,6 +1464,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (AR_SREV_9300_20_OR_LATER(ah)) { ath9k_hw_loadnf(ah, curchan); ath9k_hw_start_nfcal(ah); + ar9003_hw_bb_watchdog_config(ah); } return 0; @@ -1486,9 +1510,10 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry) } EXPORT_SYMBOL(ath9k_hw_keyreset); -bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) +static bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) { u32 macHi, macLo; + u32 unicast_flag = AR_KEYTABLE_VALID; if (entry >= ah->caps.keycache_size) { ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, @@ -1497,6 +1522,16 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) } if (mac != NULL) { + /* + * AR_KEYTABLE_VALID indicates that the address is a unicast + * address, which must match the transmitter address for + * decrypting frames. + * Not setting this bit allows the hardware to use the key + * for multicast frame decryption. 
+ */ + if (mac[0] & 0x01) + unicast_flag = 0; + macHi = (mac[5] << 8) | mac[4]; macLo = (mac[3] << 24) | (mac[2] << 16) | @@ -1509,11 +1544,10 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) macLo = macHi = 0; } REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); - REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag); return true; } -EXPORT_SYMBOL(ath9k_hw_keysetmac); bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, const struct ath9k_keyval *k, @@ -1714,17 +1748,6 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, } EXPORT_SYMBOL(ath9k_hw_set_keycache_entry); -bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry) -{ - if (entry < ah->caps.keycache_size) { - u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry)); - if (val & AR_KEYTABLE_VALID) - return true; - } - return false; -} -EXPORT_SYMBOL(ath9k_hw_keyisvalid); - /******************************/ /* Power Management (Chipset) */ /******************************/ @@ -1751,6 +1774,11 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) REG_CLR_BIT(ah, (AR_RTC_RESET), AR_RTC_RESET_EN); } + + /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ + if (AR_SREV_9300_20_OR_LATER(ah)) + REG_WRITE(ah, AR_WA, + ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } /* @@ -1777,6 +1805,10 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) AR_RTC_FORCE_WAKE_EN); } } + + /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */ + if (AR_SREV_9300_20_OR_LATER(ah)) + REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) @@ -1784,6 +1816,12 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) u32 val; int i; + /* Set Bits 14 and 17 of AR_WA before powering on the chip. 
*/ + if (AR_SREV_9300_20_OR_LATER(ah)) { + REG_WRITE(ah, AR_WA, ah->WARegVal); + udelay(10); + } + if (setChip) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { @@ -2138,6 +2176,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9271(ah)) pCap->num_gpio_pins = AR9271_NUM_GPIO; + else if (AR_DEVID_7010(ah)) + pCap->num_gpio_pins = AR7010_NUM_GPIO; else if (AR_SREV_9285_10_OR_LATER(ah)) pCap->num_gpio_pins = AR9285_NUM_GPIO; else if (AR_SREV_9280_10_OR_LATER(ah)) @@ -2165,7 +2205,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; } #endif - if (AR_SREV_9271(ah)) + if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah)) pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP; else pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; @@ -2220,6 +2260,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->rx_status_len = sizeof(struct ar9003_rxs); pCap->tx_desc_len = sizeof(struct ar9003_txc); pCap->txs_len = sizeof(struct ar9003_txs); + if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) + pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; } else { pCap->tx_desc_len = sizeof(struct ath_desc); if (AR_SREV_9280_20(ah) && @@ -2232,100 +2274,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) if (AR_SREV_9300_20_OR_LATER(ah)) pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; - return 0; -} - -bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type, - u32 capability, u32 *result) -{ - struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); - switch (type) { - case ATH9K_CAP_CIPHER: - switch (capability) { - case ATH9K_CIPHER_AES_CCM: - case ATH9K_CIPHER_AES_OCB: - case ATH9K_CIPHER_TKIP: - case ATH9K_CIPHER_WEP: - case ATH9K_CIPHER_MIC: - case ATH9K_CIPHER_CLR: - return true; - default: - return false; - } - case ATH9K_CAP_TKIP_MIC: - switch (capability) { - case 0: - return true; - case 1: - return (ah->sta_id1_defaults & - AR_STA_ID1_CRPT_MIC_ENABLE) ? true : - false; - } - case ATH9K_CAP_TKIP_SPLIT: - return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ? - false : true; - case ATH9K_CAP_MCAST_KEYSRCH: - switch (capability) { - case 0: - return true; - case 1: - if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) { - return false; - } else { - return (ah->sta_id1_defaults & - AR_STA_ID1_MCAST_KSRCH) ? true : - false; - } - } - return false; - case ATH9K_CAP_TXPOW: - switch (capability) { - case 0: - return 0; - case 1: - *result = regulatory->power_limit; - return 0; - case 2: - *result = regulatory->max_power_level; - return 0; - case 3: - *result = regulatory->tp_scale; - return 0; - } - return false; - case ATH9K_CAP_DS: - return (AR_SREV_9280_20_OR_LATER(ah) && - (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1)) - ? 
false : true; - default: - return false; - } -} -EXPORT_SYMBOL(ath9k_hw_getcapability); + if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah)) + pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; -bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, - u32 capability, u32 setting, int *status) -{ - switch (type) { - case ATH9K_CAP_TKIP_MIC: - if (setting) - ah->sta_id1_defaults |= - AR_STA_ID1_CRPT_MIC_ENABLE; - else - ah->sta_id1_defaults &= - ~AR_STA_ID1_CRPT_MIC_ENABLE; - return true; - case ATH9K_CAP_MCAST_KEYSRCH: - if (setting) - ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH; - else - ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH; - return true; - default: - return false; - } + return 0; } -EXPORT_SYMBOL(ath9k_hw_setcapability); /****************************/ /* GPIO / RFKILL / Antennae */ @@ -2365,8 +2318,15 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) BUG_ON(gpio >= ah->caps.num_gpio_pins); - gpio_shift = gpio << 1; + if (AR_DEVID_7010(ah)) { + gpio_shift = gpio; + REG_RMW(ah, AR7010_GPIO_OE, + (AR7010_GPIO_OE_AS_INPUT << gpio_shift), + (AR7010_GPIO_OE_MASK << gpio_shift)); + return; + } + gpio_shift = gpio << 1; REG_RMW(ah, AR_GPIO_OE_OUT, (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), @@ -2382,7 +2342,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) if (gpio >= ah->caps.num_gpio_pins) return 0xffffffff; - if (AR_SREV_9300_20_OR_LATER(ah)) + if (AR_DEVID_7010(ah)) { + u32 val; + val = REG_READ(ah, AR7010_GPIO_IN); + return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; + } else if (AR_SREV_9300_20_OR_LATER(ah)) return MS_REG_READ(AR9300, gpio) != 0; else if (AR_SREV_9271(ah)) return MS_REG_READ(AR9271, gpio) != 0; @@ -2402,10 +2366,16 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, { u32 gpio_shift; - ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); + if (AR_DEVID_7010(ah)) { + gpio_shift = gpio; + REG_RMW(ah, AR7010_GPIO_OE, + (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift), + (AR7010_GPIO_OE_MASK << gpio_shift)); + return; + } + ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); gpio_shift = 2 * gpio; - REG_RMW(ah, AR_GPIO_OE_OUT, (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), @@ -2415,6 +2385,13 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output); void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) { + if (AR_DEVID_7010(ah)) { + val = val ? 0 : 1; + REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio), + AR_GPIO_BIT(gpio)); + return; + } + if (AR_SREV_9271(ah)) val = ~val; @@ -2520,12 +2497,6 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) } EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); -void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac) -{ - memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN); -} -EXPORT_SYMBOL(ath9k_hw_setmac); - void ath9k_hw_setopmode(struct ath_hw *ah) { ath9k_hw_set_operating_mode(ah, ah->opmode); @@ -2598,21 +2569,6 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting) } EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); -/* - * Extend 15-bit time stamp from rx descriptor to - * a full 64-bit TSF using the current h/w TSF. 
-*/ -u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp) -{ - u64 tsf; - - tsf = ath9k_hw_gettsf64(ah); - if ((tsf & 0x7fff) < rstamp) - tsf -= 0x8000; - return (tsf & ~0x7fff) | rstamp; -} -EXPORT_SYMBOL(ath9k_hw_extend_tsf); - void ath9k_hw_set11nmac2040(struct ath_hw *ah) { struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 77245dff599358e5070c92293df0dc97d2f1bd1f..2d30efc0b94fd95587c493ad905512debb1cb14f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -158,6 +158,9 @@ #define ATH9K_HW_RX_HP_QDEPTH 16 #define ATH9K_HW_RX_LP_QDEPTH 128 +#define PAPRD_GAIN_TABLE_ENTRIES 32 +#define PAPRD_TABLE_SZ 24 + enum ath_ini_subsys { ATH_INI_PRE = 0, ATH_INI_CORE, @@ -199,15 +202,8 @@ enum ath9k_hw_caps { ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18), ATH9K_HW_CAP_LDPC = BIT(19), ATH9K_HW_CAP_FASTCLOCK = BIT(20), -}; - -enum ath9k_capability_type { - ATH9K_CAP_CIPHER = 0, - ATH9K_CAP_TKIP_MIC, - ATH9K_CAP_TKIP_SPLIT, - ATH9K_CAP_TXPOW, - ATH9K_CAP_MCAST_KEYSRCH, - ATH9K_CAP_DS + ATH9K_HW_CAP_SGI_20 = BIT(21), + ATH9K_HW_CAP_PAPRD = BIT(22), }; struct ath9k_hw_capabilities { @@ -237,8 +233,9 @@ struct ath9k_ops_config { int sw_beacon_response_time; int additional_swba_backoff; int ack_6mb; - int cwm_ignore_extcca; + u32 cwm_ignore_extcca; u8 pcie_powersave_enable; + bool pcieSerDesWrite; u8 pcie_clock_req; u32 pcie_waen; u8 analog_shiftreg; @@ -262,10 +259,10 @@ struct ath9k_ops_config { #define AR_BASE_FREQ_5GHZ 4900 #define AR_SPUR_FEEQ_BOUND_HT40 19 #define AR_SPUR_FEEQ_BOUND_HT20 10 - bool tx_iq_calibration; /* Only available for >= AR9003 */ int spurmode; u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; u8 max_txtrig_level; + u16 ani_poll_interval; /* ANI poll interval in ms */ }; enum ath9k_int { @@ -279,6 +276,7 @@ enum ath9k_int { ATH9K_INT_TX = 0x00000040, ATH9K_INT_TXDESC = 0x00000080, ATH9K_INT_TIM_TIMER = 0x00000100, + ATH9K_INT_BB_WATCHDOG = 0x00000400, ATH9K_INT_TXURN = 0x00000800, ATH9K_INT_MIB = 0x00001000, ATH9K_INT_RXPHY = 0x00004000, @@ -358,6 +356,9 @@ struct ath9k_channel { int8_t iCoff; int8_t qCoff; int16_t rawNoiseFloor; + bool paprd_done; + u16 small_signal_gain[AR9300_MAX_CHAINS]; + u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; }; #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ @@ -459,7 +460,7 @@ struct ath9k_hw_version { #define AR_GENTMR_BIT(_index) (1 << (_index)) /* - * Using de Bruijin sequence to to look up 1's index in a 32 bit number + * Using de Bruijin sequence to look up 1's index in a 32 bit number * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001 */ #define debruijn32 0x077CB531U @@ -509,7 +510,17 @@ struct ath_gen_timer_table { * AR_RTC_PLL_CONTROL for a given channel * @setup_calibration: set up calibration * @iscal_supported: used to query if a type of calibration is supported - * @loadnf: load noise floor read from each chain on the CCA registers + * + * @ani_reset: reset ANI parameters to default values + * @ani_lower_immunity: lower the noise immunity level. The level controls + * the power-based packet detection on hardware. If a power jump is + * detected the adapter takes it as an indication that a packet has + * arrived. The level ranges from 0-5. Each level corresponds to a + * few dB more of noise immunity. If you have a strong time-varying + * interference that is causing false detections (OFDM timing errors or + * CCK timing errors) the level can be increased. 
+ * @ani_cache_ini_regs: cache the values for ANI from the initial + * register settings through the register initialization. */ struct ath_hw_private_ops { /* Calibration ops */ @@ -552,7 +563,11 @@ struct ath_hw_private_ops { bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd, int param); void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]); - void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan); + + /* ANI */ + void (*ani_reset)(struct ath_hw *ah, bool is_scanning); + void (*ani_lower_immunity)(struct ath_hw *ah); + void (*ani_cache_ini_regs)(struct ath_hw *ah); }; /** @@ -563,6 +578,11 @@ struct ath_hw_private_ops { * * @config_pci_powersave: * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC + * + * @ani_proc_mib_event: process MIB events, this would happen upon specific ANI + * thresholds being reached or having overflowed. + * @ani_monitor: called periodically by the core driver to collect + * MIB stats and adjust ANI if specific thresholds have been reached. */ struct ath_hw_ops { void (*config_pci_powersave)(struct ath_hw *ah, @@ -603,6 +623,15 @@ struct ath_hw_ops { u32 burstDuration); void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds, u32 vmf); + + void (*ani_proc_mib_event)(struct ath_hw *ah); + void (*ani_monitor)(struct ath_hw *ah, struct ath9k_channel *chan); +}; + +struct ath_nf_limits { + s16 max; + s16 min; + s16 nominal; }; struct ath_hw { @@ -626,10 +655,10 @@ struct ath_hw { bool is_pciexpress; bool need_an_top2_fixup; u16 tx_trig_level; - s16 nf_2g_max; - s16 nf_2g_min; - s16 nf_5g_max; - s16 nf_5g_min; + + u32 nf_regs[6]; + struct ath_nf_limits nf_2g; + struct ath_nf_limits nf_5g; u16 rfsilent; u32 rfkill_gpio; u32 rfkill_polarity; @@ -789,6 +818,18 @@ struct ath_hw { u32 ts_paddr_end; u16 ts_tail; u8 ts_size; + + u32 bb_watchdog_last_status; + u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ + + u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES]; + u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES]; + /* + * Store the permanent value of Reg 0x4004in WARegVal + * so we dont have to R/M/W. We should not be reading + * this register when in sleep states. 
+ */ + u32 WARegVal; }; static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) @@ -811,6 +852,12 @@ static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah) return &ah->ops; } +static inline int sign_extend(int val, const int nbits) +{ + int order = BIT(nbits-1); + return (val ^ order) - order; +} + /* Initialization, Detach, Reset */ const char *ath9k_hw_probe(u16 vendorid, u16 devid); void ath9k_hw_deinit(struct ath_hw *ah); @@ -818,19 +865,13 @@ int ath9k_hw_init(struct ath_hw *ah); int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, bool bChannelChange); int ath9k_hw_fill_cap_info(struct ath_hw *ah); -bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type, - u32 capability, u32 *result); -bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, - u32 capability, u32 setting, int *status); u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); /* Key Cache Management */ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry); -bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac); bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, const struct ath9k_keyval *k, const u8 *mac); -bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry); /* GPIO / RFKILL / Antennae */ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); @@ -856,7 +897,6 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits); bool ath9k_hw_phy_disable(struct ath_hw *ah); bool ath9k_hw_disable(struct ath_hw *ah); void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit); -void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac); void ath9k_hw_setopmode(struct ath_hw *ah); void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); void ath9k_hw_setbssidmask(struct ath_hw *ah); @@ -865,7 +905,6 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah); void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); void ath9k_hw_reset_tsf(struct ath_hw *ah); void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); -u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp); void ath9k_hw_init_global_settings(struct ath_hw *ah); void ath9k_hw_set11nmac2040(struct ath_hw *ah); void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); @@ -907,13 +946,25 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, void ar9002_hw_cck_chan14_spread(struct ath_hw *ah); int ar9002_hw_rf_claim(struct ath_hw *ah); void ar9002_hw_enable_async_fifo(struct ath_hw *ah); +void ar9002_hw_update_async_fifo(struct ath_hw *ah); void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah); /* - * Code specifric to AR9003, we stuff these here to avoid callbacks + * Code specific to AR9003, we stuff these here to avoid callbacks * for older families */ -void ar9003_hw_set_nf_limits(struct ath_hw *ah); +void ar9003_hw_bb_watchdog_config(struct ath_hw *ah); +void ar9003_hw_bb_watchdog_read(struct ath_hw *ah); +void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); +void ar9003_paprd_enable(struct ath_hw *ah, bool val); +void ar9003_paprd_populate_single_table(struct ath_hw *ah, + struct ath9k_channel *chan, int chain); +int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan, + int chain); +int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); +int ar9003_paprd_init_table(struct ath_hw *ah); +bool ar9003_paprd_is_done(struct ath_hw *ah); +void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains); /* Hardware family op attach helpers */ void 
ar5008_hw_attach_phy_ops(struct ath_hw *ah); @@ -926,8 +977,24 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah); void ar9002_hw_attach_ops(struct ath_hw *ah); void ar9003_hw_attach_ops(struct ath_hw *ah); +/* + * ANI work can be shared between all families but a next + * generation implementation of ANI will be used only for AR9003 only + * for now as the other families still need to be tested with the same + * next generation ANI. Feel free to start testing it though for the + * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani. + */ +extern int modparam_force_new_ani; +void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah); +void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah); + #define ATH_PCIE_CAP_LINK_CTRL 0x70 #define ATH_PCIE_CAP_LINK_L0S 1 #define ATH_PCIE_CAP_LINK_L1 2 +#define ATH9K_CLOCK_RATE_CCK 22 +#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 +#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 +#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44 + #endif diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d457cb3bd7720af182d929599f38e5d46fcac2d8..243c1775f343f0b3400b0675708ee292bcb54e74 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -33,6 +33,10 @@ int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); +int led_blink = 1; +module_param_named(blink, led_blink, int, 0444); +MODULE_PARM_DESC(blink, "Enable LED blink on activity"); + /* We use the hw_value as an index into our private channel structure */ #define CHAN2G(_freq, _idx) { \ @@ -175,18 +179,6 @@ static const struct ath_ops ath9k_common_ops = { .write = ath9k_iowrite32, }; -static int count_streams(unsigned int chainmask, int max) -{ - int streams = 0; - - do { - if (++streams == max) - break; - } while ((chainmask = chainmask & (chainmask - 1))); - - return streams; -} - /**************************/ /* Initialization */ /**************************/ @@ -208,6 +200,9 @@ static void setup_ht_cap(struct ath_softc *sc, if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC) ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; @@ -224,8 +219,8 @@ static void setup_ht_cap(struct ath_softc *sc, /* set up supported mcs set */ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); - tx_streams = count_streams(common->tx_chainmask, max_streams); - rx_streams = count_streams(common->rx_chainmask, max_streams); + tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams); + rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams); ath_print(common, ATH_DBG_CONFIG, "TX streams %d, RX streams: %d\n", @@ -388,36 +383,14 @@ static void ath9k_init_crypto(struct ath_softc *sc) for (i = 0; i < common->keymax; i++) ath9k_hw_keyreset(sc->sc_ah, (u16) i); - if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, - ATH9K_CIPHER_TKIP, NULL)) { - /* - * Whether we should enable h/w TKIP MIC. - * XXX: if we don't support WME TKIP MIC, then we wouldn't - * report WMM capable, so it's always safe to turn on - * TKIP MIC in this case. - */ - ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL); - } - /* * Check whether the separate key cache entries * are required to handle both tx+rx MIC keys. 
* With split mic keys the number of stations is limited * to 27 otherwise 59. */ - if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, - ATH9K_CIPHER_TKIP, NULL) - && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, - ATH9K_CIPHER_MIC, NULL) - && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT, - 0, NULL)) + if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)) common->splitmic = 1; - - /* turn on mcast key search if possible */ - if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) - (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, - 1, 1, NULL); - } static int ath9k_init_btcoex(struct ath_softc *sc) @@ -435,7 +408,7 @@ static int ath9k_init_btcoex(struct ath_softc *sc) r = ath_init_btcoex_timer(sc); if (r) return -1; - qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); + qnum = sc->tx.hwq_map[WME_AC_BE]; ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum); sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; break; @@ -472,23 +445,23 @@ static int ath9k_init_queues(struct ath_softc *sc) sc->config.cabqReadytime = ATH_CABQ_READY_TIME; ath_cabq_update(sc); - if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) { + if (!ath_tx_setup(sc, WME_AC_BK)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for BK traffic\n"); goto err; } - if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) { + if (!ath_tx_setup(sc, WME_AC_BE)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for BE traffic\n"); goto err; } - if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) { + if (!ath_tx_setup(sc, WME_AC_VI)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for VI traffic\n"); goto err; } - if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) { + if (!ath_tx_setup(sc, WME_AC_VO)) { ath_print(common, ATH_DBG_FATAL, "Unable to setup xmit queue for VO traffic\n"); goto err; @@ -745,6 +718,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, goto error_world; } + INIT_WORK(&sc->hw_check_work, ath_hw_check); + INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); sc->wiphy_scheduler_int = msecs_to_jiffies(500); @@ -812,12 +787,12 @@ void ath9k_deinit_device(struct ath_softc *sc) ieee80211_unregister_hw(aphy->hw); ieee80211_free_hw(aphy->hw); } - kfree(sc->sec_wiphy); ieee80211_unregister_hw(hw); ath_rx_cleanup(sc); ath_tx_cleanup(sc); ath9k_deinit_softc(sc); + kfree(sc->sec_wiphy); } void ath_descdma_cleanup(struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 0e425cb4bbb1b9b8fb189d17792c9758babb1bd1..e955bb9d98cbe0380ef5d070005b373c1a132b8d 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -15,6 +15,7 @@ */ #include "hw.h" +#include "hw-ops.h" static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, struct ath9k_tx_queue_info *qi) @@ -554,8 +555,13 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); - /* cwmin and cwmax should be 0 for beacon queue */ - if (AR_SREV_9300_20_OR_LATER(ah)) { + /* + * cwmin and cwmax should be 0 for beacon queue + * but not for IBSS as we would create an imbalance + * on beaconing fairness for participating nodes. 
+ */ + if (AR_SREV_9300_20_OR_LATER(ah) && + ah->opmode != NL80211_IFTYPE_ADHOC) { REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN) | SM(0, AR_D_LCL_IFS_CWMAX) | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); @@ -756,11 +762,11 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) } EXPORT_SYMBOL(ath9k_hw_putrxbuf); -void ath9k_hw_startpcureceive(struct ath_hw *ah) +void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning) { ath9k_enable_mib_counters(ah); - ath9k_ani_reset(ah); + ath9k_ani_reset(ah, is_scanning); REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); } diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 00f3e0c7528a185e31abe85a2ce520e7524e2a12..2633896d39983499f931b576db97bfdbe5da2e31 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -485,6 +485,9 @@ struct ar5416_desc { #define AR_TxRSSICombined 0xff000000 #define AR_TxRSSICombined_S 24 +#define AR_TxTid 0xf0000000 +#define AR_TxTid_S 28 + #define AR_TxEVM0 ds_txstatus5 #define AR_TxEVM1 ds_txstatus6 #define AR_TxEVM2 ds_txstatus7 @@ -577,13 +580,8 @@ enum ath9k_tx_queue { #define ATH9K_NUM_TX_QUEUES 10 -enum ath9k_tx_queue_subtype { - ATH9K_WME_AC_BK = 0, - ATH9K_WME_AC_BE, - ATH9K_WME_AC_VI, - ATH9K_WME_AC_VO, - ATH9K_WME_UPSD -}; +/* Used as a queue subtype instead of a WMM AC */ +#define ATH9K_WME_UPSD 4 enum ath9k_tx_queue_flags { TXQ_FLAG_TXOKINT_ENABLE = 0x0001, @@ -617,7 +615,7 @@ enum ath9k_pkt_type { struct ath9k_tx_queue_info { u32 tqi_ver; enum ath9k_tx_queue tqi_type; - enum ath9k_tx_queue_subtype tqi_subtype; + int tqi_subtype; enum ath9k_tx_queue_flags tqi_qflags; u32 tqi_priority; u32 tqi_aifs; @@ -715,7 +713,7 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, u32 size, u32 flags); bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); -void ath9k_hw_startpcureceive(struct ath_hw *ah); +void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); void ath9k_hw_stoppcurecv(struct ath_hw *ah); void ath9k_hw_abortpcurecv(struct ath_hw *ah); bool ath9k_hw_stopdmarecv(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1e2a68ea935597a6f3f9499693a59ab8e191841f..0429dda0961fcea24309d045a0af2655de053c25 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -51,13 +51,11 @@ static void ath_cache_conf_rate(struct ath_softc *sc, static void ath_update_txpow(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; - u32 txpow; if (sc->curtxpow != sc->config.txpowlimit) { ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit); /* read back in case value is clamped */ - ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); - sc->curtxpow = txpow; + sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; } } @@ -232,6 +230,114 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, return r; } +static void ath_paprd_activate(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + int chain; + + if (!ah->curchan->paprd_done) + return; + + ath9k_ps_wakeup(sc); + for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { + if (!(ah->caps.tx_chainmask & BIT(chain))) + continue; + + ar9003_paprd_populate_single_table(ah, ah->curchan, chain); + } + + ar9003_paprd_enable(ah, true); + ath9k_ps_restore(sc); +} + +void ath_paprd_calibrate(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, 
paprd_work); + struct ieee80211_hw *hw = sc->hw; + struct ath_hw *ah = sc->sc_ah; + struct ieee80211_hdr *hdr; + struct sk_buff *skb = NULL; + struct ieee80211_tx_info *tx_info; + int band = hw->conf.channel->band; + struct ieee80211_supported_band *sband = &sc->sbands[band]; + struct ath_tx_control txctl; + int qnum, ftype; + int chain_ok = 0; + int chain; + int len = 1800; + int time_left; + int i; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return; + + tx_info = IEEE80211_SKB_CB(skb); + + skb_put(skb, len); + memset(skb->data, 0, len); + hdr = (struct ieee80211_hdr *)skb->data; + ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC; + hdr->frame_control = cpu_to_le16(ftype); + hdr->duration_id = cpu_to_le16(10); + memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN); + memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN); + memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); + + memset(&txctl, 0, sizeof(txctl)); + qnum = sc->tx.hwq_map[WME_AC_BE]; + txctl.txq = &sc->tx.txq[qnum]; + + ath9k_ps_wakeup(sc); + ar9003_paprd_init_table(ah); + for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { + if (!(ah->caps.tx_chainmask & BIT(chain))) + continue; + + chain_ok = 0; + memset(tx_info, 0, sizeof(*tx_info)); + tx_info->band = band; + + for (i = 0; i < 4; i++) { + tx_info->control.rates[i].idx = sband->n_bitrates - 1; + tx_info->control.rates[i].count = 6; + } + + init_completion(&sc->paprd_complete); + ar9003_paprd_setup_gain_table(ah, chain); + txctl.paprd = BIT(chain); + if (ath_tx_start(hw, skb, &txctl) != 0) + break; + + time_left = wait_for_completion_timeout(&sc->paprd_complete, + msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); + if (!time_left) { + ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + "Timeout waiting for paprd training on " + "TX chain %d\n", + chain); + goto fail_paprd; + } + + if (!ar9003_paprd_is_done(ah)) + break; + + if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0) + break; + + chain_ok = 1; + } + kfree_skb(skb); + + if (chain_ok) { + ah->curchan->paprd_done = true; + ath_paprd_activate(sc); + } + +fail_paprd: + ath9k_ps_restore(sc); +} + /* * This routine performs the periodic noise floor calibration function * that is used to adjust and optimize the chip performance. 
This @@ -285,7 +391,8 @@ void ath_ani_calibrate(unsigned long data) } /* Verify whether we must check ANI */ - if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { + if ((timestamp - common->ani.checkani_timer) >= + ah->config.ani_poll_interval) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -326,15 +433,24 @@ void ath_ani_calibrate(unsigned long data) */ cal_interval = ATH_LONG_CALINTERVAL; if (sc->sc_ah->config.enable_ani) - cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); + cal_interval = min(cal_interval, + (u32)ah->config.ani_poll_interval); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && + !(sc->sc_flags & SC_OP_SCANNING)) { + if (!sc->sc_ah->curchan->paprd_done) + ieee80211_queue_work(sc->hw, &sc->paprd_work); + else + ath_paprd_activate(sc); + } } static void ath_start_ani(struct ath_common *common) { + struct ath_hw *ah = common->ah; unsigned long timestamp = jiffies_to_msecs(jiffies); struct ath_softc *sc = (struct ath_softc *) common->priv; @@ -346,7 +462,8 @@ static void ath_start_ani(struct ath_common *common) common->ani.checkani_timer = timestamp; mod_timer(&common->ani.timer, - jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); + jiffies + + msecs_to_jiffies((u32)ah->config.ani_poll_interval)); } /* @@ -398,6 +515,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) ath_tx_node_cleanup(sc, an); } +void ath_hw_check(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work); + int i; + + ath9k_ps_wakeup(sc); + + for (i = 0; i < 3; i++) { + if (ath9k_hw_check_alive(sc->sc_ah)) + goto out; + + msleep(1); + } + ath_reset(sc, false); + +out: + ath9k_ps_restore(sc); +} + void ath9k_tasklet(unsigned long data) { struct ath_softc *sc = (struct ath_softc *)data; @@ -409,13 +545,15 @@ void ath9k_tasklet(unsigned long data) ath9k_ps_wakeup(sc); - if ((status & ATH9K_INT_FATAL) || - !ath9k_hw_check_alive(ah)) { + if (status & ATH9K_INT_FATAL) { ath_reset(sc, false); ath9k_ps_restore(sc); return; } + if (!ath9k_hw_check_alive(ah)) + ieee80211_queue_work(sc->hw, &sc->hw_check_work); + if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | ATH9K_INT_RXORN); @@ -524,6 +662,12 @@ irqreturn_t ath_isr(int irq, void *dev) !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))) goto chip_reset; + if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && + (status & ATH9K_INT_BB_WATCHDOG)) { + ar9003_hw_bb_watchdog_dbg_info(ah); + goto chip_reset; + } + if (status & ATH9K_INT_SWBA) tasklet_schedule(&sc->bcon_tasklet); @@ -619,234 +763,6 @@ static u32 ath_get_extchanmode(struct ath_softc *sc, return chanmode; } -static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key, - struct ath9k_keyval *hk, const u8 *addr, - bool authenticator) -{ - struct ath_hw *ah = common->ah; - const u8 *key_rxmic; - const u8 *key_txmic; - - key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; - key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; - - if (addr == NULL) { - /* - * Group key installation - only two key cache entries are used - * regardless of splitmic capability since group key is only - * used either for TX or RX. 
- */ - if (authenticator) { - memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); - memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic)); - } else { - memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); - memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic)); - } - return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr); - } - if (!common->splitmic) { - /* TX and RX keys share the same key cache entry. */ - memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); - memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic)); - return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr); - } - - /* Separate key cache entries for TX and RX */ - - /* TX key goes at first index, RX key at +32. */ - memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); - if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) { - /* TX MIC entry failed. No need to proceed further */ - ath_print(common, ATH_DBG_FATAL, - "Setting TX MIC Key Failed\n"); - return 0; - } - - memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); - /* XXX delete tx key on failure? */ - return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr); -} - -static int ath_reserve_key_cache_slot_tkip(struct ath_common *common) -{ - int i; - - for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) { - if (test_bit(i, common->keymap) || - test_bit(i + 64, common->keymap)) - continue; /* At least one part of TKIP key allocated */ - if (common->splitmic && - (test_bit(i + 32, common->keymap) || - test_bit(i + 64 + 32, common->keymap))) - continue; /* At least one part of TKIP key allocated */ - - /* Found a free slot for a TKIP key */ - return i; - } - return -1; -} - -static int ath_reserve_key_cache_slot(struct ath_common *common) -{ - int i; - - /* First, try to find slots that would not be available for TKIP. */ - if (common->splitmic) { - for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) { - if (!test_bit(i, common->keymap) && - (test_bit(i + 32, common->keymap) || - test_bit(i + 64, common->keymap) || - test_bit(i + 64 + 32, common->keymap))) - return i; - if (!test_bit(i + 32, common->keymap) && - (test_bit(i, common->keymap) || - test_bit(i + 64, common->keymap) || - test_bit(i + 64 + 32, common->keymap))) - return i + 32; - if (!test_bit(i + 64, common->keymap) && - (test_bit(i , common->keymap) || - test_bit(i + 32, common->keymap) || - test_bit(i + 64 + 32, common->keymap))) - return i + 64; - if (!test_bit(i + 64 + 32, common->keymap) && - (test_bit(i, common->keymap) || - test_bit(i + 32, common->keymap) || - test_bit(i + 64, common->keymap))) - return i + 64 + 32; - } - } else { - for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) { - if (!test_bit(i, common->keymap) && - test_bit(i + 64, common->keymap)) - return i; - if (test_bit(i, common->keymap) && - !test_bit(i + 64, common->keymap)) - return i + 64; - } - } - - /* No partially used TKIP slots, pick any available slot */ - for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) { - /* Do not allow slots that could be needed for TKIP group keys - * to be used. This limitation could be removed if we know that - * TKIP will not be used. 
*/ - if (i >= 64 && i < 64 + IEEE80211_WEP_NKID) - continue; - if (common->splitmic) { - if (i >= 32 && i < 32 + IEEE80211_WEP_NKID) - continue; - if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID) - continue; - } - - if (!test_bit(i, common->keymap)) - return i; /* Found a free slot for a key */ - } - - /* No free slot found */ - return -1; -} - -static int ath_key_config(struct ath_common *common, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - struct ath_hw *ah = common->ah; - struct ath9k_keyval hk; - const u8 *mac = NULL; - int ret = 0; - int idx; - - memset(&hk, 0, sizeof(hk)); - - switch (key->alg) { - case ALG_WEP: - hk.kv_type = ATH9K_CIPHER_WEP; - break; - case ALG_TKIP: - hk.kv_type = ATH9K_CIPHER_TKIP; - break; - case ALG_CCMP: - hk.kv_type = ATH9K_CIPHER_AES_CCM; - break; - default: - return -EOPNOTSUPP; - } - - hk.kv_len = key->keylen; - memcpy(hk.kv_val, key->key, key->keylen); - - if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { - /* For now, use the default keys for broadcast keys. This may - * need to change with virtual interfaces. */ - idx = key->keyidx; - } else if (key->keyidx) { - if (WARN_ON(!sta)) - return -EOPNOTSUPP; - mac = sta->addr; - - if (vif->type != NL80211_IFTYPE_AP) { - /* Only keyidx 0 should be used with unicast key, but - * allow this for client mode for now. */ - idx = key->keyidx; - } else - return -EIO; - } else { - if (WARN_ON(!sta)) - return -EOPNOTSUPP; - mac = sta->addr; - - if (key->alg == ALG_TKIP) - idx = ath_reserve_key_cache_slot_tkip(common); - else - idx = ath_reserve_key_cache_slot(common); - if (idx < 0) - return -ENOSPC; /* no free key cache entries */ - } - - if (key->alg == ALG_TKIP) - ret = ath_setkey_tkip(common, idx, key->key, &hk, mac, - vif->type == NL80211_IFTYPE_AP); - else - ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac); - - if (!ret) - return -EIO; - - set_bit(idx, common->keymap); - if (key->alg == ALG_TKIP) { - set_bit(idx + 64, common->keymap); - if (common->splitmic) { - set_bit(idx + 32, common->keymap); - set_bit(idx + 64 + 32, common->keymap); - } - } - - return idx; -} - -static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key) -{ - struct ath_hw *ah = common->ah; - - ath9k_hw_keyreset(ah, key->hw_key_idx); - if (key->hw_key_idx < IEEE80211_WEP_NKID) - return; - - clear_bit(key->hw_key_idx, common->keymap); - if (key->alg != ALG_TKIP) - return; - - clear_bit(key->hw_key_idx + 64, common->keymap); - if (common->splitmic) { - ath9k_hw_keyreset(ah, key->hw_key_idx + 32); - clear_bit(key->hw_key_idx + 32, common->keymap); - clear_bit(key->hw_key_idx + 64 + 32, common->keymap); - } -} - static void ath9k_bss_assoc_info(struct ath_softc *sc, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) @@ -941,9 +857,14 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) ath9k_ps_wakeup(sc); ieee80211_stop_queues(hw); - /* Disable LED */ - ath9k_hw_set_gpio(ah, ah->led_pin, 1); - ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + /* + * Keep the LED on when the radio is disabled + * during idle unassociated state. 
+ */ + if (!sc->ps_idle) { + ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + } /* Disable interrupts */ ath9k_hw_set_interrupts(ah, 0); @@ -1032,25 +953,25 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) return r; } -int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) +static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) { int qnum; switch (queue) { case 0: - qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO]; + qnum = sc->tx.hwq_map[WME_AC_VO]; break; case 1: - qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI]; + qnum = sc->tx.hwq_map[WME_AC_VI]; break; case 2: - qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; + qnum = sc->tx.hwq_map[WME_AC_BE]; break; case 3: - qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK]; + qnum = sc->tx.hwq_map[WME_AC_BK]; break; default: - qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; + qnum = sc->tx.hwq_map[WME_AC_BE]; break; } @@ -1062,16 +983,16 @@ int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc) int qnum; switch (queue) { - case ATH9K_WME_AC_VO: + case WME_AC_VO: qnum = 0; break; - case ATH9K_WME_AC_VI: + case WME_AC_VI: qnum = 1; break; - case ATH9K_WME_AC_BE: + case WME_AC_BE: qnum = 2; break; - case ATH9K_WME_AC_BK: + case WME_AC_BK: qnum = 3; break; default: @@ -1201,7 +1122,9 @@ static int ath9k_start(struct ieee80211_hw *hw) ATH9K_INT_GLOBAL; if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) - ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP; + ah->imask |= ATH9K_INT_RXHP | + ATH9K_INT_RXLP | + ATH9K_INT_BB_WATCHDOG; else ah->imask |= ATH9K_INT_RX; @@ -1251,6 +1174,7 @@ static int ath9k_tx(struct ieee80211_hw *hw, struct ath_tx_control txctl; int padpos, padsize; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + int qnum; if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { ath_print(common, ATH_DBG_XMIT, @@ -1280,7 +1204,8 @@ static int ath9k_tx(struct ieee80211_hw *hw, * completed and if needed, also for RX of buffered frames. 
*/ ath9k_ps_wakeup(sc); - ath9k_hw_setrxabort(sc->sc_ah, 0); + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + ath9k_hw_setrxabort(sc->sc_ah, 0); if (ieee80211_is_pspoll(hdr->frame_control)) { ath_print(common, ATH_DBG_PS, "Sending PS-Poll to pick a buffered frame\n"); @@ -1322,11 +1247,8 @@ static int ath9k_tx(struct ieee80211_hw *hw, memmove(skb->data, skb->data + padsize, padpos); } - /* Check if a tx queue is available */ - - txctl.txq = ath_test_get_txq(sc, skb); - if (!txctl.txq) - goto exit; + qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); + txctl.txq = &sc->tx.txq[qnum]; ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); @@ -1347,15 +1269,25 @@ static void ath9k_stop(struct ieee80211_hw *hw) struct ath_softc *sc = aphy->sc; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); + int i; mutex_lock(&sc->mutex); aphy->state = ATH_WIPHY_INACTIVE; - cancel_delayed_work_sync(&sc->ath_led_blink_work); + if (led_blink) + cancel_delayed_work_sync(&sc->ath_led_blink_work); + cancel_delayed_work_sync(&sc->tx_complete_work); + cancel_work_sync(&sc->paprd_work); + cancel_work_sync(&sc->hw_check_work); + + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i]) + break; + } - if (!sc->num_sec_wiphy) { + if (i == sc->num_sec_wiphy) { cancel_delayed_work_sync(&sc->wiphy_work); cancel_work_sync(&sc->chan_work); } @@ -1547,8 +1479,8 @@ void ath9k_enable_ps(struct ath_softc *sc) ah->imask |= ATH9K_INT_TIM_TIMER; ath9k_hw_set_interrupts(ah, ah->imask); } + ath9k_hw_setrxabort(ah, 1); } - ath9k_hw_setrxabort(ah, 1); } static int ath9k_config(struct ieee80211_hw *hw, u32 changed) @@ -1785,7 +1717,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) - if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret) + if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret) ath_beaconq_config(sc); mutex_unlock(&sc->mutex); @@ -1813,7 +1745,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, switch (cmd) { case SET_KEY: - ret = ath_key_config(common, vif, sta, key); + ret = ath9k_cmn_key_config(common, vif, sta, key); if (ret >= 0) { key->hw_key_idx = ret; /* push IV and Michael MIC generation to stack */ @@ -1826,7 +1758,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, } break; case DISABLE_KEY: - ath_key_delete(common, key); + ath9k_cmn_key_delete(common, key); break; default: ret = -EINVAL; @@ -1999,6 +1931,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, struct ath_softc *sc = aphy->sc; int ret = 0; + local_bh_disable(); + switch (action) { case IEEE80211_AMPDU_RX_START: if (!(sc->sc_flags & SC_OP_RXAGGR)) @@ -2028,6 +1962,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, "Unknown AMPDU action\n"); } + local_bh_enable(); + return ret; } @@ -2058,11 +1994,12 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw) mutex_lock(&sc->mutex); if (ath9k_wiphy_scanning(sc)) { - printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the " - "same time\n"); /* - * Do not allow the concurrent scanning state for now. This - * could be improved with scanning control moved into ath9k. + * There is a race here in mac80211 but fixing it requires + * we revisit how we handle the scan complete callback. + * After mac80211 fixes we will not have configured hardware + * to the home channel nor would we have configured the RX + * filter yet. 
*/ mutex_unlock(&sc->mutex); return; @@ -2072,10 +2009,16 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw) ath9k_wiphy_pause_all_forced(sc, aphy); sc->sc_flags |= SC_OP_SCANNING; del_timer_sync(&common->ani.timer); + cancel_work_sync(&sc->paprd_work); + cancel_work_sync(&sc->hw_check_work); cancel_delayed_work_sync(&sc->tx_complete_work); mutex_unlock(&sc->mutex); } +/* + * XXX: this requires a revisit after the driver + * scan_complete gets moved to another place/removed in mac80211. + */ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) { struct ath_wiphy *aphy = hw->priv; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 1ec836cf1c0da8defa709b6ba983cac744ba9799..b5b651413e77104bdd4bffe3171354287ed0a828 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ + { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ { 0 } }; @@ -208,11 +209,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); - printk(KERN_INFO - "%s: %s mem=0x%lx, irq=%d\n", - wiphy_name(hw->wiphy), - hw_name, - (unsigned long)mem, pdev->irq); + wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", + hw_name, (unsigned long)mem, pdev->irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 8519452c95f1b1024b3584a49a33af14a0501350..e49be733d5461820c8dba6aec394e53700166c66 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -20,93 +20,145 @@ #include "ath9k.h" static const struct ath_rate_table ar5416_11na_ratetable = { - 42, + 68, 8, /* MCS start */ { - { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ - 5400, 0, 12, 0, 0, 0, 0, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ - 7800, 1, 18, 0, 1, 1, 1, 1 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ - 10000, 2, 24, 2, 2, 2, 2, 2 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ - 13900, 3, 36, 2, 3, 3, 3, 3 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ - 17300, 4, 48, 4, 4, 4, 4, 4 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ - 23000, 5, 72, 4, 5, 5, 5, 5 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ - 27400, 6, 96, 4, 6, 6, 6, 6 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ - 29300, 7, 108, 4, 7, 7, 7, 7 }, - { VALID_2040, VALID_2040, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */ - 6400, 0, 0, 0, 8, 24, 8, 24 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */ - 12700, 1, 1, 2, 9, 25, 9, 25 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */ - 18800, 2, 2, 2, 10, 26, 10, 26 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */ - 25000, 3, 3, 4, 11, 27, 11, 27 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */ - 36700, 4, 4, 4, 12, 28, 12, 28 }, - { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */ - 48100, 5, 5, 4, 13, 29, 13, 29 }, - { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */ - 53500, 6, 6, 4, 14, 30, 14, 30 }, - { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */ - 59000, 7, 7, 4, 15, 31, 15, 32 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */ - 12700, 8, 8, 3, 16, 33, 16, 33 }, - { 
INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */ - 24800, 9, 9, 2, 17, 34, 17, 34 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */ - 36600, 10, 10, 2, 18, 35, 18, 35 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */ - 48100, 11, 11, 4, 19, 36, 19, 36 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */ - 69500, 12, 12, 4, 20, 37, 20, 37 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */ - 89500, 13, 13, 4, 21, 38, 21, 38 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */ - 98900, 14, 14, 4, 22, 39, 22, 39 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */ - 108300, 15, 15, 4, 23, 40, 23, 41 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */ - 13200, 0, 0, 0, 8, 24, 24, 24 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */ - 25900, 1, 1, 2, 9, 25, 25, 25 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */ - 38600, 2, 2, 2, 10, 26, 26, 26 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */ - 49800, 3, 3, 4, 11, 27, 27, 27 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */ - 72200, 4, 4, 4, 12, 28, 28, 28 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */ - 92900, 5, 5, 4, 13, 29, 29, 29 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */ - 102700, 6, 6, 4, 14, 30, 30, 30 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */ - 112000, 7, 7, 4, 15, 31, 32, 32 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ - 122000, 7, 7, 4, 15, 31, 32, 32 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */ - 25800, 8, 8, 0, 16, 33, 33, 33 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */ - 49800, 9, 9, 2, 17, 34, 34, 34 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */ - 71900, 10, 10, 2, 18, 35, 35, 35 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */ - 92500, 11, 11, 4, 19, 36, 36, 36 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */ - 130300, 12, 12, 4, 20, 37, 37, 37 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */ - 162800, 13, 13, 4, 21, 38, 38, 38 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */ - 178200, 14, 14, 4, 22, 39, 39, 39 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */ - 192100, 15, 15, 4, 23, 40, 41, 41 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ - 207000, 15, 15, 4, 23, 40, 41, 41 }, + [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, + 5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */ + [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, + 7800, 1, 18, 0, 1, 1, 1 }, /* 9 Mb */ + [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, + 10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */ + [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, + 13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */ + [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, + 17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */ + [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, + 23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */ + [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, + 27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */ + [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, + 29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */ + [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500, + 6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */ + [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, + 12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */ + [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, + 18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */ 
+ [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, + 25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */ + [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, + 36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */ + [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, + 48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */ + [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, + 53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */ + [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, + 59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */ + [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, + 65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */ + [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, + 12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */ + [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, + 24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */ + [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, + 36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */ + [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, + 48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */ + [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, + 69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */ + [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, + 89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */ + [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, + 98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */ + [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, + 108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */ + [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, + 120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */ + [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, + 17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */ + [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, + 35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */ + [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, + 52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */ + [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, + 70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */ + [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, + 104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */ + [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, + 115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/ + [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, + 137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */ + [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, + 151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */ + [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, + 152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */ + [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, + 168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/ + [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, + 168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */ + [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, + 185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */ + [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, + 13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/ + [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, + 25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/ + [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, + 38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/ + [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, + 49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */ + [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, + 72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */ + [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000, + 92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */ + [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, + 102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/ + [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, + 112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */ + [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, + 
122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */ + [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, + 25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */ + [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, + 49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */ + [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, + 71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */ + [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, + 92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */ + [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, + 130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */ + [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, + 162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */ + [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, + 178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */ + [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, + 192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */ + [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, + 207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */ + [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, + 36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */ + [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, + 72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */ + [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, + 108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */ + [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, + 142000, 19, 19, 4, 59, 59, 59 }, /* 162 Mb */ + [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, + 205100, 20, 20, 4, 60, 61, 61 }, /* 243 Mb */ + [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, + 224700, 20, 20, 4, 60, 61, 61 }, /* 270 Mb */ + [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, + 263100, 21, 21, 4, 62, 63, 63 }, /* 324 Mb */ + [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, + 288000, 21, 21, 4, 62, 63, 63 }, /* 360 Mb */ + [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, + 290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */ + [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, + 317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */ + [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, + 317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */ + [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, + 346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */ }, 50, /* probe interval */ WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ @@ -116,101 +168,153 @@ static const struct ath_rate_table ar5416_11na_ratetable = { * for HT are the 64K max aggregate limit */ static const struct ath_rate_table ar5416_11ng_ratetable = { - 46, + 72, 12, /* MCS start */ { - { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ - 900, 0, 2, 0, 0, 0, 0, 0 }, - { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ - 1900, 1, 4, 1, 1, 1, 1, 1 }, - { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ - 4900, 2, 11, 2, 2, 2, 2, 2 }, - { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ - 8100, 3, 22, 3, 3, 3, 3, 3 }, - { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ - 5400, 4, 12, 4, 4, 4, 4, 4 }, - { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ - 7800, 5, 18, 4, 5, 5, 5, 5 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ - 10100, 6, 24, 6, 6, 6, 6, 6 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ - 14100, 7, 36, 6, 7, 7, 7, 7 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ - 17700, 8, 48, 8, 8, 8, 8, 8 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ - 23700, 9, 72, 8, 9, 9, 9, 9 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ - 27400, 10, 96, 8, 10, 10, 10, 10 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ - 30900, 11, 108, 8, 11, 
11, 11, 11 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */ - 6400, 0, 0, 4, 12, 28, 12, 28 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */ - 12700, 1, 1, 6, 13, 29, 13, 29 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */ - 18800, 2, 2, 6, 14, 30, 14, 30 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */ - 25000, 3, 3, 8, 15, 31, 15, 31 }, - { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */ - 36700, 4, 4, 8, 16, 32, 16, 32 }, - { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */ - 48100, 5, 5, 8, 17, 33, 17, 33 }, - { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */ - 53500, 6, 6, 8, 18, 34, 18, 34 }, - { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */ - 59000, 7, 7, 8, 19, 35, 19, 36 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */ - 12700, 8, 8, 4, 20, 37, 20, 37 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */ - 24800, 9, 9, 6, 21, 38, 21, 38 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */ - 36600, 10, 10, 6, 22, 39, 22, 39 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */ - 48100, 11, 11, 8, 23, 40, 23, 40 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */ - 69500, 12, 12, 8, 24, 41, 24, 41 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */ - 89500, 13, 13, 8, 25, 42, 25, 42 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */ - 98900, 14, 14, 8, 26, 43, 26, 44 }, - { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */ - 108300, 15, 15, 8, 27, 44, 27, 45 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */ - 13200, 0, 0, 8, 12, 28, 28, 28 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */ - 25900, 1, 1, 8, 13, 29, 29, 29 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */ - 38600, 2, 2, 8, 14, 30, 30, 30 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */ - 49800, 3, 3, 8, 15, 31, 31, 31 }, - { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */ - 72200, 4, 4, 8, 16, 32, 32, 32 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */ - 92900, 5, 5, 8, 17, 33, 33, 33 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */ - 102700, 6, 6, 8, 18, 34, 34, 34 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */ - 112000, 7, 7, 8, 19, 35, 36, 36 }, - { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ - 122000, 7, 7, 8, 19, 35, 36, 36 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */ - 25800, 8, 8, 8, 20, 37, 37, 37 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */ - 49800, 9, 9, 8, 21, 38, 38, 38 }, - { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */ - 71900, 10, 10, 8, 22, 39, 39, 39 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */ - 92500, 11, 11, 8, 23, 40, 40, 40 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */ - 130300, 12, 12, 8, 24, 41, 41, 41 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */ - 162800, 13, 13, 8, 25, 42, 42, 42 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */ - 178200, 14, 14, 8, 26, 43, 43, 43 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */ - 192100, 15, 15, 8, 27, 44, 45, 45 }, - { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ - 207000, 15, 15, 8, 27, 44, 45, 45 }, + [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000, + 900, 0, 2, 0, 
0, 0, 0 }, /* 1 Mb */ + [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000, + 1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */ + [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500, + 4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */ + [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000, + 8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */ + [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, + 5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */ + [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, + 7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */ + [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, + 10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */ + [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, + 14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */ + [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, + 17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */ + [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, + 23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */ + [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, + 27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */ + [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, + 30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */ + [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500, + 6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */ + [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, + 12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */ + [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, + 18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/ + [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, + 25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */ + [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, + 36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */ + [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, + 48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */ + [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, + 53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */ + [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, + 59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */ + [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, + 65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/ + [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, + 12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */ + [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, + 24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */ + [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, + 36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */ + [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, + 48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */ + [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, + 69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */ + [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, + 89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */ + [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, + 98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */ + [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, + 108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */ + [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, + 120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */ + [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, + 17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */ + [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, + 35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */ + [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, + 52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */ + [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, + 70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */ + [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, + 104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */ + [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, + 115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */ + [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, + 137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */ + [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, + 151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb 
*/ + [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, + 152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */ + [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, + 168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */ + [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, + 168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */ + [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, + 185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */ + [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, + 13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */ + [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, + 25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */ + [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, + 38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */ + [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, + 49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */ + [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, + 72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */ + [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000, + 92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */ + [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, + 102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */ + [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, + 112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */ + [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, + 122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */ + [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, + 25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */ + [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, + 49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */ + [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, + 71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */ + [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, + 92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */ + [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, + 130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */ + [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, + 162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */ + [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, + 178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */ + [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, + 192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */ + [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, + 207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */ + [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, + 36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */ + [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, + 72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */ + [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, + 108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */ + [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, + 142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */ + [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, + 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */ + [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, + 224700, 20, 20, 8, 64, 65, 65 }, /* 170 Mb */ + [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, + 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */ + [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, + 288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */ + [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, + 290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */ + [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, + 317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */ + [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, + 317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */ + [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, + 346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */ }, 50, /* probe interval */ WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ @@ -220,22 +324,22 @@ 
static const struct ath_rate_table ar5416_11a_ratetable = { 8, 0, { - { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ - 5400, 0, 12, 0, 0, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ - 7800, 1, 18, 0, 1, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ - 10000, 2, 24, 2, 2, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ - 13900, 3, 36, 2, 3, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ - 17300, 4, 48, 4, 4, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ - 23000, 5, 72, 4, 5, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ - 27400, 6, 96, 4, 6, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ - 29300, 7, 108, 4, 7, 0 }, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0, 12, 0}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 1, 18, 0}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 2, 24, 2}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 3, 36, 2}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 4, 48, 4}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 5, 72, 4}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 6, 96, 4}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 7, 108, 4}, }, 50, /* probe interval */ 0, /* Phy rates allowed initially */ @@ -245,30 +349,30 @@ static const struct ath_rate_table ar5416_11g_ratetable = { 12, 0, { - { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ - 900, 0, 2, 0, 0, 0 }, - { VALID, VALID, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ - 1900, 1, 4, 1, 1, 0 }, - { VALID, VALID, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ - 4900, 2, 11, 2, 2, 0 }, - { VALID, VALID, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ - 8100, 3, 22, 3, 3, 0 }, - { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ - 5400, 4, 12, 4, 4, 0 }, - { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ - 7800, 5, 18, 4, 5, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ - 10000, 6, 24, 6, 6, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ - 13900, 7, 36, 6, 7, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ - 17300, 8, 48, 8, 8, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ - 23000, 9, 72, 8, 9, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ - 27400, 10, 96, 8, 10, 0 }, - { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ - 29300, 11, 108, 8, 11, 0 }, + { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ + 900, 0, 2, 0}, + { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ + 1900, 1, 4, 1}, + { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ + 4900, 2, 11, 2}, + { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ + 8100, 3, 22, 3}, + { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 4, 12, 4}, + { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 5, 18, 4}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 6, 24, 6}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 7, 36, 6}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 8, 48, 8}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 9, 72, 8}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 10, 96, 8}, + { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 11, 108, 8}, }, 50, /* probe interval */ 0, /* Phy rates allowed initially */ @@ -338,7 +442,7 @@ static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv, u8 index, int valid_tx_rate) { BUG_ON(index > ath_rc_priv->rate_table_size); - ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 
1 : 0; + ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate; } static inline @@ -370,6 +474,8 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) return 0; if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) return 0; + if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG)) + return 0; if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG)) return 0; if (!ignore_cw && WLAN_RC_PHY_HT(phy)) @@ -400,13 +506,9 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, u32 capflag) { u8 i, hi = 0; - u32 valid; for (i = 0; i < rate_table->rate_cnt; i++) { - valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? - rate_table->info[i].valid_single_stream : - rate_table->info[i].valid); - if (valid == 1) { + if (rate_table->info[i].rate_flags & RC_LEGACY) { u32 phy = rate_table->info[i].phy; u8 valid_rate_count = 0; @@ -418,7 +520,7 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; ath_rc_priv->valid_phy_ratecnt[phy] += 1; ath_rc_set_valid_txmask(ath_rc_priv, i, 1); - hi = A_MAX(hi, i); + hi = i; } } @@ -436,9 +538,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, for (i = 0; i < rateset->rs_nrates; i++) { for (j = 0; j < rate_table->rate_cnt; j++) { u32 phy = rate_table->info[j].phy; - u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? - rate_table->info[j].valid_single_stream : - rate_table->info[j].valid); + u16 rate_flags = rate_table->info[i].rate_flags; u8 rate = rateset->rs_rates[i]; u8 dot11rate = rate_table->info[j].dot11rate; @@ -447,8 +547,9 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, * (VALID/VALID_20/VALID_40) flags */ if ((rate == dot11rate) && - ((valid & WLAN_RC_CAP_MODE(capflag)) == - WLAN_RC_CAP_MODE(capflag)) && + (rate_flags & WLAN_RC_CAP_MODE(capflag)) == + WLAN_RC_CAP_MODE(capflag) && + (rate_flags & WLAN_RC_CAP_STREAM(capflag)) && !WLAN_RC_PHY_HT(phy)) { u8 valid_rate_count = 0; @@ -482,14 +583,13 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv, for (i = 0; i < rateset->rs_nrates; i++) { for (j = 0; j < rate_table->rate_cnt; j++) { u32 phy = rate_table->info[j].phy; - u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 
- rate_table->info[j].valid_single_stream : - rate_table->info[j].valid); + u16 rate_flags = rate_table->info[j].rate_flags; u8 rate = rateset->rs_rates[i]; u8 dot11rate = rate_table->info[j].dot11rate; if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) || - !WLAN_RC_PHY_HT_VALID(valid, capflag)) + !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) || + !WLAN_RC_PHY_HT_VALID(rate_flags, capflag)) continue; if (!ath_rc_valid_phyrate(phy, capflag, 0)) @@ -585,12 +685,15 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc, if (rate > (ath_rc_priv->rate_table_size - 1)) rate = ath_rc_priv->rate_table_size - 1; - if (rate_table->info[rate].valid && - (ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) + if (RC_TS_ONLY(rate_table->info[rate].rate_flags) && + (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG)) + return rate; + + if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) && + (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG))) return rate; - if (rate_table->info[rate].valid_single_stream && - !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) + if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags)) return rate; /* This should not happen */ @@ -1003,12 +1106,19 @@ static void ath_rc_update_ht(struct ath_softc *sc, static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, struct ieee80211_tx_rate *rate) { - int rix; + int rix = 0, i = 0; + int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 }; if (!(rate->flags & IEEE80211_TX_RC_MCS)) return rate->idx; - rix = rate->idx + rate_table->mcs_start; + while (rate->idx > mcs_rix_off[i] && + i < sizeof(mcs_rix_off)/sizeof(int)) { + rix++; i++; + } + + rix += rate->idx + rate_table->mcs_start; + if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && (rate->flags & IEEE80211_TX_RC_SHORT_GI)) rix = rate_table->info[rix].ht_index; @@ -1016,8 +1126,6 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, rix = rate_table->info[rix].sgi_index; else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rix = rate_table->info[rix].cw40index; - else - rix = rate_table->info[rix].base_index; return rix; } @@ -1193,20 +1301,19 @@ static void ath_rc_init(struct ath_softc *sc, } static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, - bool is_cw40, bool is_sgi40) + bool is_cw40, bool is_sgi) { u8 caps = 0; if (sta->ht_cap.ht_supported) { caps = WLAN_RC_HT_FLAG; - if (sc->sc_ah->caps.tx_chainmask != 1 && - ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) { - if (sta->ht_cap.mcs.rx_mask[1]) - caps |= WLAN_RC_DS_FLAG; - } + if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2]) + caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG; + else if (sta->ht_cap.mcs.rx_mask[1]) + caps |= WLAN_RC_DS_FLAG; if (is_cw40) caps |= WLAN_RC_40_FLAG; - if (is_sgi40) + if (is_sgi) caps |= WLAN_RC_SGI_FLAG; } @@ -1300,7 +1407,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ath_softc *sc = priv; struct ath_rate_priv *ath_rc_priv = priv_sta; const struct ath_rate_table *rate_table; - bool is_cw40, is_sgi40; + bool is_cw40, is_sgi = false; int i, j = 0; for (i = 0; i < sband->n_bitrates; i++) { @@ -1323,7 +1430,11 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, } is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; - is_sgi40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + + if (is_cw40) + is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) + is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; /* Choose rate table first 
*/ @@ -1336,7 +1447,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, rate_table = hw_rate_table[sc->cur_rate_mode]; } - ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40); + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi); ath_rc_init(sc, priv_sta, sband, sta, rate_table); } @@ -1347,10 +1458,10 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, struct ath_softc *sc = priv; struct ath_rate_priv *ath_rc_priv = priv_sta; const struct ath_rate_table *rate_table = NULL; - bool oper_cw40 = false, oper_sgi40; + bool oper_cw40 = false, oper_sgi; bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ? true : false; - bool local_sgi40 = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ? + bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ? true : false; /* FIXME: Handle AP mode later when we support CWM */ @@ -1363,15 +1474,21 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, oper_chan_type == NL80211_CHAN_HT40PLUS) oper_cw40 = true; - oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - true : false; + if (oper_cw40) + oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + true : false; + else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) + oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + true : false; + else + oper_sgi = false; - if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) { + if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) { rate_table = ath_choose_rate_table(sc, sband->band, sta->ht_cap.ht_supported, oper_cw40); ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, - oper_cw40, oper_sgi40); + oper_cw40, oper_sgi); ath_rc_init(sc, priv_sta, sband, sta, rate_table); ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h index 3d8d40cdc99ee2f934cf13109a54a4b604ccf2a9..dc1082654501145e15cda7f0a9b17395b001d9ef 100644 --- a/drivers/net/wireless/ath/ath9k/rc.h +++ b/drivers/net/wireless/ath/ath9k/rc.h @@ -24,32 +24,63 @@ struct ath_softc; #define ATH_RATE_MAX 30 -#define RATE_TABLE_SIZE 64 +#define RATE_TABLE_SIZE 72 #define MAX_TX_RATE_PHY 48 -/* VALID_ALL - valid for 20/40/Legacy, - * VALID - Legacy only, - * VALID_20 - HT 20 only, - * VALID_40 - HT 40 only */ -#define INVALID 0x0 -#define VALID 0x1 -#define VALID_20 0x2 -#define VALID_40 0x4 -#define VALID_2040 (VALID_20|VALID_40) -#define VALID_ALL (VALID_2040|VALID) +#define RC_INVALID 0x0000 +#define RC_LEGACY 0x0001 +#define RC_SS 0x0002 +#define RC_DS 0x0004 +#define RC_TS 0x0008 +#define RC_HT_20 0x0010 +#define RC_HT_40 0x0020 + +#define RC_STREAM_MASK 0xe +#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \ + (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS))) +#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS) +#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY)) + +#define RC_HT_2040 (RC_HT_20 | RC_HT_40) +#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS) +#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS) +#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS) +#define RC_HT_S_20 (RC_HT_20 | RC_SS) +#define RC_HT_D_20 (RC_HT_20 | RC_DS) +#define RC_HT_T_20 (RC_HT_20 | RC_TS) +#define RC_HT_S_40 (RC_HT_40 | RC_SS) +#define RC_HT_D_40 (RC_HT_40 | RC_DS) +#define RC_HT_T_40 (RC_HT_40 | RC_TS) + +#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS) +#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS) +#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS) +#define RC_HT_DT_40 
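/*
 * Minimal, compilable sketch of how the new RC_* stream flags introduced in
 * rc.h above are meant to be read.  The macro definitions are copied from the
 * hunk; the test cases and main() are illustrative only and not part of the
 * patch.
 */
#include <assert.h>

#define RC_LEGACY       0x0001
#define RC_SS           0x0002
#define RC_DS           0x0004
#define RC_TS           0x0008
#define RC_HT_20        0x0010
#define RC_HT_40        0x0020

#define RC_STREAM_MASK  0xe
#define RC_DS_OR_LATER(f)       ((((f) & RC_STREAM_MASK) == RC_DS) || \
                                 (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
#define RC_TS_ONLY(f)           (((f) & RC_STREAM_MASK) == RC_TS)
#define RC_SS_OR_LEGACY(f)      ((f) & (RC_SS | RC_LEGACY))

int main(void)
{
        /* RC_DS_OR_LATER: the stream set is exactly {DS} or {DS, TS}. */
        assert(RC_DS_OR_LATER(RC_HT_20 | RC_DS));
        assert(RC_DS_OR_LATER(RC_HT_40 | RC_DS | RC_TS));
        /* RC_TS_ONLY: the stream set is exactly {TS}. */
        assert(RC_TS_ONLY(RC_HT_40 | RC_TS));
        assert(!RC_TS_ONLY(RC_HT_40 | RC_DS | RC_TS));
        /* RC_SS_OR_LEGACY: any rate carrying the SS or LEGACY bit. */
        assert(RC_SS_OR_LEGACY(RC_LEGACY));
        assert(RC_SS_OR_LEGACY(RC_HT_20 | RC_SS));
        assert(!RC_SS_OR_LEGACY(RC_HT_20 | RC_DS));
        return 0;
}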
(RC_HT_40 | RC_DS | RC_TS) + +#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS) +#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS) + +#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS) +#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS) + +#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM) enum { WLAN_RC_PHY_OFDM, WLAN_RC_PHY_CCK, WLAN_RC_PHY_HT_20_SS, WLAN_RC_PHY_HT_20_DS, + WLAN_RC_PHY_HT_20_TS, WLAN_RC_PHY_HT_40_SS, WLAN_RC_PHY_HT_40_DS, + WLAN_RC_PHY_HT_40_TS, WLAN_RC_PHY_HT_20_SS_HGI, WLAN_RC_PHY_HT_20_DS_HGI, + WLAN_RC_PHY_HT_20_TS_HGI, WLAN_RC_PHY_HT_40_SS_HGI, WLAN_RC_PHY_HT_40_DS_HGI, + WLAN_RC_PHY_HT_40_TS_HGI, WLAN_RC_PHY_MAX }; @@ -57,36 +88,50 @@ enum { || (_phy == WLAN_RC_PHY_HT_40_DS) \ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) +#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \ + || (_phy == WLAN_RC_PHY_HT_40_TS) \ + || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_TS_HGI)) #define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \ || (_phy == WLAN_RC_PHY_HT_20_DS) \ + || (_phy == WLAN_RC_PHY_HT_20_TS) \ || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ - || (_phy == WLAN_RC_PHY_HT_20_DS_HGI)) + || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_20_TS_HGI)) #define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ || (_phy == WLAN_RC_PHY_HT_40_DS) \ + || (_phy == WLAN_RC_PHY_HT_40_TS) \ || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ - || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_TS_HGI)) #define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \ || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ - || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_TS_HGI)) #define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) #define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \ - (capflag & WLAN_RC_40_FLAG) ? VALID_40 : VALID_20 : VALID)) + ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY)) + +#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \ + (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS))) /* Return TRUE if flag supports HT20 && client supports HT20 or * return TRUE if flag supports HT40 && client supports HT40. * This is used becos some rates overlap between HT20/HT40. 
*/ #define WLAN_RC_PHY_HT_VALID(flag, capflag) \ - (((flag & VALID_20) && !(capflag & WLAN_RC_40_FLAG)) || \ - ((flag & VALID_40) && (capflag & WLAN_RC_40_FLAG))) + (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \ + ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG))) #define WLAN_RC_DS_FLAG (0x01) -#define WLAN_RC_40_FLAG (0x02) -#define WLAN_RC_SGI_FLAG (0x04) -#define WLAN_RC_HT_FLAG (0x08) +#define WLAN_RC_TS_FLAG (0x02) +#define WLAN_RC_40_FLAG (0x04) +#define WLAN_RC_SGI_FLAG (0x08) +#define WLAN_RC_HT_FLAG (0x10) /** * struct ath_rate_table - Rate Control table @@ -110,15 +155,13 @@ struct ath_rate_table { int rate_cnt; int mcs_start; struct { - u8 valid; - u8 valid_single_stream; + u16 rate_flags; u8 phy; u32 ratekbps; u32 user_ratekbps; u8 ratecode; u8 dot11rate; u8 ctrl_rate; - u8 base_index; u8 cw40index; u8 sgi_index; u8 ht_index; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index e3e52913d83a49326e3319cd67e1a0275b302d39..da0cfe90c38acab5c5a7bbca9a5e74ed92b7c837 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -116,9 +116,6 @@ static void ath_opmode_init(struct ath_softc *sc) /* configure operational mode */ ath9k_hw_setopmode(ah); - /* Handle any link-level address change. */ - ath9k_hw_setmac(ah, common->macaddr); - /* calculate and install multicast filter */ mfilt[0] = mfilt[1] = ~0; ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); @@ -295,7 +292,7 @@ static void ath_edma_start_recv(struct ath_softc *sc) ath_opmode_init(sc); - ath9k_hw_startpcureceive(sc->sc_ah); + ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING)); } static void ath_edma_stop_recv(struct ath_softc *sc) @@ -501,7 +498,7 @@ int ath_startrecv(struct ath_softc *sc) start_recv: spin_unlock_bh(&sc->rx.rxbuflock); ath_opmode_init(sc); - ath9k_hw_startpcureceive(ah); + ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING)); return 0; } @@ -700,12 +697,16 @@ static bool ath_edma_get_buffers(struct ath_softc *sc, bf = SKB_CB_ATHBUF(skb); BUG_ON(!bf); - dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, + dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data); - if (ret == -EINPROGRESS) + if (ret == -EINPROGRESS) { + /*let device gain the buffer again*/ + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, + common->rx_bufsize, DMA_FROM_DEVICE); return false; + } __skb_unlink(skb, &rx_edma->rx_fifo); if (ret == -EINVAL) { @@ -814,13 +815,263 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, * 1. accessing the frame * 2. requeueing the same buffer to h/w */ - dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, + dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); return bf; } +/* Assumes you've already done the endian to CPU conversion */ +static bool ath9k_rx_accept(struct ath_common *common, + struct ieee80211_hdr *hdr, + struct ieee80211_rx_status *rxs, + struct ath_rx_status *rx_stats, + bool *decrypt_error) +{ + struct ath_hw *ah = common->ah; + __le16 fc; + u8 rx_status_len = ah->caps.rx_status_len; + + fc = hdr->frame_control; + + if (!rx_stats->rs_datalen) + return false; + /* + * rs_status follows rs_datalen so if rs_datalen is too large + * we can take a hint that hardware corrupted it, so ignore + * those frames. 
+ */ + if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) + return false; + + /* + * rs_more indicates chained descriptors which can be used + * to link buffers together for a sort of scatter-gather + * operation. + * reject the frame, we don't support scatter-gather yet and + * the frame is probably corrupt anyway + */ + if (rx_stats->rs_more) + return false; + + /* + * The rx_stats->rs_status will not be set until the end of the + * chained descriptors so it can be ignored if rs_more is set. The + * rs_more will be false at the last element of the chained + * descriptors. + */ + if (rx_stats->rs_status != 0) { + if (rx_stats->rs_status & ATH9K_RXERR_CRC) + rxs->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_stats->rs_status & ATH9K_RXERR_PHY) + return false; + + if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { + *decrypt_error = true; + } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { + if (ieee80211_is_ctl(fc)) + /* + * Sometimes, we get invalid + * MIC failures on valid control frames. + * Remove these mic errors. + */ + rx_stats->rs_status &= ~ATH9K_RXERR_MIC; + else + rxs->flag |= RX_FLAG_MMIC_ERROR; + } + /* + * Reject error frames with the exception of + * decryption and MIC failures. For monitor mode, + * we also ignore the CRC error. + */ + if (ah->opmode == NL80211_IFTYPE_MONITOR) { + if (rx_stats->rs_status & + ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | + ATH9K_RXERR_CRC)) + return false; + } else { + if (rx_stats->rs_status & + ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { + return false; + } + } + } + return true; +} + +static int ath9k_process_rate(struct ath_common *common, + struct ieee80211_hw *hw, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rxs) +{ + struct ieee80211_supported_band *sband; + enum ieee80211_band band; + unsigned int i = 0; + + band = hw->conf.channel->band; + sband = hw->wiphy->bands[band]; + + if (rx_stats->rs_rate & 0x80) { + /* HT rate */ + rxs->flag |= RX_FLAG_HT; + if (rx_stats->rs_flags & ATH9K_RX_2040) + rxs->flag |= RX_FLAG_40MHZ; + if (rx_stats->rs_flags & ATH9K_RX_GI) + rxs->flag |= RX_FLAG_SHORT_GI; + rxs->rate_idx = rx_stats->rs_rate & 0x7f; + return 0; + } + + for (i = 0; i < sband->n_bitrates; i++) { + if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { + rxs->rate_idx = i; + return 0; + } + if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { + rxs->flag |= RX_FLAG_SHORTPRE; + rxs->rate_idx = i; + return 0; + } + } + + /* + * No valid hardware bitrate found -- we should not get here + * because hardware has already validated this frame as OK. + */ + ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " + "0x%02x using 1 Mbit\n", rx_stats->rs_rate); + + return -EINVAL; +} + +static void ath9k_process_rssi(struct ath_common *common, + struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, + struct ath_rx_status *rx_stats) +{ + struct ath_hw *ah = common->ah; + struct ieee80211_sta *sta; + struct ath_node *an; + int last_rssi = ATH_RSSI_DUMMY_MARKER; + __le16 fc; + + fc = hdr->frame_control; + + rcu_read_lock(); + /* + * XXX: use ieee80211_find_sta! This requires quite a bit of work + * under the current ath9k virtual wiphy implementation as we have + * no way of tying a vif to wiphy. Typically vifs are attached to + * at least one sdata of a wiphy on mac80211 but with ath9k virtual + * wiphy you'd have to iterate over every wiphy and each sdata. 
+ */ + sta = ieee80211_find_sta_by_hw(hw, hdr->addr2); + if (sta) { + an = (struct ath_node *) sta->drv_priv; + if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && + !rx_stats->rs_moreaggr) + ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi); + last_rssi = an->last_rssi; + } + rcu_read_unlock(); + + if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) + rx_stats->rs_rssi = ATH_EP_RND(last_rssi, + ATH_RSSI_EP_MULTIPLIER); + if (rx_stats->rs_rssi < 0) + rx_stats->rs_rssi = 0; + + /* Update Beacon RSSI, this is used by ANI. */ + if (ieee80211_is_beacon(fc)) + ah->stats.avgbrssi = rx_stats->rs_rssi; +} + +/* + * For Decrypt or Demic errors, we only mark packet status here and always push + * up the frame up to let mac80211 handle the actual error case, be it no + * decryption key or real decryption error. This let us keep statistics there. + */ +static int ath9k_rx_skb_preprocess(struct ath_common *common, + struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rx_status, + bool *decrypt_error) +{ + memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); + + /* + * everything but the rate is checked here, the rate check is done + * separately to avoid doing two lookups for a rate for each frame. + */ + if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) + return -EINVAL; + + ath9k_process_rssi(common, hw, hdr, rx_stats); + + if (ath9k_process_rate(common, hw, rx_stats, rx_status)) + return -EINVAL; + + rx_status->band = hw->conf.channel->band; + rx_status->freq = hw->conf.channel->center_freq; + rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; + rx_status->antenna = rx_stats->rs_antenna; + rx_status->flag |= RX_FLAG_TSFT; + + return 0; +} + +static void ath9k_rx_skb_postprocess(struct ath_common *common, + struct sk_buff *skb, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rxs, + bool decrypt_error) +{ + struct ath_hw *ah = common->ah; + struct ieee80211_hdr *hdr; + int hdrlen, padpos, padsize; + u8 keyix; + __le16 fc; + + /* see if any padding is done by the hw and remove it */ + hdr = (struct ieee80211_hdr *) skb->data; + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + fc = hdr->frame_control; + padpos = ath9k_cmn_padpos(hdr->frame_control); + + /* The MAC header is padded to have 32-bit boundary if the + * packet payload is non-zero. The general calculation for + * padsize would take into account odd header lengths: + * padsize = (4 - padpos % 4) % 4; However, since only + * even-length headers are used, padding can only be 0 or 2 + * bytes and we can optimize this a bit. In addition, we must + * not try to remove padding from short control frames that do + * not have payload. */ + padsize = padpos & 3; + if (padsize && skb->len>=padpos+padsize+FCS_LEN) { + memmove(skb->data + padsize, skb->data, padpos); + skb_pull(skb, padsize); + } + + keyix = rx_stats->rs_keyix; + + if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && + ieee80211_has_protected(fc)) { + rxs->flag |= RX_FLAG_DECRYPTED; + } else if (ieee80211_has_protected(fc) + && !decrypt_error && skb->len >= hdrlen + 4) { + keyix = skb->data[hdrlen + 3] >> 6; + + if (test_bit(keyix, common->keymap)) + rxs->flag |= RX_FLAG_DECRYPTED; + } + if (ah->sw_mgmt_crypto && + (rxs->flag & RX_FLAG_DECRYPTED) && + ieee80211_is_mgmt(fc)) + /* Use software decrypt for management frames. 
*/ + rxs->flag &= ~RX_FLAG_DECRYPTED; +} int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { @@ -842,6 +1093,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); int dma_type; + u8 rx_status_len = ah->caps.rx_status_len; + u64 tsf = 0; + u32 tsf_lower = 0; if (edma) dma_type = DMA_BIDIRECTIONAL; @@ -851,6 +1105,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; spin_lock_bh(&sc->rx.rxbuflock); + tsf = ath9k_hw_gettsf64(ah); + tsf_lower = tsf & 0xffffffff; + do { /* If handling rx interrupt and flush is in progress => exit */ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) @@ -869,7 +1126,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (!skb) continue; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); rxs = IEEE80211_SKB_RXCB(skb); hw = ath_get_virt_hw(sc, hdr); @@ -883,8 +1140,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (flush) goto requeue; - retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs, - rxs, &decrypt_error); + rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; + if (rs.rs_tstamp > tsf_lower && + unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) + rxs->mactime -= 0x100000000ULL; + + if (rs.rs_tstamp < tsf_lower && + unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) + rxs->mactime += 0x100000000ULL; + + retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, + rxs, &decrypt_error); if (retval) goto requeue; @@ -908,8 +1174,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (ah->caps.rx_status_len) skb_pull(skb, ah->caps.rx_status_len); - ath9k_cmn_rx_skb_postprocess(common, skb, &rs, - rxs, decrypt_error); + ath9k_rx_skb_postprocess(common, skb, &rs, + rxs, decrypt_error); /* We will now give hardware our shiny new allocated skb */ bf->bf_mpdu = requeue_skb; diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index d4371a43bdaac35a6073a55fccf319c3bbe46043..633e3d949ec09f172fab67d797ab449a557ae4f1 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -222,6 +222,7 @@ #define AR_ISR_S2 0x008c #define AR_ISR_S2_QCU_TXURN 0x000003FF +#define AR_ISR_S2_BB_WATCHDOG 0x00010000 #define AR_ISR_S2_CST 0x00400000 #define AR_ISR_S2_GTT 0x00800000 #define AR_ISR_S2_TIM 0x01000000 @@ -699,7 +700,15 @@ #define AR_RC_HOSTIF 0x00000100 #define AR_WA 0x4004 +#define AR_WA_BIT6 (1 << 6) +#define AR_WA_BIT7 (1 << 7) +#define AR_WA_BIT23 (1 << 23) #define AR_WA_D3_L1_DISABLE (1 << 14) +#define AR_WA_D3_TO_L1_DISABLE_REAL (1 << 16) +#define AR_WA_ASPM_TIMER_BASED_DISABLE (1 << 17) +#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ +#define AR_WA_ANALOG_SHIFT (1 << 20) +#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ #define AR9285_WA_DEFAULT 0x004a050b #define AR9280_WA_DEFAULT 0x0040073b #define AR_WA_DEFAULT 0x0000073f @@ -756,32 +765,33 @@ #define AR_SREV_REVISION2 0x00000F00 #define AR_SREV_REVISION2_S 8 -#define AR_SREV_VERSION_5416_PCI 0xD -#define AR_SREV_VERSION_5416_PCIE 0xC -#define AR_SREV_REVISION_5416_10 0 -#define AR_SREV_REVISION_5416_20 1 -#define AR_SREV_REVISION_5416_22 2 -#define AR_SREV_VERSION_9100 0x14 -#define AR_SREV_VERSION_9160 0x40 -#define AR_SREV_REVISION_9160_10 0 -#define AR_SREV_REVISION_9160_11 1 -#define AR_SREV_VERSION_9280 0x80 -#define 
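/*
 * Worked check for the padding comment in ath9k_rx_skb_postprocess() above:
 * for the even header lengths the driver actually sees, the shortcut
 * "padpos & 3" agrees with the general "(4 - padpos % 4) % 4" formula, and
 * the two diverge only for odd padpos, which does not occur here.  Standalone
 * illustration, not part of the patch.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        int lens[] = { 24, 26, 30, 32 };        /* typical even padpos values */
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                int general = (4 - lens[i] % 4) % 4;
                int shortcut = lens[i] & 3;

                assert(general == shortcut);
                printf("padpos %2d -> padsize %d\n", lens[i], shortcut);
        }

        /* For an odd padpos the shortcut would differ: 25 -> 3 vs. 1. */
        assert(((4 - 25 % 4) % 4) == 3 && (25 & 3) == 1);
        return 0;
}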
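/*
 * Sketch of the 64-bit timestamp reconstruction added to ath_rx_tasklet()
 * above: the hardware reports only the low 32 bits of the TSF at receive
 * time (rs_tstamp), so the upper half is taken from a TSF read at the top of
 * the tasklet and corrected when the 32-bit counter wrapped in between.
 * Standalone illustration using the same constants; not part of the patch.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t extend_rx_tstamp(uint64_t tsf, uint32_t rs_tstamp)
{
        uint32_t tsf_lower = (uint32_t)tsf;
        uint64_t mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

        /* Frame stamped before the wrap, TSF read after it. */
        if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
                mactime -= 0x100000000ULL;
        /* Frame stamped after the wrap, TSF read before it. */
        if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
                mactime += 0x100000000ULL;
        return mactime;
}

int main(void)
{
        /* TSF just rolled into a new 2^32 us period; frame predates the wrap. */
        assert(extend_rx_tstamp(0x200000100ULL, 0xffffff00U) == 0x1ffffff00ULL);
        /* No wrap: the upper bits are used unchanged. */
        assert(extend_rx_tstamp(0x200001000ULL, 0x00000800U) == 0x200000800ULL);
        return 0;
}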
AR_SREV_REVISION_9280_10 0 -#define AR_SREV_REVISION_9280_20 1 -#define AR_SREV_REVISION_9280_21 2 -#define AR_SREV_VERSION_9285 0xC0 -#define AR_SREV_REVISION_9285_10 0 -#define AR_SREV_REVISION_9285_11 1 -#define AR_SREV_REVISION_9285_12 2 -#define AR_SREV_VERSION_9287 0x180 -#define AR_SREV_REVISION_9287_10 0 -#define AR_SREV_REVISION_9287_11 1 -#define AR_SREV_REVISION_9287_12 2 -#define AR_SREV_VERSION_9271 0x140 -#define AR_SREV_REVISION_9271_10 0 -#define AR_SREV_REVISION_9271_11 1 -#define AR_SREV_VERSION_9300 0x1c0 -#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ +#define AR_SREV_VERSION_5416_PCI 0xD +#define AR_SREV_VERSION_5416_PCIE 0xC +#define AR_SREV_REVISION_5416_10 0 +#define AR_SREV_REVISION_5416_20 1 +#define AR_SREV_REVISION_5416_22 2 +#define AR_SREV_VERSION_9100 0x14 +#define AR_SREV_VERSION_9160 0x40 +#define AR_SREV_REVISION_9160_10 0 +#define AR_SREV_REVISION_9160_11 1 +#define AR_SREV_VERSION_9280 0x80 +#define AR_SREV_REVISION_9280_10 0 +#define AR_SREV_REVISION_9280_20 1 +#define AR_SREV_REVISION_9280_21 2 +#define AR_SREV_VERSION_9285 0xC0 +#define AR_SREV_REVISION_9285_10 0 +#define AR_SREV_REVISION_9285_11 1 +#define AR_SREV_REVISION_9285_12 2 +#define AR_SREV_VERSION_9287 0x180 +#define AR_SREV_REVISION_9287_10 0 +#define AR_SREV_REVISION_9287_11 1 +#define AR_SREV_REVISION_9287_12 2 +#define AR_SREV_REVISION_9287_13 3 +#define AR_SREV_VERSION_9271 0x140 +#define AR_SREV_REVISION_9271_10 0 +#define AR_SREV_REVISION_9271_11 1 +#define AR_SREV_VERSION_9300 0x1c0 +#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ #define AR_SREV_5416(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ @@ -859,6 +869,11 @@ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12))) +#define AR_SREV_9287_13_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_13))) + #define AR_SREV_9271(_ah) \ (((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271) #define AR_SREV_9271_10(_ah) \ @@ -867,6 +882,7 @@ #define AR_SREV_9271_11(_ah) \ (AR_SREV_9271(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11)) + #define AR_SREV_9300(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300)) #define AR_SREV_9300_20(_ah) \ @@ -881,6 +897,10 @@ (AR_SREV_9285_12_OR_LATER(_ah) && \ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) +#define AR_DEVID_7010(_ah) \ + (((_ah)->hw_version.devid == 0x7010) || \ + ((_ah)->hw_version.devid == 0x9018)) + #define AR_RADIO_SREV_MAJOR 0xf0 #define AR_RAD5133_SREV_MAJOR 0xc0 #define AR_RAD2133_SREV_MAJOR 0xd0 @@ -978,6 +998,7 @@ enum { #define AR9287_NUM_GPIO 11 #define AR9271_NUM_GPIO 16 #define AR9300_NUM_GPIO 17 +#define AR7010_NUM_GPIO 16 #define AR_GPIO_IN_OUT 0x4048 #define AR_GPIO_IN_VAL 0x0FFFC000 @@ -992,6 +1013,8 @@ enum { #define AR9271_GPIO_IN_VAL_S 16 #define AR9300_GPIO_IN_VAL 0x0001FFFF #define AR9300_GPIO_IN_VAL_S 0 +#define AR7010_GPIO_IN_VAL 0x0000FFFF +#define AR7010_GPIO_IN_VAL_S 0 #define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 
0x4050 : 0x404c) #define AR_GPIO_OE_OUT_DRV 0x3 @@ -1000,6 +1023,21 @@ enum { #define AR_GPIO_OE_OUT_DRV_HI 0x2 #define AR_GPIO_OE_OUT_DRV_ALL 0x3 +#define AR7010_GPIO_OE 0x52000 +#define AR7010_GPIO_OE_MASK 0x1 +#define AR7010_GPIO_OE_AS_OUTPUT 0x0 +#define AR7010_GPIO_OE_AS_INPUT 0x1 +#define AR7010_GPIO_IN 0x52004 +#define AR7010_GPIO_OUT 0x52008 +#define AR7010_GPIO_SET 0x5200C +#define AR7010_GPIO_CLEAR 0x52010 +#define AR7010_GPIO_INT 0x52014 +#define AR7010_GPIO_INT_TYPE 0x52018 +#define AR7010_GPIO_INT_POLARITY 0x5201C +#define AR7010_GPIO_PENDING 0x52020 +#define AR7010_GPIO_INT_MASK 0x52024 +#define AR7010_GPIO_FUNCTION 0x52028 + #define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050) #define AR_GPIO_INTR_POL_VAL 0x0001FFFF #define AR_GPIO_INTR_POL_VAL_S 0 diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c index 105ad40968f6a41af2b7de50299a97d239b58d21..fd20241f57d8ef8239aa3cdc62bec57198e2ec46 100644 --- a/drivers/net/wireless/ath/ath9k/virtual.c +++ b/drivers/net/wireless/ath/ath9k/virtual.c @@ -219,7 +219,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy, info->control.rates[1].idx = -1; memset(&txctl, 0, sizeof(struct ath_tx_control)); - txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]]; + txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]]; txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE; if (ath_tx_start(aphy->hw, skb, &txctl) != 0) @@ -695,16 +695,18 @@ void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle) idle ? "idle" : "not-idle"); } /* Only bother starting a queue on an active virtual wiphy */ -void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) +bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) { struct ieee80211_hw *hw = sc->pri_wiphy->hw; unsigned int i; + bool txq_started = false; spin_lock_bh(&sc->wiphy_lock); /* Start the primary wiphy */ if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) { ieee80211_wake_queue(hw, skb_queue); + txq_started = true; goto unlock; } @@ -718,11 +720,13 @@ void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) hw = aphy->hw; ieee80211_wake_queue(hw, skb_queue); + txq_started = true; break; } unlock: spin_unlock_bh(&sc->wiphy_lock); + return txq_started; } /* Go ahead and propagate information to all virtual wiphys, it won't hurt */ diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index e23172c9caaf1e038a2ee0b91556c008062030cc..6260faa658a262ebd16017425deb2cdd28aa48ae 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -279,9 +279,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (wmi->drv_priv->op_flags & OP_UNPLUGGED) return 0; - if (!wmi) - return -EINVAL; - skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 859aa4ab07698aafcd937b606cb7bafd7f4e05fe..501b72821b4d426e5138b7f3b84e9a962d617361 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -328,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, u32 ba[WME_BA_BMP_SIZE >> 5]; int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; bool rc_update = true; + struct ieee80211_tx_rate rates[4]; skb = bf->bf_mpdu; hdr = (struct ieee80211_hdr *)skb->data; @@ -335,18 +336,44 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq 
*txq, tx_info = IEEE80211_SKB_CB(skb); hw = bf->aphy->hw; + memcpy(rates, tx_info->control.rates, sizeof(rates)); + rcu_read_lock(); /* XXX: use ieee80211_find_sta! */ sta = ieee80211_find_sta_by_hw(hw, hdr->addr1); if (!sta) { rcu_read_unlock(); + + INIT_LIST_HEAD(&bf_head); + while (bf) { + bf_next = bf->bf_next; + + bf->bf_state.bf_type |= BUF_XRETRY; + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || + !bf->bf_stale || bf_next != NULL) + list_move_tail(&bf->list, &bf_head); + + ath_tx_rc_status(bf, ts, 0, 0, false); + ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, + 0, 0); + + bf = bf_next; + } return; } an = (struct ath_node *)sta->drv_priv; tid = ATH_AN_2_TID(an, bf->bf_tidno); + /* + * The hardware occasionally sends a tx status for the wrong TID. + * In this case, the BA status cannot be considered valid and all + * subframes need to be retransmitted + */ + if (bf->bf_tidno != ts->tid) + txok = false; + isaggr = bf_isaggr(bf); memset(ba, 0, WME_BA_BMP_SIZE >> 3); @@ -375,6 +402,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, txfail = txpending = 0; bf_next = bf->bf_next; + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { /* transmit completion, subframe is * acked by block ack */ @@ -428,6 +458,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, spin_unlock_bh(&txq->axq_lock); if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { + memcpy(tx_info->control.rates, rates, sizeof(rates)); ath_tx_rc_status(bf, ts, nbad, txok, true); rc_update = false; } else { @@ -487,6 +518,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, bf = bf_next; } + /* prepend un-acked frames to the beginning of the pending frame queue */ + if (!list_empty(&bf_pending)) { + spin_lock_bh(&txq->axq_lock); + list_splice(&bf_pending, &tid->buf_q); + ath_tx_queue_tid(txq, tid); + spin_unlock_bh(&txq->axq_lock); + } + if (tid->state & AGGR_CLEANUP) { if (tid->baw_head == tid->baw_tail) { tid->state &= ~AGGR_ADDBA_COMPLETE; @@ -499,14 +538,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, return; } - /* prepend un-acked frames to the beginning of the pending frame queue */ - if (!list_empty(&bf_pending)) { - spin_lock_bh(&txq->axq_lock); - list_splice(&bf_pending, &tid->buf_q); - ath_tx_queue_tid(txq, tid); - spin_unlock_bh(&txq->axq_lock); - } - rcu_read_unlock(); if (needreset) @@ -941,6 +972,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) if (!ATH_TXQ_SETUP(sc, qnum)) { struct ath_txq *txq = &sc->tx.txq[qnum]; + txq->axq_class = subtype; txq->axq_qnum = qnum; txq->axq_link = NULL; INIT_LIST_HEAD(&txq->axq_q); @@ -958,58 +990,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) return &sc->tx.txq[qnum]; } -int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) -{ - int qnum; - - switch (qtype) { - case ATH9K_TX_QUEUE_DATA: - if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { - ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, - "HAL AC %u out of range, max %zu!\n", - haltype, ARRAY_SIZE(sc->tx.hwq_map)); - return -1; - } - qnum = sc->tx.hwq_map[haltype]; - break; - case ATH9K_TX_QUEUE_BEACON: - qnum = sc->beacon.beaconq; - break; - case ATH9K_TX_QUEUE_CAB: - qnum = sc->beacon.cabq->axq_qnum; - break; - default: - qnum = -1; - } - return qnum; -} - -struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) -{ - struct ath_txq *txq = 
NULL; - u16 skb_queue = skb_get_queue_mapping(skb); - int qnum; - - qnum = ath_get_hal_qnum(skb_queue, sc); - txq = &sc->tx.txq[qnum]; - - spin_lock_bh(&txq->axq_lock); - - if (txq->axq_depth >= (ATH_TXBUF - 20)) { - ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT, - "TX queue: %d is full, depth: %d\n", - qnum, txq->axq_depth); - ath_mac80211_stop_queue(sc, skb_queue); - txq->stopped = 1; - spin_unlock_bh(&txq->axq_lock); - return NULL; - } - - spin_unlock_bh(&txq->axq_lock); - - return txq; -} - int ath_txq_update(struct ath_softc *sc, int qnum, struct ath9k_tx_queue_info *qinfo) { @@ -1688,12 +1668,15 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, bf->bf_frmlen -= padsize; } - if (conf_is_ht(&hw->conf)) { + if (!txctl->paprd && conf_is_ht(&hw->conf)) { bf->bf_state.bf_type |= BUF_HT; if (tx_info->flags & IEEE80211_TX_CTL_LDPC) use_ldpc = true; } + bf->bf_state.bfs_paprd = txctl->paprd; + if (txctl->paprd) + bf->bf_state.bfs_paprd_timestamp = jiffies; bf->bf_flags = setup_tx_flags(skb, use_ldpc); bf->bf_keytype = get_hw_crypto_keytype(skb); @@ -1768,6 +1751,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, bf->bf_buf_addr, txctl->txq->axq_qnum); + if (bf->bf_state.bfs_paprd) + ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd); + spin_lock_bh(&txctl->txq->axq_lock); if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && @@ -1809,8 +1795,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_txq *txq = txctl->txq; struct ath_buf *bf; - int r; + int q, r; bf = ath_tx_get_buffer(sc); if (!bf) { @@ -1820,8 +1807,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, r = ath_tx_setup_buffer(hw, bf, skb, txctl); if (unlikely(r)) { - struct ath_txq *txq = txctl->txq; - ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n"); /* upon ath_tx_processq() this TX queue will be resumed, we @@ -1829,7 +1814,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, * we will at least have to run TX completionon one buffer * on the queue */ spin_lock_bh(&txq->axq_lock); - if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) { + if (!txq->stopped && txq->axq_depth > 1) { ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb)); txq->stopped = 1; } @@ -1840,6 +1825,17 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, return r; } + q = skb_get_queue_mapping(skb); + if (q >= 4) + q = 0; + + spin_lock_bh(&txq->axq_lock); + if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) { + ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb)); + txq->stopped = 1; + } + spin_unlock_bh(&txq->axq_lock); + ath_tx_start_dma(sc, bf, txctl); return 0; @@ -1909,7 +1905,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; - int padpos, padsize; + int q, padpos, padsize; ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); @@ -1948,8 +1944,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) ath9k_tx_status(hw, skb); - else + else { + q = skb_get_queue_mapping(skb); + if (q >= 4) + q = 0; + + if (--sc->tx.pending_frames[q] < 0) + sc->tx.pending_frames[q] = 0; + 
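/*
 * Model of the per-queue software accounting introduced in ath_tx_start() and
 * ath_tx_complete() above: a pending-frame counter per hardware queue is
 * incremented on submission and decremented on completion, the mac80211 queue
 * is stopped once the counter exceeds a depth threshold and woken again when
 * it drops back below it.  The struct, names and the threshold value are
 * placeholders (the real ATH_MAX_QDEPTH is defined outside these hunks); only
 * the counting logic mirrors the patch.
 */
#include <assert.h>
#include <stdbool.h>

#define NUM_Q           4
#define MAX_QDEPTH      128     /* placeholder threshold */

struct txq_model {
        int pending[NUM_Q];
        bool stopped[NUM_Q];
};

/* Called for every frame handed to the hardware (cf. ath_tx_start). */
static void model_tx_start(struct txq_model *m, int q)
{
        if (++m->pending[q] > MAX_QDEPTH && !m->stopped[q])
                m->stopped[q] = true;           /* stop the mac80211 queue */
}

/* Called for every completed frame (cf. ath_tx_complete + wake path). */
static void model_tx_complete(struct txq_model *m, int q)
{
        if (--m->pending[q] < 0)
                m->pending[q] = 0;
        if (m->stopped[q] && m->pending[q] < MAX_QDEPTH)
                m->stopped[q] = false;          /* wake the mac80211 queue */
}

int main(void)
{
        struct txq_model m = { {0}, {false} };
        int i;

        for (i = 0; i < MAX_QDEPTH + 1; i++)
                model_tx_start(&m, 0);
        assert(m.stopped[0]);                   /* stops once the threshold is crossed */

        model_tx_complete(&m, 0);
        model_tx_complete(&m, 0);
        assert(!m.stopped[0]);                  /* woken again as the backlog drains */
        return 0;
}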
ieee80211_tx_status(hw, skb); + } } static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, @@ -1971,8 +1975,18 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, } dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); - ath_tx_complete(sc, skb, bf->aphy, tx_flags); - ath_debug_stat_tx(sc, txq, bf, ts); + + if (bf->bf_state.bfs_paprd) { + if (time_after(jiffies, + bf->bf_state.bfs_paprd_timestamp + + msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) + dev_kfree_skb_any(skb); + else + complete(&sc->paprd_complete); + } else { + ath_tx_complete(sc, skb, bf->aphy, tx_flags); + ath_debug_stat_tx(sc, txq, bf, ts); + } /* * Return the list of ath_buf of this mpdu to free queue @@ -2050,21 +2064,21 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, tx_info->status.rates[i].idx = -1; } - tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; } static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) { int qnum; + qnum = ath_get_mac80211_qnum(txq->axq_class, sc); + if (qnum == -1) + return; + spin_lock_bh(&txq->axq_lock); - if (txq->stopped && - sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) { - qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); - if (qnum != -1) { - ath_mac80211_start_queue(sc, qnum); + if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) { + if (ath_mac80211_start_queue(sc, qnum)) txq->stopped = 0; - } } spin_unlock_bh(&txq->axq_lock); } @@ -2161,7 +2175,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) * This frame is sent out as a single frame. * Use hardware retry status for this frame. */ - bf->bf_retries = ts.ts_longretry; if (ts.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; ath_tx_rc_status(bf, &ts, 0, txok, true); @@ -2279,8 +2292,18 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) txok = !(txs.ts_status & ATH9K_TXERR_MASK); + /* + * Make sure null func frame is acked before configuring + * hw into ps mode. 
+ */ + if (bf->bf_isnullfunc && txok) { + if ((sc->ps_flags & PS_ENABLED)) + ath9k_enable_ps(sc); + else + sc->ps_flags |= PS_NULLFUNC_COMPLETED; + } + if (!bf_isampdu(bf)) { - bf->bf_retries = txs.ts_longretry; if (txs.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; ath_tx_rc_status(bf, &txs, 0, txok, true); @@ -2424,62 +2447,44 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) for (acno = 0, ac = &an->ac[acno]; acno < WME_NUM_AC; acno++, ac++) { ac->sched = false; + ac->qnum = sc->tx.hwq_map[acno]; INIT_LIST_HEAD(&ac->tid_q); - - switch (acno) { - case WME_AC_BE: - ac->qnum = ath_tx_get_qnum(sc, - ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); - break; - case WME_AC_BK: - ac->qnum = ath_tx_get_qnum(sc, - ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK); - break; - case WME_AC_VI: - ac->qnum = ath_tx_get_qnum(sc, - ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI); - break; - case WME_AC_VO: - ac->qnum = ath_tx_get_qnum(sc, - ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO); - break; - } } } void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) { - int i; - struct ath_atx_ac *ac, *ac_tmp; - struct ath_atx_tid *tid, *tid_tmp; + struct ath_atx_ac *ac; + struct ath_atx_tid *tid; struct ath_txq *txq; + int i, tidno; - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (ATH_TXQ_SETUP(sc, i)) { - txq = &sc->tx.txq[i]; + for (tidno = 0, tid = &an->tid[tidno]; + tidno < WME_NUM_TID; tidno++, tid++) { + i = tid->ac->qnum; - spin_lock_bh(&txq->axq_lock); + if (!ATH_TXQ_SETUP(sc, i)) + continue; - list_for_each_entry_safe(ac, - ac_tmp, &txq->axq_acq, list) { - tid = list_first_entry(&ac->tid_q, - struct ath_atx_tid, list); - if (tid && tid->an != an) - continue; - list_del(&ac->list); - ac->sched = false; - - list_for_each_entry_safe(tid, - tid_tmp, &ac->tid_q, list) { - list_del(&tid->list); - tid->sched = false; - ath_tid_drain(sc, txq, tid); - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; - } - } + txq = &sc->tx.txq[i]; + ac = tid->ac; - spin_unlock_bh(&txq->axq_lock); + spin_lock_bh(&txq->axq_lock); + + if (tid->sched) { + list_del(&tid->list); + tid->sched = false; } + + if (ac->sched) { + list_del(&ac->list); + tid->ac->sched = false; + } + + ath_tid_drain(sc, txq, tid); + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + + spin_unlock_bh(&txq->axq_lock); } } diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 3a003e6803a5cf6c3a5040bf2af6399ab3e4eaec..8674a99356af644349a17f0e221ce5a22d27b0aa 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -530,7 +530,7 @@ struct b43_fw_header { /* Size of the data. For ucode and PCM this is in bytes. * For IV this is number-of-ivs. */ __be32 size; -} __attribute__((__packed__)); +} __packed; /* Initial Value file format */ #define B43_IV_OFFSET_MASK 0x7FFF @@ -540,8 +540,8 @@ struct b43_iv { union { __be16 d16; __be32 d32; - } data __attribute__((__packed__)); -} __attribute__((__packed__)); + } data __packed; +} __packed; /* Data structures for DMA transmission, per 80211 core. 
*/ diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index fa40fdfea719da1b24767550d2d17c932ba10993..10d0aaf754c5ab22bca4709e0fcc6813ac71fc0a 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -333,11 +333,11 @@ static inline dma_addr_t dmaaddr; if (tx) { - dmaaddr = ssb_dma_map_single(ring->dev->dev, - buf, len, DMA_TO_DEVICE); + dmaaddr = dma_map_single(ring->dev->dev->dma_dev, + buf, len, DMA_TO_DEVICE); } else { - dmaaddr = ssb_dma_map_single(ring->dev->dev, - buf, len, DMA_FROM_DEVICE); + dmaaddr = dma_map_single(ring->dev->dev->dma_dev, + buf, len, DMA_FROM_DEVICE); } return dmaaddr; @@ -348,11 +348,11 @@ static inline dma_addr_t addr, size_t len, int tx) { if (tx) { - ssb_dma_unmap_single(ring->dev->dev, - addr, len, DMA_TO_DEVICE); + dma_unmap_single(ring->dev->dev->dma_dev, + addr, len, DMA_TO_DEVICE); } else { - ssb_dma_unmap_single(ring->dev->dev, - addr, len, DMA_FROM_DEVICE); + dma_unmap_single(ring->dev->dev->dma_dev, + addr, len, DMA_FROM_DEVICE); } } @@ -361,7 +361,7 @@ static inline dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); - ssb_dma_sync_single_for_cpu(ring->dev->dev, + dma_sync_single_for_cpu(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } @@ -370,8 +370,8 @@ static inline dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); - ssb_dma_sync_single_for_device(ring->dev->dev, - addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(ring->dev->dev->dma_dev, + addr, len, DMA_FROM_DEVICE); } static inline @@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring) */ if (ring->type == B43_DMA_64BIT) flags |= GFP_DMA; - ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, - B43_DMA_RINGMEMSIZE, - &(ring->dmabase), flags); + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, + B43_DMA_RINGMEMSIZE, + &(ring->dmabase), flags); if (!ring->descbase) { b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); return -ENOMEM; @@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring) if (ring->type == B43_DMA_64BIT) flags |= GFP_DMA; - ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE, - ring->descbase, ring->dmabase, flags); + dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase); } /* Reset the RX DMA channel */ @@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring, dma_addr_t addr, size_t buffersize, bool dma_to_device) { - if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) + if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) return 1; switch (ring->type) { @@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ - dma_test = ssb_dma_map_single(dev->dev, - ring->txhdr_cache, - b43_txhdr_size(dev), - DMA_TO_DEVICE); + dma_test = dma_map_single(dev->dev->dma_dev, + ring->txhdr_cache, + b43_txhdr_size(dev), + DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { @@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, if (!ring->txhdr_cache) goto err_kfree_meta; - dma_test = ssb_dma_map_single(dev->dev, - ring->txhdr_cache, - b43_txhdr_size(dev), - DMA_TO_DEVICE); + dma_test = dma_map_single(dev->dev->dma_dev, + ring->txhdr_cache, + b43_txhdr_size(dev), + DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { @@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev 
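/*
 * The b43 dma.c hunks above replace the ssb_dma_* wrappers with the generic
 * DMA API, operating on the underlying struct device (ring->dev->dev->dma_dev).
 * Kernel-style sketch of that pattern (map, check, unmap); the function names
 * and error handling here are illustrative, only the dma_* calls mirror the
 * patch.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Map a driver-owned buffer for a device-bound transfer. */
static int example_map_tx_buffer(struct device *dma_dev, void *buf,
                                 size_t len, dma_addr_t *dmaaddr)
{
        *dmaaddr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, *dmaaddr))
                return -EIO;    /* b43 falls back to a GFP_DMA bounce buffer here */
        return 0;
}

static void example_unmap_tx_buffer(struct device *dma_dev,
                                    dma_addr_t dmaaddr, size_t len)
{
        dma_unmap_single(dma_dev, dmaaddr, len, DMA_TO_DEVICE);
}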
*dev, } } - ssb_dma_unmap_single(dev->dev, - dma_test, b43_txhdr_size(dev), - DMA_TO_DEVICE); + dma_unmap_single(dev->dev->dma_dev, + dma_test, b43_txhdr_size(dev), + DMA_TO_DEVICE); } err = alloc_ringmemory(ring); @@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. */ while (1) { - err = ssb_dma_set_mask(dev->dev, mask); - if (!err) - break; + err = dma_set_mask(dev->dev->dma_dev, mask); + if (!err) { + err = dma_set_coherent_mask(dev->dev->dma_dev, mask); + if (!err) + break; + } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); fallback = 1; @@ -1221,14 +1224,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring, meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. */ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { - priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA); + priv_info->bouncebuffer = kmemdup(skb->data, skb->len, + GFP_ATOMIC | GFP_DMA); if (!priv_info->bouncebuffer) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -ENOMEM; goto out_unmap_hdr; } - memcpy(priv_info->bouncebuffer, skb->data, skb->len); meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1); if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index dc91944d602278d7a6db7ef5c4db59cfe08f5b10..a01c2100f166455d055fa1717574fbb85dfdd3f6 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -67,7 +67,7 @@ struct b43_dmadesc32 { __le32 control; __le32 address; -} __attribute__ ((__packed__)); +} __packed; #define B43_DMA32_DCTL_BYTECNT 0x00001FFF #define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 #define B43_DMA32_DCTL_ADDREXT_SHIFT 16 @@ -140,7 +140,7 @@ struct b43_dmadesc64 { __le32 control1; __le32 address_low; __le32 address_high; -} __attribute__ ((__packed__)); +} __packed; #define B43_DMA64_DCTL0_DTABLEEND 0x10000000 #define B43_DMA64_DCTL0_IRQ 0x20000000 #define B43_DMA64_DCTL0_FRAMEEND 0x40000000 @@ -153,8 +153,8 @@ struct b43_dmadesc_generic { union { struct b43_dmadesc32 dma32; struct b43_dmadesc64 dma64; - } __attribute__ ((__packed__)); -} __attribute__ ((__packed__)); + } __packed; +} __packed; /* Misc DMA constants */ #define B43_DMA_RINGMEMSIZE PAGE_SIZE diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 7965b70efbab5de2d1149eda5eba68f0bd871a9b..20631ae2ddd77bb5893ec101912f79152eb28957 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -108,7 +108,7 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT; module_param_named(verbose, b43_modparam_verbose, int, 0644); MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); -int b43_modparam_pio = B43_PIO_DEFAULT; +static int b43_modparam_pio = B43_PIO_DEFAULT; module_param_named(pio, b43_modparam_pio, int, 0644); MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); @@ -1804,7 +1804,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) dma_reason[2], dma_reason[3], dma_reason[4], dma_reason[5]); b43err(dev->wl, "This device does not support DMA " - "on your system. Please use PIO instead.\n"); + "on your system. It will now be switched to PIO.\n"); /* Fall back to PIO transfers if we get fatal DMA errors! 
*/ dev->use_pio = 1; b43_controller_restart(dev, "DMA error"); diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 29bf34ced86552cb35ec284cf45d297c2f0479f7..0dc33b65e86be53a3de7f3642b8793ab06c9ef9a 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -972,7 +972,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) b43_phy_maskset(dev, 0x04A2, 0xFFF0, 0x000B); if (phy->rev >= 3) { - b43_phy_mask(dev, 0x048A, (u16)~0x8000); + b43_phy_mask(dev, 0x048A, 0x7FFF); b43_phy_maskset(dev, 0x0415, 0x8000, 0x36D8); b43_phy_maskset(dev, 0x0416, 0x8000, 0x36D8); b43_phy_maskset(dev, 0x0417, 0xFE00, 0x016D); diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c index c6afe9d94590f2e15cf66db6d908ded47d94ac92..fd50eb116243487f532bca9ac24623cdb1e81c93 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/b43/phy_lp.c @@ -1145,7 +1145,7 @@ static void lpphy_write_tx_pctl_mode_to_hardware(struct b43_wldev *dev) B43_WARN_ON(1); } b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, - (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, ctl); + ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, ctl); } static void lpphy_set_tx_power_control(struct b43_wldev *dev, @@ -1522,11 +1522,11 @@ static void lpphy_tx_pctl_init_hw(struct b43_wldev *dev) b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xFF); b43_phy_write(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xA); b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, - (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, + ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF); b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xF8FF); b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, - (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, + ~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW); if (dev->phy.rev < 2) { @@ -2698,7 +2698,7 @@ static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev, return B43_TXPWR_RES_DONE; } -void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on) +static void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on) { if (on) { b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8); diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3d6b33775964add64b73f96dab9c5890cd6d4275..5a725703770cb6d9a01236f0121b052f6cd0cc4b 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -509,7 +509,8 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); - b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS, + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); @@ -762,7 +763,7 @@ static void b43_nphy_stop_playback(struct b43_wldev *dev) if (tmp & 0x1) b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); else if (tmp & 0x2) - b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000); + b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); @@ -1009,7 +1010,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev) b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3); b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, - (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV, + 
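/*
 * The phy_g/phy_lp/phy_n/wa.c hunks above rewrite register masks such as
 * (u16)~0x8000 as either the constant 0x7FFF or "~MASK & 0xFFFF".  The small
 * standalone check below verifies that the new spellings yield the same
 * 16-bit value as the old cast; the motivation (avoiding the truncating cast
 * on the int-promoted ~ result) is an assumption, the arithmetic is what the
 * asserts verify.  Not part of the patch.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t bit15 = 0x8000;

        /* ~ promotes to int (0xFFFF7FFF), so mask back down to 16 bits. */
        assert((~bit15 & 0xFFFF) == 0x7FFF);    /* new spelling in the patch */
        assert((uint16_t)~bit15 == 0x7FFF);     /* old spelling with the cast */
        assert((~0xFF00 & 0xFFFF) == 0x00FF);   /* e.g. the wa.c masks */
        return 0;
}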
~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) @@ -1116,7 +1117,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); b43_phy_mask(dev, B43_NPHY_PIL_DW1, - (u16)~B43_NPHY_PIL_DW_64QAM); + ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); @@ -2455,7 +2456,8 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600); regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG); - b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX); + b43_phy_mask(dev, B43_NPHY_BBCFG, + ~B43_NPHY_BBCFG_RSTRX & 0xFFFF); tmp = b43_ntab_read(dev, B43_NTAB16(8, 3)); regs[5] = tmp; @@ -2930,7 +2932,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, tmp[5] = b43_phy_read(dev, rfctl[1]); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, - (u16)~B43_NPHY_RFSEQCA_RXDIS, + ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF, ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, (1 - i)); @@ -3291,7 +3293,7 @@ static void b43_nphy_chanspec_setup(struct b43_wldev *dev, b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); - b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000); + b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); } diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 4e56b7bbcebd0b46c4a56c7822713b62be29446b..45933cf8e8c2297e67728992ebab8891d03b8f16 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -182,6 +182,7 @@ static void b43_sdio_remove(struct sdio_func *func) static const struct sdio_device_id b43_sdio_ids[] = { { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */ + { SDIO_DEVICE(0x0092, 0x0004) }, /* C-guys, Inc. 
EW-CG1102GC */ { }, }; diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c index 97c79161c208925597b09ce4928af861efb3ec47..9a335da65b423129c54c8245dcdbf890fd531701 100644 --- a/drivers/net/wireless/b43/wa.c +++ b/drivers/net/wireless/b43/wa.c @@ -382,7 +382,7 @@ static void b43_wa_altagc(struct b43_wldev *dev) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 3, 25); } - b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, (u16)~0xFF00, 0x5700); + b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, 0x00FF, 0x5700); b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x007F, 0x000F); b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x3F80, 0x2B80); b43_phy_maskset(dev, B43_PHY_ANTWRSETT, 0xF0FF, 0x0300); @@ -400,9 +400,9 @@ static void b43_wa_altagc(struct b43_wldev *dev) b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x00FF, 0x0020); b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x3F00, 0x0200); b43_phy_maskset(dev, B43_PHY_OFDM(0x82), ~0x00FF, 0x002E); - b43_phy_maskset(dev, B43_PHY_OFDM(0x96), (u16)~0xFF00, 0x1A00); + b43_phy_maskset(dev, B43_PHY_OFDM(0x96), 0x00FF, 0x1A00); b43_phy_maskset(dev, B43_PHY_OFDM(0x81), ~0x00FF, 0x0028); - b43_phy_maskset(dev, B43_PHY_OFDM(0x81), (u16)~0xFF00, 0x2C00); + b43_phy_maskset(dev, B43_PHY_OFDM(0x81), 0x00FF, 0x2C00); if (phy->rev == 1) { b43_phy_write(dev, B43_PHY_PEAK_COUNT, 0x092B); b43_phy_maskset(dev, B43_PHY_OFDM(0x1B), ~0x001E, 0x0002); @@ -412,7 +412,7 @@ static void b43_wa_altagc(struct b43_wldev *dev) b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, ~0x000F, 0x0004); if (phy->rev >= 6) { b43_phy_write(dev, B43_PHY_OFDM(0x22), 0x287A); - b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, (u16)~0xF000, 0x3000); + b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, 0x0FFF, 0x3000); } } b43_phy_maskset(dev, B43_PHY_DIVSRCHIDX, 0x8080, 0x7874); diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h index d23ff9fe0c9e3fe1acab1c0d383f8c14630844cf..d4cf9b390af3348fdef0c13e00d52d6f0b87e944 100644 --- a/drivers/net/wireless/b43/xmit.h +++ b/drivers/net/wireless/b43/xmit.h @@ -10,8 +10,8 @@ union { \ __le32 data; \ __u8 raw[size]; \ - } __attribute__((__packed__)); \ - } __attribute__((__packed__)) + } __packed; \ + } __packed /* struct b43_plcp_hdr4 */ _b43_declare_plcp_hdr(4); @@ -57,7 +57,7 @@ struct b43_txhdr { __u8 rts_frame[16]; /* The RTS frame (if used) */ PAD_BYTES(2); struct b43_plcp_hdr6 plcp; /* Main PLCP header */ - } new_format __attribute__ ((__packed__)); + } new_format __packed; /* The old r351 format. 
*/ struct { @@ -68,10 +68,10 @@ struct b43_txhdr { __u8 rts_frame[16]; /* The RTS frame (if used) */ PAD_BYTES(2); struct b43_plcp_hdr6 plcp; /* Main PLCP header */ - } old_format __attribute__ ((__packed__)); + } old_format __packed; - } __attribute__ ((__packed__)); -} __attribute__ ((__packed__)); + } __packed; +} __packed; /* MAC TX control */ #define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ @@ -218,20 +218,20 @@ struct b43_rxhdr_fw4 { struct { __u8 jssi; /* PHY RX Status 1: JSSI */ __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ - } __attribute__ ((__packed__)); + } __packed; /* RSSI for N-PHYs */ struct { __s8 power0; /* PHY RX Status 1: Power 0 */ __s8 power1; /* PHY RX Status 1: Power 1 */ - } __attribute__ ((__packed__)); - } __attribute__ ((__packed__)); + } __packed; + } __packed; __le16 phy_status2; /* PHY RX Status 2 */ __le16 phy_status3; /* PHY RX Status 3 */ __le32 mac_status; /* MAC RX status */ __le16 mac_time; __le16 channel; -} __attribute__ ((__packed__)); +} __packed; /* PHY RX Status 0 */ #define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h index 89fe2f972c727913b06c2474fe24b04bc92b735c..c81b2f53b0c508e80a5833baa78c93c989854c8f 100644 --- a/drivers/net/wireless/b43legacy/b43legacy.h +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -372,7 +372,7 @@ struct b43legacy_fw_header { /* Size of the data. For ucode and PCM this is in bytes. * For IV this is number-of-ivs. */ __be32 size; -} __attribute__((__packed__)); +} __packed; /* Initial Value file format */ #define B43legacy_IV_OFFSET_MASK 0x7FFF @@ -382,8 +382,8 @@ struct b43legacy_iv { union { __be16 d16; __be32 d32; - } data __attribute__((__packed__)); -} __attribute__((__packed__)); + } data __packed; +} __packed; #define B43legacy_PHYMODE(phytype) (1 << (phytype)) #define B43legacy_PHYMODE_B B43legacy_PHYMODE \ diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index e91520d0312e1d18d8b3c944afaa72975686d1d7..e03e01d0bc355bd5a72d282b62dd39526f0cbdad 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -394,11 +394,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, dma_addr_t dmaaddr; if (tx) - dmaaddr = ssb_dma_map_single(ring->dev->dev, + dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_TO_DEVICE); else - dmaaddr = ssb_dma_map_single(ring->dev->dev, + dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_FROM_DEVICE); @@ -412,11 +412,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring, int tx) { if (tx) - ssb_dma_unmap_single(ring->dev->dev, + dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_TO_DEVICE); else - ssb_dma_unmap_single(ring->dev->dev, + dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } @@ -428,8 +428,8 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, { B43legacy_WARN_ON(ring->tx); - ssb_dma_sync_single_for_cpu(ring->dev->dev, - addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(ring->dev->dev->dma_dev, + addr, len, DMA_FROM_DEVICE); } static inline @@ -439,8 +439,8 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring, { B43legacy_WARN_ON(ring->tx); - ssb_dma_sync_single_for_device(ring->dev->dev, - addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(ring->dev->dev->dma_dev, + addr, len, DMA_FROM_DEVICE); } static inline @@ -460,10 +460,10 @@ void 
free_descriptor_buffer(struct b43legacy_dmaring *ring, static int alloc_ringmemory(struct b43legacy_dmaring *ring) { /* GFP flags must match the flags in free_ringmemory()! */ - ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, - B43legacy_DMA_RINGMEMSIZE, - &(ring->dmabase), - GFP_KERNEL); + ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, + B43legacy_DMA_RINGMEMSIZE, + &(ring->dmabase), + GFP_KERNEL); if (!ring->descbase) { b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" " failed\n"); @@ -476,8 +476,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring) static void free_ringmemory(struct b43legacy_dmaring *ring) { - ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE, - ring->descbase, ring->dmabase, GFP_KERNEL); + dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase); } /* Reset the RX DMA channel */ @@ -589,7 +589,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, size_t buffersize, bool dma_to_device) { - if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) + if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) return 1; switch (ring->type) { @@ -906,7 +906,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ - dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache, + dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE); @@ -920,7 +920,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, if (!ring->txhdr_cache) goto err_kfree_meta; - dma_test = ssb_dma_map_single(dev->dev, + dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, sizeof(struct b43legacy_txhdr_fw3), DMA_TO_DEVICE); @@ -930,9 +930,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, goto err_kfree_txhdr_cache; } - ssb_dma_unmap_single(dev->dev, dma_test, - sizeof(struct b43legacy_txhdr_fw3), - DMA_TO_DEVICE); + dma_unmap_single(dev->dev->dma_dev, dma_test, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); } ring->nr_slots = nr_slots; @@ -1040,9 +1040,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. 
*/ while (1) { - err = ssb_dma_set_mask(dev->dev, mask); - if (!err) - break; + err = dma_set_mask(dev->dev->dma_dev, mask); + if (!err) { + err = dma_set_coherent_mask(dev->dev->dma_dev, mask); + if (!err) + break; + } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); fallback = 1; diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h index f9681041c2d8da6d151cb62a4e9ee36022377d7f..f89c34226288c109aa72dad5eb507773e2b67816 100644 --- a/drivers/net/wireless/b43legacy/dma.h +++ b/drivers/net/wireless/b43legacy/dma.h @@ -72,7 +72,7 @@ struct b43legacy_dmadesc32 { __le32 control; __le32 address; -} __attribute__((__packed__)); +} __packed; #define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF #define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 #define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 @@ -147,7 +147,7 @@ struct b43legacy_dmadesc64 { __le32 control1; __le32 address_low; __le32 address_high; -} __attribute__((__packed__)); +} __packed; #define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 #define B43legacy_DMA64_DCTL0_IRQ 0x20000000 #define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 @@ -162,8 +162,8 @@ struct b43legacy_dmadesc_generic { union { struct b43legacy_dmadesc32 dma32; struct b43legacy_dmadesc64 dma64; - } __attribute__((__packed__)); -} __attribute__((__packed__)); + } __packed; +} __packed; /* Misc DMA constants */ diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h index 91633087a20bfd28279a2972e3d4f8cc85d434ad..289db00a4a7b17e3f72b67db58c0eda6d7318390 100644 --- a/drivers/net/wireless/b43legacy/xmit.h +++ b/drivers/net/wireless/b43legacy/xmit.h @@ -9,8 +9,8 @@ union { \ __le32 data; \ __u8 raw[size]; \ - } __attribute__((__packed__)); \ - } __attribute__((__packed__)) + } __packed; \ + } __packed /* struct b43legacy_plcp_hdr4 */ _b43legacy_declare_plcp_hdr(4); @@ -39,7 +39,7 @@ struct b43legacy_txhdr_fw3 { struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ __u8 rts_frame[18]; /* The RTS frame (if used) */ struct b43legacy_plcp_hdr6 plcp; -} __attribute__((__packed__)); +} __packed; /* MAC TX control */ #define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ @@ -123,7 +123,7 @@ struct b43legacy_hwtxstatus { __le16 seq; u8 phy_stat; PAD_BYTES(1); -} __attribute__((__packed__)); +} __packed; /* Receive header for v3 firmware. 
*/ @@ -138,7 +138,7 @@ struct b43legacy_rxhdr_fw3 { __le16 mac_status; /* MAC RX status */ __le16 mac_time; __le16 channel; -} __attribute__((__packed__)); +} __packed; /* PHY RX Status 0 */ diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h index 7f9d8d976aa85a4ced60ba07a98bc3b37a22d727..ed98ce7c8f6508e47a22cc9ed4cbc583fc7b7d7f 100644 --- a/drivers/net/wireless/hostap/hostap_80211.h +++ b/drivers/net/wireless/hostap/hostap_80211.h @@ -19,35 +19,35 @@ struct hostap_ieee80211_mgmt { __le16 status_code; /* possibly followed by Challenge text */ u8 variable[0]; - } __attribute__ ((packed)) auth; + } __packed auth; struct { __le16 reason_code; - } __attribute__ ((packed)) deauth; + } __packed deauth; struct { __le16 capab_info; __le16 listen_interval; /* followed by SSID and Supported rates */ u8 variable[0]; - } __attribute__ ((packed)) assoc_req; + } __packed assoc_req; struct { __le16 capab_info; __le16 status_code; __le16 aid; /* followed by Supported rates */ u8 variable[0]; - } __attribute__ ((packed)) assoc_resp, reassoc_resp; + } __packed assoc_resp, reassoc_resp; struct { __le16 capab_info; __le16 listen_interval; u8 current_ap[6]; /* followed by SSID and Supported rates */ u8 variable[0]; - } __attribute__ ((packed)) reassoc_req; + } __packed reassoc_req; struct { __le16 reason_code; - } __attribute__ ((packed)) disassoc; + } __packed disassoc; struct { - } __attribute__ ((packed)) probe_req; + } __packed probe_req; struct { u8 timestamp[8]; __le16 beacon_int; @@ -55,9 +55,9 @@ struct hostap_ieee80211_mgmt { /* followed by some of SSID, Supported rates, * FH Params, DS Params, CF Params, IBSS Params, TIM */ u8 variable[0]; - } __attribute__ ((packed)) beacon, probe_resp; + } __packed beacon, probe_resp; } u; -} __attribute__ ((packed)); +} __packed; #define IEEE80211_MGMT_HDR_LEN 24 diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 231dbd77f5f5b31669116799ceeab36467b79963..9cadaa296faccaf7f847a919701c38c51d0cbf7c 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -688,7 +688,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) struct ap_data *ap = data; struct net_device *dev = ap->local->dev; struct ieee80211_hdr *hdr; - u16 fc, status; + u16 status; __le16 *pos; struct sta_info *sta = NULL; char *txt = NULL; @@ -699,7 +699,6 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) } hdr = (struct ieee80211_hdr *) skb->data; - fc = le16_to_cpu(hdr->frame_control); if ((!ieee80211_is_assoc_resp(hdr->frame_control) && !ieee80211_is_reassoc_resp(hdr->frame_control)) || skb->len < IEEE80211_MGMT_HDR_LEN + 4) { diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h index 90b64b09200716c31cb9af9a4e2761ba0ba7100d..4230102ac9e431720e7aa0c4f3aaf153a26cfd8f 100644 --- a/drivers/net/wireless/hostap/hostap_common.h +++ b/drivers/net/wireless/hostap/hostap_common.h @@ -179,7 +179,7 @@ struct hfa384x_comp_ident __le16 variant; __le16 major; __le16 minor; -} __attribute__ ((packed)); +} __packed; #define HFA384X_COMP_ID_PRI 0x15 #define HFA384X_COMP_ID_STA 0x1f @@ -192,14 +192,14 @@ struct hfa384x_sup_range __le16 variant; __le16 bottom; __le16 top; -} __attribute__ ((packed)); +} __packed; struct hfa384x_build_id { __le16 pri_seq; __le16 sec_seq; -} __attribute__ ((packed)); +} __packed; /* FD01 - Download Buffer */ struct hfa384x_rid_download_buffer @@ 
-207,14 +207,14 @@ struct hfa384x_rid_download_buffer __le16 page; __le16 offset; __le16 length; -} __attribute__ ((packed)); +} __packed; /* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */ struct hfa384x_comms_quality { __le16 comm_qual; /* 0 .. 92 */ __le16 signal_level; /* 27 .. 154 */ __le16 noise_level; /* 27 .. 154 */ -} __attribute__ ((packed)); +} __packed; /* netdevice private ioctls (used, e.g., with iwpriv from user space) */ diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index eb57d1ea361f73738ee22451e29e6b799a82db81..25a2722c8a986ce35a8a37a9076814095cc70ee3 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -186,7 +186,7 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr, return -ENOBUFS; /* verify that there is room for wds# postfix in the interface name */ - if (strlen(local->dev->name) > IFNAMSIZ - 5) { + if (strlen(local->dev->name) >= IFNAMSIZ - 5) { printk(KERN_DEBUG "'%s' too long base device name\n", local->dev->name); return -EINVAL; @@ -741,9 +741,7 @@ void hostap_set_multicast_list_queue(struct work_struct *work) local_info_t *local = container_of(work, local_info_t, set_multicast_list_queue); struct net_device *dev = local->dev; - struct hostap_interface *iface; - iface = netdev_priv(dev); if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, local->is_promisc)) { printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 1ba33be98b254ce9ef8a56fcb6c55501d14a9839..1c66b3c1030d7db008f400d5774d58a23490f57a 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -31,14 +31,14 @@ struct linux_wlan_ng_val { u32 did; u16 status, len; u32 data; -} __attribute__ ((packed)); +} __packed; struct linux_wlan_ng_prism_hdr { u32 msgcode, msglen; char devname[16]; struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal, noise, rate, istx, frmlen; -} __attribute__ ((packed)); +} __packed; struct linux_wlan_ng_cap_hdr { __be32 version; @@ -55,7 +55,7 @@ struct linux_wlan_ng_cap_hdr { __be32 ssi_noise; __be32 preamble; __be32 encoding; -} __attribute__ ((packed)); +} __packed; struct hostap_radiotap_rx { struct ieee80211_radiotap_header hdr; @@ -66,7 +66,7 @@ struct hostap_radiotap_rx { __le16 chan_flags; s8 dbm_antsignal; s8 dbm_antnoise; -} __attribute__ ((packed)); +} __packed; #define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */ #define LWNG_CAPHDR_VERSION 0x80211001 @@ -97,7 +97,7 @@ struct hfa384x_rx_frame { __be16 len; /* followed by frame data; max 2304 bytes */ -} __attribute__ ((packed)); +} __packed; struct hfa384x_tx_frame { @@ -126,14 +126,14 @@ struct hfa384x_tx_frame { __be16 len; /* followed by frame data; max 2304 bytes */ -} __attribute__ ((packed)); +} __packed; struct hfa384x_rid_hdr { __le16 len; __le16 rid; -} __attribute__ ((packed)); +} __packed; /* Macro for converting signal levels (range 27 .. 
154) to wireless ext @@ -145,24 +145,24 @@ struct hfa384x_rid_hdr struct hfa384x_scan_request { __le16 channel_list; __le16 txrate; /* HFA384X_RATES_* */ -} __attribute__ ((packed)); +} __packed; struct hfa384x_hostscan_request { __le16 channel_list; __le16 txrate; __le16 target_ssid_len; u8 target_ssid[32]; -} __attribute__ ((packed)); +} __packed; struct hfa384x_join_request { u8 bssid[6]; __le16 channel; -} __attribute__ ((packed)); +} __packed; struct hfa384x_info_frame { __le16 len; __le16 type; -} __attribute__ ((packed)); +} __packed; struct hfa384x_comm_tallies { __le16 tx_unicast_frames; @@ -186,7 +186,7 @@ struct hfa384x_comm_tallies { __le16 rx_discards_wep_undecryptable; __le16 rx_message_in_msg_fragments; __le16 rx_message_in_bad_msg_fragments; -} __attribute__ ((packed)); +} __packed; struct hfa384x_comm_tallies32 { __le32 tx_unicast_frames; @@ -210,7 +210,7 @@ struct hfa384x_comm_tallies32 { __le32 rx_discards_wep_undecryptable; __le32 rx_message_in_msg_fragments; __le32 rx_message_in_bad_msg_fragments; -} __attribute__ ((packed)); +} __packed; struct hfa384x_scan_result_hdr { __le16 reserved; @@ -219,7 +219,7 @@ struct hfa384x_scan_result_hdr { #define HFA384X_SCAN_HOST_INITIATED 1 #define HFA384X_SCAN_FIRMWARE_INITIATED 2 #define HFA384X_SCAN_INQUIRY_FROM_HOST 3 -} __attribute__ ((packed)); +} __packed; #define HFA384X_SCAN_MAX_RESULTS 32 @@ -234,7 +234,7 @@ struct hfa384x_scan_result { u8 ssid[32]; u8 sup_rates[10]; __le16 rate; -} __attribute__ ((packed)); +} __packed; struct hfa384x_hostscan_result { __le16 chid; @@ -248,7 +248,7 @@ struct hfa384x_hostscan_result { u8 sup_rates[10]; __le16 rate; __le16 atim; -} __attribute__ ((packed)); +} __packed; struct comm_tallies_sums { unsigned int tx_unicast_frames; diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 7f0d98b885bc9248f0958de8c77eafdb942f6d85..c24c5efeae1f211e3a68c4b30ebd1dadd840ccf6 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -174,7 +174,7 @@ that only one external action is invoked at a time. 
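A recurring change in these hunks is replacing the open-coded GCC attribute with the kernel's __packed shorthand, which include/linux/compiler-gcc.h defines as __attribute__((packed)), so the generated structure layout is unchanged; checkpatch also prefers the short spelling. A minimal sketch of the idiom, using a hypothetical wire-format header rather than a structure from this patch:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* hypothetical on-the-wire header, not taken from any driver above */
	struct example_wire_hdr {
		u8 type;
		__le32 len;	/* would be padded out to offset 4 without the annotation */
		u8 payload[0];
	} __packed;		/* identical layout to __attribute__((packed)) */

With the annotation sizeof(struct example_wire_hdr) is 5; without it the compiler is free to pad the __le32 to its natural alignment, which is exactly what these wire-format structures must avoid.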
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" -struct pm_qos_request_list ipw2100_pm_qos_req; +static struct pm_qos_request_list ipw2100_pm_qos_req; /* Debugging stuff */ #ifdef CONFIG_IPW2100_DEBUG @@ -3467,10 +3467,8 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) dma_addr_t p; priv->msg_buffers = - (struct ipw2100_tx_packet *)kmalloc(IPW_COMMAND_POOL_SIZE * - sizeof(struct - ipw2100_tx_packet), - GFP_KERNEL); + kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet), + GFP_KERNEL); if (!priv->msg_buffers) { printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg " "buffers.\n", priv->net_dev->name); @@ -4499,10 +4497,8 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) } priv->tx_buffers = - (struct ipw2100_tx_packet *)kmalloc(TX_PENDED_QUEUE_LENGTH * - sizeof(struct - ipw2100_tx_packet), - GFP_ATOMIC); + kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet), + GFP_ATOMIC); if (!priv->tx_buffers) { printk(KERN_ERR DRV_NAME ": %s: alloc failed form tx buffers.\n", @@ -4651,9 +4647,9 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv) /* * allocate packets */ - priv->rx_buffers = (struct ipw2100_rx_packet *) - kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet), - GFP_KERNEL); + priv->rx_buffers = kmalloc(RX_QUEUE_LENGTH * + sizeof(struct ipw2100_rx_packet), + GFP_KERNEL); if (!priv->rx_buffers) { IPW_DEBUG_INFO("can't allocate rx packet buffer table\n"); @@ -5233,7 +5229,7 @@ struct security_info_params { u8 auth_mode; u8 replay_counters_number; u8 unicast_using_group; -} __attribute__ ((packed)); +} __packed; static int ipw2100_set_security_information(struct ipw2100_priv *priv, int auth_mode, @@ -8475,7 +8471,7 @@ struct ipw2100_fw_header { short mode; unsigned int fw_size; unsigned int uc_size; -} __attribute__ ((packed)); +} __packed; static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) { diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h index 1eab0d698f4df9046f6a637e061f2177e93f09b6..838002b4881e8d0ff8da96c5f84354a8d0480c41 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/ipw2x00/ipw2100.h @@ -164,7 +164,7 @@ struct bd_status { } fields; u8 field; } info; -} __attribute__ ((packed)); +} __packed; struct ipw2100_bd { u32 host_addr; @@ -174,7 +174,7 @@ struct ipw2100_bd { * 1st TBD) */ u8 num_fragments; u8 reserved[6]; -} __attribute__ ((packed)); +} __packed; #define IPW_BD_QUEUE_LENGTH(n) (1<data.length) { - buf = kmalloc(wrqu->data.length, GFP_KERNEL); + buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); if (buf == NULL) { err = -ENOMEM; goto out; } - memcpy(buf, extra, wrqu->data.length); kfree(ieee->wpa_ie); ieee->wpa_ie = buf; ieee->wpa_ie_len = wrqu->data.length; @@ -12083,7 +12082,7 @@ module_param(auto_create, int, 0444); MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); module_param_named(led, led_support, int, 0444); -MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)"); +MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)"); module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "debug output mask"); diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h index bf0eeb2e873ab17e9fa7df0a673038b648638fc0..d7d049c7a4fa93ae3be77d11632bda0a4c4863ac 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/ipw2x00/ipw2200.h @@ 
-388,7 +388,7 @@ struct clx2_queue { dma_addr_t dma_addr; /**< physical addr for BD's */ int low_mark; /**< low watermark, resume queue if free space more than this */ int high_mark; /**< high watermark, stop queue if free space less than this */ -} __attribute__ ((packed)); /* XXX */ +} __packed; /* XXX */ struct machdr32 { __le16 frame_ctl; @@ -399,7 +399,7 @@ struct machdr32 { __le16 seq_ctrl; // more endians! u8 addr4[MACADRR_BYTE_LEN]; __le16 qos_ctrl; -} __attribute__ ((packed)); +} __packed; struct machdr30 { __le16 frame_ctl; @@ -409,7 +409,7 @@ struct machdr30 { u8 addr3[MACADRR_BYTE_LEN]; __le16 seq_ctrl; // more endians! u8 addr4[MACADRR_BYTE_LEN]; -} __attribute__ ((packed)); +} __packed; struct machdr26 { __le16 frame_ctl; @@ -419,7 +419,7 @@ struct machdr26 { u8 addr3[MACADRR_BYTE_LEN]; __le16 seq_ctrl; // more endians! __le16 qos_ctrl; -} __attribute__ ((packed)); +} __packed; struct machdr24 { __le16 frame_ctl; @@ -428,20 +428,20 @@ struct machdr24 { u8 addr2[MACADRR_BYTE_LEN]; u8 addr3[MACADRR_BYTE_LEN]; __le16 seq_ctrl; // more endians! -} __attribute__ ((packed)); +} __packed; // TX TFD with 32 byte MAC Header struct tx_tfd_32 { struct machdr32 mchdr; // 32 __le32 uivplaceholder[2]; // 8 -} __attribute__ ((packed)); +} __packed; // TX TFD with 30 byte MAC Header struct tx_tfd_30 { struct machdr30 mchdr; // 30 u8 reserved[2]; // 2 __le32 uivplaceholder[2]; // 8 -} __attribute__ ((packed)); +} __packed; // tx tfd with 26 byte mac header struct tx_tfd_26 { @@ -449,14 +449,14 @@ struct tx_tfd_26 { u8 reserved1[2]; // 2 __le32 uivplaceholder[2]; // 8 u8 reserved2[4]; // 4 -} __attribute__ ((packed)); +} __packed; // tx tfd with 24 byte mac header struct tx_tfd_24 { struct machdr24 mchdr; // 24 __le32 uivplaceholder[2]; // 8 u8 reserved[8]; // 8 -} __attribute__ ((packed)); +} __packed; #define DCT_WEP_KEY_FIELD_LENGTH 16 @@ -465,7 +465,7 @@ struct tfd_command { u8 length; __le16 reserved; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct tfd_data { /* Header */ @@ -504,14 +504,14 @@ struct tfd_data { __le32 num_chunks; __le32 chunk_ptr[NUM_TFD_CHUNKS]; __le16 chunk_len[NUM_TFD_CHUNKS]; -} __attribute__ ((packed)); +} __packed; struct txrx_control_flags { u8 message_type; u8 rx_seq_num; u8 control_bits; u8 reserved; -} __attribute__ ((packed)); +} __packed; #define TFD_SIZE 128 #define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) @@ -523,7 +523,7 @@ struct tfd_frame { struct tfd_command cmd; u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; } u; -} __attribute__ ((packed)); +} __packed; typedef void destructor_func(const void *); @@ -559,7 +559,7 @@ struct rate_histogram { __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; } failed; -} __attribute__ ((packed)); +} __packed; /* statistics command response */ struct ipw_cmd_stats { @@ -586,13 +586,13 @@ struct ipw_cmd_stats { __le16 rx_autodetec_no_ofdm; __le16 rx_autodetec_no_barker; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct notif_channel_result { u8 channel_num; struct ipw_cmd_stats stats; u8 uReserved; -} __attribute__ ((packed)); +} __packed; #define SCAN_COMPLETED_STATUS_COMPLETE 1 #define SCAN_COMPLETED_STATUS_ABORTED 2 @@ -602,24 +602,24 @@ struct notif_scan_complete { u8 num_channels; u8 status; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct notif_frag_length { __le16 frag_length; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct notif_beacon_state { __le32 state; __le32 number; -} __attribute__ 
((packed)); +} __packed; struct notif_tgi_tx_key { u8 key_state; u8 security_type; u8 station_index; u8 reserved; -} __attribute__ ((packed)); +} __packed; #define SILENCE_OVER_THRESH (1) #define SILENCE_UNDER_THRESH (2) @@ -631,25 +631,25 @@ struct notif_link_deterioration { struct rate_histogram histogram; u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ __le16 silence_count; -} __attribute__ ((packed)); +} __packed; struct notif_association { u8 state; -} __attribute__ ((packed)); +} __packed; struct notif_authenticate { u8 state; struct machdr24 addr; __le16 status; -} __attribute__ ((packed)); +} __packed; struct notif_calibration { u8 data[104]; -} __attribute__ ((packed)); +} __packed; struct notif_noise { __le32 value; -} __attribute__ ((packed)); +} __packed; struct ipw_rx_notification { u8 reserved[8]; @@ -669,7 +669,7 @@ struct ipw_rx_notification { struct notif_noise noise; u8 raw[0]; } u; -} __attribute__ ((packed)); +} __packed; struct ipw_rx_frame { __le32 reserved1; @@ -692,14 +692,14 @@ struct ipw_rx_frame { u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen __le16 length; u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct ipw_rx_header { u8 message_type; u8 rx_seq_num; u8 control_bits; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_rx_packet { struct ipw_rx_header header; @@ -707,7 +707,7 @@ struct ipw_rx_packet { struct ipw_rx_frame frame; struct ipw_rx_notification notification; } u; -} __attribute__ ((packed)); +} __packed; #define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12 #define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \ @@ -717,7 +717,7 @@ struct ipw_rx_mem_buffer { dma_addr_t dma_addr; struct sk_buff *skb; struct list_head list; -}; /* Not transferred over network, so not __attribute__ ((packed)) */ +}; /* Not transferred over network, so not __packed */ struct ipw_rx_queue { struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; @@ -730,7 +730,7 @@ struct ipw_rx_queue { struct list_head rx_free; /* Own an SKBs */ struct list_head rx_used; /* No SKB allocated */ spinlock_t lock; -}; /* Not transferred over network, so not __attribute__ ((packed)) */ +}; /* Not transferred over network, so not __packed */ struct alive_command_responce { u8 alive_command; @@ -745,21 +745,21 @@ struct alive_command_responce { __le16 reserved4; u8 time_stamp[5]; /* month, day, year, hours, minutes */ u8 ucode_valid; -} __attribute__ ((packed)); +} __packed; #define IPW_MAX_RATES 12 struct ipw_rates { u8 num_rates; u8 rates[IPW_MAX_RATES]; -} __attribute__ ((packed)); +} __packed; struct command_block { unsigned int control; u32 source_addr; u32 dest_addr; unsigned int status; -} __attribute__ ((packed)); +} __packed; #define CB_NUMBER_OF_ELEMENTS_SMALL 64 struct fw_image_desc { @@ -792,7 +792,7 @@ struct ipw_sys_config { u8 accept_all_mgmt_frames; u8 pass_noise_stats_to_host; u8 reserved3; -} __attribute__ ((packed)); +} __packed; struct ipw_multicast_addr { u8 num_of_multicast_addresses; @@ -801,7 +801,7 @@ struct ipw_multicast_addr { u8 mac2[6]; u8 mac3[6]; u8 mac4[6]; -} __attribute__ ((packed)); +} __packed; #define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */ #define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */ @@ -822,7 +822,7 @@ struct ipw_wep_key { u8 key_index; u8 key_size; u8 key[16]; -} __attribute__ ((packed)); +} __packed; struct ipw_tgi_tx_key { u8 key_id; @@ -831,7 +831,7 @@ struct ipw_tgi_tx_key { u8 flags; u8 key[16]; __le32 tx_counter[2]; -} __attribute__ ((packed)); +} __packed; 
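The wpa_ie change a few hunks back collapses the kmalloc()+memcpy() pair into kmemdup(), which allocates and copies in one step and fails the same way (NULL on allocation failure). A minimal sketch of that pattern, assuming a hypothetical set_ie() helper rather than the driver's actual wireless-extensions handler:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static int set_ie(u8 **dst, size_t *dst_len, const u8 *src, size_t len)
	{
		u8 *buf;

		buf = kmemdup(src, len, GFP_KERNEL);	/* allocate + copy in one call */
		if (!buf)
			return -ENOMEM;

		kfree(*dst);		/* kfree(NULL) is a no-op */
		*dst = buf;
		*dst_len = len;
		return 0;
	}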
#define IPW_SCAN_CHANNELS 54 @@ -840,7 +840,7 @@ struct ipw_scan_request { __le16 dwell_time; u8 channels_list[IPW_SCAN_CHANNELS]; u8 channels_reserved[3]; -} __attribute__ ((packed)); +} __packed; enum { IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0, @@ -857,7 +857,7 @@ struct ipw_scan_request_ext { u8 scan_type[IPW_SCAN_CHANNELS / 2]; u8 reserved; __le16 dwell_time[IPW_SCAN_TYPES]; -} __attribute__ ((packed)); +} __packed; static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) { @@ -902,7 +902,7 @@ struct ipw_associate { u8 smr; u8 reserved1; __le16 reserved2; -} __attribute__ ((packed)); +} __packed; struct ipw_supported_rates { u8 ieee_mode; @@ -910,36 +910,36 @@ struct ipw_supported_rates { u8 purpose; u8 reserved; u8 supported_rates[IPW_MAX_RATES]; -} __attribute__ ((packed)); +} __packed; struct ipw_rts_threshold { __le16 rts_threshold; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_frag_threshold { __le16 frag_threshold; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_retry_limit { u8 short_retry_limit; u8 long_retry_limit; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_dino_config { __le32 dino_config_addr; __le16 dino_config_size; u8 dino_response; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_aironet_info { u8 id; u8 length; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_rx_key { u8 station_index; @@ -950,25 +950,25 @@ struct ipw_rx_key { u8 station_address[6]; u8 key_index; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct ipw_country_channel_info { u8 first_channel; u8 no_channels; s8 max_tx_power; -} __attribute__ ((packed)); +} __packed; struct ipw_country_info { u8 id; u8 length; u8 country_str[3]; struct ipw_country_channel_info groups[7]; -} __attribute__ ((packed)); +} __packed; struct ipw_channel_tx_power { u8 channel_number; s8 tx_power; -} __attribute__ ((packed)); +} __packed; #define SCAN_ASSOCIATED_INTERVAL (HZ) #define SCAN_INTERVAL (HZ / 10) @@ -979,18 +979,18 @@ struct ipw_tx_power { u8 num_channels; u8 ieee_mode; struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; -} __attribute__ ((packed)); +} __packed; struct ipw_rsn_capabilities { u8 id; u8 length; __le16 version; -} __attribute__ ((packed)); +} __packed; struct ipw_sensitivity_calib { __le16 beacon_rssi_raw; __le16 reserved; -} __attribute__ ((packed)); +} __packed; /** * Host command structure. @@ -1019,7 +1019,7 @@ struct ipw_cmd { /* XXX */ * nParams=(len+3)/4+status_len */ u32 param[0]; -} __attribute__ ((packed)); +} __packed; #define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ @@ -1114,7 +1114,7 @@ struct ipw_event { /* XXX */ u32 event; u32 time; u32 data; -} __attribute__ ((packed)); +} __packed; struct ipw_fw_error { /* XXX */ unsigned long jiffies; @@ -1125,7 +1125,7 @@ struct ipw_fw_error { /* XXX */ struct ipw_error_elem *elem; struct ipw_event *log; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; #ifdef CONFIG_IPW2200_PROMISCUOUS @@ -1170,7 +1170,7 @@ struct ipw_rt_hdr { s8 rt_dbmnoise; u8 rt_antenna; /* antenna number */ u8 payload[0]; /* payload... 
*/ -} __attribute__ ((packed)); +} __packed; #endif struct ipw_priv { @@ -1957,7 +1957,7 @@ enum { struct ipw_fixed_rate { __le16 tx_rates; __le16 reserved; -} __attribute__ ((packed)); +} __packed; #define IPW_INDIRECT_ADDR_MASK (~0x3ul) @@ -1966,14 +1966,14 @@ struct host_cmd { u8 len; u16 reserved; u32 *param; -} __attribute__ ((packed)); /* XXX */ +} __packed; /* XXX */ struct cmdlog_host_cmd { u8 cmd; u8 len; __le16 reserved; char param[124]; -} __attribute__ ((packed)); +} __packed; struct ipw_cmd_log { unsigned long jiffies; diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h index 284b0e4cb815545369652c3c2222314e7557981f..70f5586d96bdeb060195763d8409fb4f64c27458 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/ipw2x00/libipw.h @@ -154,7 +154,7 @@ struct libipw_snap_hdr { u8 ctrl; /* always 0x03 */ u8 oui[P80211_OUI_LEN]; /* organizational universal id */ -} __attribute__ ((packed)); +} __packed; #define SNAP_SIZE sizeof(struct libipw_snap_hdr) @@ -323,7 +323,7 @@ struct libipw_security { u8 keys[WEP_KEYS][SCM_KEY_LEN]; u8 level; u16 flags; -} __attribute__ ((packed)); +} __packed; /* @@ -347,7 +347,7 @@ struct libipw_hdr_1addr { __le16 duration_id; u8 addr1[ETH_ALEN]; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_hdr_2addr { __le16 frame_ctl; @@ -355,7 +355,7 @@ struct libipw_hdr_2addr { u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_hdr_3addr { __le16 frame_ctl; @@ -365,7 +365,7 @@ struct libipw_hdr_3addr { u8 addr3[ETH_ALEN]; __le16 seq_ctl; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_hdr_4addr { __le16 frame_ctl; @@ -376,7 +376,7 @@ struct libipw_hdr_4addr { __le16 seq_ctl; u8 addr4[ETH_ALEN]; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_hdr_3addrqos { __le16 frame_ctl; @@ -387,13 +387,13 @@ struct libipw_hdr_3addrqos { __le16 seq_ctl; u8 payload[0]; __le16 qos_ctl; -} __attribute__ ((packed)); +} __packed; struct libipw_info_element { u8 id; u8 len; u8 data[0]; -} __attribute__ ((packed)); +} __packed; /* * These are the data types that can make up management packets @@ -406,7 +406,7 @@ struct libipw_info_element { u16 listen_interval; struct { u16 association_id:14, reserved:2; - } __attribute__ ((packed)); + } __packed; u32 time_stamp[2]; u16 reason; u16 status; @@ -419,7 +419,7 @@ struct libipw_auth { __le16 status; /* challenge */ struct libipw_info_element info_element[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_channel_switch { u8 id; @@ -427,7 +427,7 @@ struct libipw_channel_switch { u8 mode; u8 channel; u8 count; -} __attribute__ ((packed)); +} __packed; struct libipw_action { struct libipw_hdr_3addr header; @@ -441,12 +441,12 @@ struct libipw_action { struct libipw_channel_switch channel_switch; } format; -} __attribute__ ((packed)); +} __packed; struct libipw_disassoc { struct libipw_hdr_3addr header; __le16 reason; -} __attribute__ ((packed)); +} __packed; /* Alias deauth for disassoc */ #define libipw_deauth libipw_disassoc @@ -455,7 +455,7 @@ struct libipw_probe_request { struct libipw_hdr_3addr header; /* SSID, supported rates */ struct libipw_info_element info_element[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_probe_response { struct libipw_hdr_3addr header; @@ -465,7 +465,7 @@ struct libipw_probe_response { /* SSID, supported rates, FH params, DS params, * CF params, IBSS params, TIM (if beacon), RSN */ struct 
libipw_info_element info_element[0]; -} __attribute__ ((packed)); +} __packed; /* Alias beacon for probe_response */ #define libipw_beacon libipw_probe_response @@ -476,7 +476,7 @@ struct libipw_assoc_request { __le16 listen_interval; /* SSID, supported rates, RSN */ struct libipw_info_element info_element[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_reassoc_request { struct libipw_hdr_3addr header; @@ -484,7 +484,7 @@ struct libipw_reassoc_request { __le16 listen_interval; u8 current_ap[ETH_ALEN]; struct libipw_info_element info_element[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_assoc_response { struct libipw_hdr_3addr header; @@ -493,7 +493,7 @@ struct libipw_assoc_response { __le16 aid; /* supported rates */ struct libipw_info_element info_element[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_txb { u8 nr_frags; @@ -555,19 +555,19 @@ struct libipw_qos_information_element { u8 qui_subtype; u8 version; u8 ac_info; -} __attribute__ ((packed)); +} __packed; struct libipw_qos_ac_parameter { u8 aci_aifsn; u8 ecw_min_max; __le16 tx_op_limit; -} __attribute__ ((packed)); +} __packed; struct libipw_qos_parameter_info { struct libipw_qos_information_element info_element; u8 reserved; struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; -} __attribute__ ((packed)); +} __packed; struct libipw_qos_parameters { __le16 cw_min[QOS_QUEUE_NUM]; @@ -575,7 +575,7 @@ struct libipw_qos_parameters { u8 aifs[QOS_QUEUE_NUM]; u8 flag[QOS_QUEUE_NUM]; __le16 tx_op_limit[QOS_QUEUE_NUM]; -} __attribute__ ((packed)); +} __packed; struct libipw_qos_data { struct libipw_qos_parameters parameters; @@ -588,7 +588,7 @@ struct libipw_qos_data { struct libipw_tim_parameters { u8 tim_count; u8 tim_period; -} __attribute__ ((packed)); +} __packed; /*******************************************************/ @@ -606,7 +606,7 @@ struct libipw_basic_report { __le64 start_time; __le16 duration; u8 map; -} __attribute__ ((packed)); +} __packed; enum { /* libipw_measurement_request.mode */ /* Bit 0 is reserved */ @@ -627,7 +627,7 @@ struct libipw_measurement_params { u8 channel; __le64 start_time; __le16 duration; -} __attribute__ ((packed)); +} __packed; struct libipw_measurement_request { struct libipw_info_element ie; @@ -635,7 +635,7 @@ struct libipw_measurement_request { u8 mode; u8 type; struct libipw_measurement_params params[0]; -} __attribute__ ((packed)); +} __packed; struct libipw_measurement_report { struct libipw_info_element ie; @@ -645,17 +645,17 @@ struct libipw_measurement_report { union { struct libipw_basic_report basic[0]; } u; -} __attribute__ ((packed)); +} __packed; struct libipw_tpc_report { u8 transmit_power; u8 link_margin; -} __attribute__ ((packed)); +} __packed; struct libipw_channel_map { u8 channel; u8 map; -} __attribute__ ((packed)); +} __packed; struct libipw_ibss_dfs { struct libipw_info_element ie; @@ -668,14 +668,14 @@ struct libipw_csa { u8 mode; u8 channel; u8 count; -} __attribute__ ((packed)); +} __packed; struct libipw_quiet { u8 count; u8 period; u8 duration; u8 offset; -} __attribute__ ((packed)); +} __packed; struct libipw_network { /* These entries are used to identify a unique network */ @@ -828,7 +828,6 @@ struct libipw_device { int host_strip_iv_icv; int host_open_frag; - int host_build_iv; int ieee802_1x; /* is IEEE 802.1X used */ /* WPA data */ diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c index 
55965408ff3f3786e07f0117f5def525f55c8a20..32dee2ce5d3177706f8bc7394794952a87389324 100644 --- a/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/ipw2x00/libipw_module.c @@ -62,8 +62,8 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); -struct cfg80211_ops libipw_config_ops = { }; -void *libipw_wiphy_privid = &libipw_wiphy_privid; +static struct cfg80211_ops libipw_config_ops = { }; +static void *libipw_wiphy_privid = &libipw_wiphy_privid; static int libipw_networks_allocate(struct libipw_device *ieee) { diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c index da8beac7fcf36e9d57bc1db928d83b6e54f6b80d..01c88a71abe19a47da929e664e38f3656f4dd668 100644 --- a/drivers/net/wireless/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/ipw2x00/libipw_tx.c @@ -260,7 +260,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, rts_required; unsigned long flags; - int encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; + int encrypt, host_encrypt, host_encrypt_msdu; __be16 ether_type; int bytes, fc, hdr_len; struct sk_buff *skb_frag; @@ -301,7 +301,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) host_encrypt = ieee->host_encrypt && encrypt && crypt; host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; - host_build_iv = ieee->host_build_iv && encrypt && crypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) { @@ -313,7 +312,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) skb_copy_from_linear_data(skb, dest, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN); - if (host_encrypt || host_build_iv) + if (host_encrypt) fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_PROTECTED; else @@ -467,7 +466,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) for (; i < nr_frags; i++) { skb_frag = txb->fragments[i]; - if (host_encrypt || host_build_iv) + if (host_encrypt) skb_reserve(skb_frag, crypt->ops->extra_mpdu_prefix_len); @@ -502,15 +501,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) * to insert the IV between the header and the payload */ if (host_encrypt) libipw_encrypt_fragment(ieee, skb_frag, hdr_len); - else if (host_build_iv) { - atomic_inc(&crypt->refcnt); - if (crypt->ops->build_iv) - crypt->ops->build_iv(skb_frag, hdr_len, - ieee->sec.keys[ieee->sec.active_key], - ieee->sec.key_sizes[ieee->sec.active_key], - crypt->priv); - atomic_dec(&crypt->refcnt); - } if (ieee->config & (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c index 3633c6682e4967575ef040ad620fbb8550531c35..d7bd6cf00a81611f04f32cb96cde0cac1c53586d 100644 --- a/drivers/net/wireless/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/ipw2x00/libipw_wx.c @@ -320,7 +320,7 @@ int libipw_wx_set_encode(struct libipw_device *ieee, }; int i, key, key_provided, len; struct lib80211_crypt_data **crypt; - int host_crypto = ieee->host_encrypt || ieee->host_decrypt || ieee->host_build_iv; + int host_crypto = ieee->host_encrypt || ieee->host_decrypt; DECLARE_SSID_BUF(ssid); LIBIPW_DEBUG_WX("SET_ENCODE\n"); @@ -411,10 +411,6 @@ int libipw_wx_set_encode(struct libipw_device *ieee, /* If a new key was provided, set it up */ if (erq->length > 0) { -#ifdef CONFIG_LIBIPW_DEBUG - 
DECLARE_SSID_BUF(ssid); -#endif - len = erq->length <= 5 ? 5 : 13; memcpy(sec.keys[key], keybuf, erq->length); if (len > erq->length) diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index dc8ed15276667f1abb3ce7b311789a592d2c7ee3..a51e4da1bdfc5d7149074c8adc7555d51fac0250 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -30,9 +30,11 @@ config IWLWIFI_DEBUG config IWLWIFI_DEBUGFS bool "iwlagn debugfs support" - depends on IWLWIFI && IWLWIFI_DEBUG && MAC80211_DEBUGFS + depends on IWLWIFI && MAC80211_DEBUGFS ---help--- - Enable creation of debugfs files for the iwlwifi drivers. + Enable creation of debugfs files for the iwlwifi drivers. This + is a low-impact option that allows getting insight into the + driver's state at runtime. config IWLWIFI_DEVICE_TRACING bool "iwlwifi device access tracing" @@ -85,10 +87,15 @@ config IWL4965 This option enables support for Intel Wireless WiFi Link 4965AGN config IWL5000 - bool "Intel Wireless WiFi 5000AGN; Intel WiFi Link 1000, 6000, and 6050 Series" + bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link" depends on IWLAGN ---help--- - This option enables support for Intel Wireless WiFi Link 5000AGN Family + This option enables support for use with the following hardware: + Intel Wireless WiFi Link 6250AGN Adapter + Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) + Intel WiFi Link 1000BGN + Intel Wireless WiFi 5150AGN + Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN config IWL3945 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 7c7235385513b8464c22aca463f76ab549bb9b25..728bb858ba9760cefc0367618cf262d42fb529ee 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_IWLWIFI) += iwlcore.o iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o -iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o +iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwlcore-objs += iwl-scan.o iwl-led.o iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o @@ -11,7 +11,7 @@ CFLAGS_iwl-devtrace.o := -I$(src) obj-$(CONFIG_IWLAGN) += iwlagn.o iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o -iwlagn-objs += iwl-agn-lib.o +iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o iwlagn-$(CONFIG_IWL4965) += iwl-4965.o diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index 6be2992f8f210dbe3b0d0dee0b0c43ddd78efe94..8848333bc3a9545e8ed6a9dd3d8905bfeee6e33f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -129,8 +129,8 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) priv->cfg->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWL5000_STATION_COUNT; - priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + priv->hw_params.max_stations = IWLAGN_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; @@ -157,6 +157,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) BIT(IWL_CALIB_TX_IQ) | 
BIT(IWL_CALIB_TX_IQ_PERD) | BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC); + + priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; return 0; } @@ -213,14 +217,18 @@ static struct iwl_lib_ops iwl1000_lib = { .set_ct_kill = iwl1000_set_ct_threshold, }, .manage_ibss_station = iwlagn_manage_ibss_station, + .update_bcast_station = iwl_update_bcast_station, .debugfs_ops = { .rx_stats_read = iwl_ucode_rx_stats_read, .tx_stats_read = iwl_ucode_tx_stats_read, .general_stats_read = iwl_ucode_general_stats_read, + .bt_stats_read = iwl_ucode_bt_stats_read, }, .recover_from_tx_stall = iwl_bg_monitor_recover, .check_plcp_health = iwl_good_plcp_health, .check_ack_health = iwl_good_ack_health, + .txfifo_flush = iwlagn_txfifo_flush, + .dev_txfifo_flush = iwlagn_dev_txfifo_flush, }; static const struct iwl_ops iwl1000_ops = { diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c index 6a9c64a50e3693eef5120df6acf75d80c360273c..ef0835b01b6b8e6687f994119a29cf38e0ed3ab0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c @@ -28,6 +28,28 @@ #include "iwl-3945-debugfs.h" + +static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", + le32_to_cpu(priv->_3945.statistics.flag)); + if (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_NARROW_BAND_MSK) + ? 
"enabled" : "disabled"); + return p; +} + ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -70,7 +92,7 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file, max_cck = &priv->_3945.max_delta.rx.cck; max_general = &priv->_3945.max_delta.rx.general; - pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + pos += iwl3945_statistics_flag(priv, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - OFDM:"); @@ -331,7 +353,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file, accum_tx = &priv->_3945.accum_statistics.tx; delta_tx = &priv->_3945.delta_statistics.tx; max_tx = &priv->_3945.max_delta.tx; - pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + pos += iwl3945_statistics_flag(priv, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Tx:"); @@ -438,7 +460,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file, accum_div = &priv->_3945.accum_statistics.general.div; delta_div = &priv->_3945.delta_statistics.general.div; max_div = &priv->_3945.max_delta.general.div; - pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + pos += iwl3945_statistics_flag(priv, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_General:"); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h index 042f6bc0df133a466548902fd11ff367ef180159..2c9ed2b502a38ed05ba1b92c7ba10930382d2875 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h @@ -175,13 +175,13 @@ struct iwl3945_tfd_tb { __le32 addr; __le32 len; -} __attribute__ ((packed)); +} __packed; struct iwl3945_tfd { __le32 control_flags; struct iwl3945_tfd_tb tbs[4]; u8 __pad[28]; -} __attribute__ ((packed)); +} __packed; #endif /* __iwl_3945_fh_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h index 91bcb4e3cdfbbe7b6e106bd27d209ef39dda5e43..7c731a79363252915243fc01350b8077b50bce58 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h @@ -96,7 +96,7 @@ struct iwl3945_eeprom_txpower_sample { u8 gain_index; /* index into power (gain) setup table ... */ s8 power; /* ... for this pwr level for this chnl group */ u16 v_det; /* PA output voltage */ -} __attribute__ ((packed)); +} __packed; /* * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. @@ -117,7 +117,7 @@ struct iwl3945_eeprom_txpower_group { u8 group_channel; /* "representative" channel # in this band */ s16 temperature; /* h/w temperature at factory calib this band * (signed) */ -} __attribute__ ((packed)); +} __packed; /* * Temperature-based Tx-power compensation data, not band-specific. 
@@ -131,7 +131,7 @@ struct iwl3945_eeprom_temperature_corr { u32 Tc; u32 Td; u32 Te; -} __attribute__ ((packed)); +} __packed; /* * EEPROM map @@ -215,7 +215,7 @@ struct iwl3945_eeprom { /* abs.ofs: 512 */ struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ u8 reserved16[172]; /* fill out to full 1024 byte block */ -} __attribute__ ((packed)); +} __packed; #define IWL3945_EEPROM_IMG_SIZE 1024 @@ -274,7 +274,7 @@ static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ struct iwl3945_shared { __le32 tx_base_ptr[8]; -} __attribute__ ((packed)); +} __packed; static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) { diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index c44a303e62ed45c2bdc9ea9b77db020eb927c39c..a07310fefcf2d0e56c45d45fabce4251d26e12fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -279,8 +279,8 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = &txq->txb[txq->q.read_ptr]; - ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); - tx_info->skb[0] = NULL; + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); + tx_info->skb = NULL; priv->cfg->ops->lib->txq_free_tfd(priv, txq); } @@ -315,7 +315,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv, return; } - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); ieee80211_tx_info_clear_status(info); /* Fill the MRR chain with some info about on-chip retransmissions */ @@ -352,7 +352,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv, * RX handler implementations * *****************************************************************************/ -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_DEBUGFS /* * based on the assumption of all statistics counter are in DWORD * FIXME: This function is for debugging, do not deal with @@ -406,6 +406,11 @@ static bool iwl3945_good_plcp_health(struct iwl_priv *priv, unsigned int plcp_msec; unsigned long plcp_received_jiffies; + if (priv->cfg->plcp_delta_threshold == + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { + IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); + return rc; + } memcpy(&current_stat, pkt->u.raw, sizeof(struct iwl3945_notif_statistics)); /* @@ -460,7 +465,7 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv, IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", (int)sizeof(struct iwl3945_notif_statistics), le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_DEBUGFS iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); #endif iwl_recover_from_statistics(priv, pkt); @@ -475,7 +480,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv, __le32 *flag = (__le32 *)&pkt->u.raw; if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_DEBUGFS memset(&priv->_3945.accum_statistics, 0, sizeof(struct iwl3945_notif_statistics)); memset(&priv->_3945.delta_statistics, 0, @@ -494,158 +499,6 @@ void iwl3945_reply_statistics(struct iwl_priv *priv, * Misc.
internal state and helper functions * ******************************************************************************/ -#ifdef CONFIG_IWLWIFI_DEBUG - -/** - * iwl3945_report_frame - dump frame to syslog during debug sessions - * - * You may hack this function to show different aspects of received frames, - * including selective frame dumps. - * group100 parameter selects whether to show 1 out of 100 good frames. - */ -static void _iwl3945_dbg_report_frame(struct iwl_priv *priv, - struct iwl_rx_packet *pkt, - struct ieee80211_hdr *header, int group100) -{ - u32 to_us; - u32 print_summary = 0; - u32 print_dump = 0; /* set to 1 to dump all frames' contents */ - u32 hundred = 0; - u32 dataframe = 0; - __le16 fc; - u16 seq_ctl; - u16 channel; - u16 phy_flags; - u16 length; - u16 status; - u16 bcn_tmr; - u32 tsf_low; - u64 tsf; - u8 rssi; - u8 agc; - u16 sig_avg; - u16 noise_diff; - struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); - struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); - struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); - u8 *data = IWL_RX_DATA(pkt); - - /* MAC header */ - fc = header->frame_control; - seq_ctl = le16_to_cpu(header->seq_ctrl); - - /* metadata */ - channel = le16_to_cpu(rx_hdr->channel); - phy_flags = le16_to_cpu(rx_hdr->phy_flags); - length = le16_to_cpu(rx_hdr->len); - - /* end-of-frame status and timestamp */ - status = le32_to_cpu(rx_end->status); - bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); - tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; - tsf = le64_to_cpu(rx_end->timestamp); - - /* signal statistics */ - rssi = rx_stats->rssi; - agc = rx_stats->agc; - sig_avg = le16_to_cpu(rx_stats->sig_avg); - noise_diff = le16_to_cpu(rx_stats->noise_diff); - - to_us = !compare_ether_addr(header->addr1, priv->mac_addr); - - /* if data frame is to us and all is good, - * (optionally) print summary for only 1 out of every 100 */ - if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == - cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { - dataframe = 1; - if (!group100) - print_summary = 1; /* print each frame */ - else if (priv->framecnt_to_us < 100) { - priv->framecnt_to_us++; - print_summary = 0; - } else { - priv->framecnt_to_us = 0; - print_summary = 1; - hundred = 1; - } - } else { - /* print summary for all other frames */ - print_summary = 1; - } - - if (print_summary) { - char *title; - int rate; - - if (hundred) - title = "100Frames"; - else if (ieee80211_has_retry(fc)) - title = "Retry"; - else if (ieee80211_is_assoc_resp(fc)) - title = "AscRsp"; - else if (ieee80211_is_reassoc_resp(fc)) - title = "RasRsp"; - else if (ieee80211_is_probe_resp(fc)) { - title = "PrbRsp"; - print_dump = 1; /* dump frame contents */ - } else if (ieee80211_is_beacon(fc)) { - title = "Beacon"; - print_dump = 1; /* dump frame contents */ - } else if (ieee80211_is_atim(fc)) - title = "ATIM"; - else if (ieee80211_is_auth(fc)) - title = "Auth"; - else if (ieee80211_is_deauth(fc)) - title = "DeAuth"; - else if (ieee80211_is_disassoc(fc)) - title = "DisAssoc"; - else - title = "Frame"; - - rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); - if (rate == -1) - rate = 0; - else - rate = iwl3945_rates[rate].ieee / 2; - - /* print frame summary. - * MAC addresses show just the last byte (for brevity), - * but you can hack it to show more, if you'd like to. 
*/ - if (dataframe) - IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " - "len=%u, rssi=%d, chnl=%d, rate=%d,\n", - title, le16_to_cpu(fc), header->addr1[5], - length, rssi, channel, rate); - else { - /* src/dst addresses assume managed mode */ - IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, " - "src=0x%02x, rssi=%u, tim=%lu usec, " - "phy=0x%02x, chnl=%d\n", - title, le16_to_cpu(fc), header->addr1[5], - header->addr3[5], rssi, - tsf_low - priv->scan_start_tsf, - phy_flags, channel); - } - } - if (print_dump) - iwl_print_hex_dump(priv, IWL_DL_RX, data, length); -} - -static void iwl3945_dbg_report_frame(struct iwl_priv *priv, - struct iwl_rx_packet *pkt, - struct ieee80211_hdr *header, int group100) -{ - if (iwl_get_debug_level(priv) & IWL_DL_RX) - _iwl3945_dbg_report_frame(priv, pkt, header, group100); -} - -#else -static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv, - struct iwl_rx_packet *pkt, - struct ieee80211_hdr *header, int group100) -{ -} -#endif /* This is necessary only for a number of statistics, see the caller. */ static int iwl3945_is_network_packet(struct iwl_priv *priv, @@ -777,8 +630,6 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, rx_status.signal, rx_status.signal, rx_status.rate_idx); - /* Set "1" to report good data frames in groups of 100 */ - iwl3945_dbg_report_frame(priv, pkt, header, 1); iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); if (network_packet) { @@ -850,25 +701,28 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) /* Unmap tx_cmd */ if (counter) pci_unmap_single(dev, - pci_unmap_addr(&txq->meta[index], mapping), - pci_unmap_len(&txq->meta[index], len), + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), PCI_DMA_TODEVICE); /* unmap chunks if any */ - for (i = 1; i < counter; i++) { + for (i = 1; i < counter; i++) pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE); - if (txq->txb[txq->q.read_ptr].skb[0]) { - struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0]; - if (txq->txb[txq->q.read_ptr].skb[0]) { - /* Can be called from interrupt context */ - dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb[0] = NULL; - } + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; } } - return ; } /** @@ -947,8 +801,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); } -static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, - u16 tx_rate, u8 flags) +static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate) { unsigned long flags_spin; struct iwl_station_entry *station; @@ -962,10 +815,9 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; station->sta.rate_n_flags = cpu_to_le16(tx_rate); station->sta.mode = STA_CONTROL_MODIFY_MSK; - + iwl_send_add_sta(priv, &station->sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - iwl_send_add_sta(priv, &station->sta, flags); IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", sta_id, tx_rate); return sta_id; @@ -997,7 +849,7 @@ static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) { - iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), 
rxq->dma_addr); + iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), @@ -2473,8 +2325,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv, iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id, (priv->band == IEEE80211_BAND_5GHZ) ? - IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, - CMD_ASYNC); + IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP); iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id); return 0; @@ -2590,6 +2441,7 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; + priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS; return 0; } diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h index cd4b61ae25b787021b7392187529c87d7962252f..9166794eda0ddee2838f6c0e5f06b93558f93601 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h @@ -787,6 +787,6 @@ enum { struct iwl4965_scd_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; -} __attribute__ ((packed)); +} __packed; #endif /* !__iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index d3afddae8d9f18d6f308bb79189ed5deb5921d39..d6531ad3906a5d2a407ff2c5c106a6cf335ead10 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -346,9 +346,19 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv) { struct iwl_chain_noise_data *data = &(priv->chain_noise_data); - if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && + iwl_is_associated(priv)) { struct iwl_calib_diff_gain_cmd cmd; + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + memset(&cmd, 0, sizeof(cmd)); cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; cmd.diff_gain_a = 0; @@ -419,13 +429,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv, /* Mark so we run this algo only once! 
*/ data->state = IWL_CHAIN_NOISE_CALIBRATED; } - data->chain_noise_a = 0; - data->chain_noise_b = 0; - data->chain_noise_c = 0; - data->chain_signal_a = 0; - data->chain_signal_b = 0; - data->chain_signal_c = 0; - data->beacon_count = 0; } static void iwl4965_bg_txpower_work(struct work_struct *work) @@ -669,6 +672,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); priv->hw_params.sens = &iwl4965_sensitivity; + priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; return 0; } @@ -1441,7 +1445,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv) return ret; } -static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) +static int iwl4965_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) { int rc; u8 band = 0; @@ -1449,11 +1454,14 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) u8 ctrl_chan_high = 0; struct iwl4965_channel_switch_cmd cmd; const struct iwl_channel_info *ch_info; - + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval); + struct ieee80211_vif *vif = priv->vif; band = priv->band == IEEE80211_BAND_2GHZ; - ch_info = iwl_get_channel_info(priv, priv->band, channel); - is_ht40 = is_ht40_channel(priv->staging_rxon.flags); if (is_ht40 && @@ -1462,26 +1470,56 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) cmd.band = band; cmd.expect_beacon = 0; - cmd.channel = cpu_to_le16(channel); + ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq); + cmd.channel = cpu_to_le16(ch); cmd.rxon_flags = priv->staging_rxon.flags; cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_get_channel_info(priv, priv->band, ch); if (ch_info) cmd.expect_beacon = is_channel_radar(ch_info); else { IWL_ERR(priv, "invalid channel switch from %u to %u\n", - priv->active_rxon.channel, channel); + priv->active_rxon.channel, ch); return -EFAULT; } - rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40, + rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40, ctrl_chan_high, &cmd.tx_power); if (rc) { IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc); return rc; } - priv->switch_rxon.channel = cpu_to_le16(channel); + priv->switch_rxon.channel = cmd.channel; priv->switch_rxon.switch_in_progress = true; return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); @@ -1542,7 +1580,8 @@ static int 
iwl4965_hw_get_temperature(struct iwl_priv *priv) u32 R4; if (test_bit(STATUS_TEMPERATURE, &priv->status) && - (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { + (priv->_agn.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); @@ -1566,8 +1605,8 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv) if (!test_bit(STATUS_TEMPERATURE, &priv->status)) vt = sign_extend(R4, 23); else - vt = sign_extend( - le32_to_cpu(priv->statistics.general.temperature), 23); + vt = sign_extend(le32_to_cpu(priv->_agn.statistics. + general.common.temperature), 23); IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); @@ -1747,6 +1786,7 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, { unsigned long flags; u16 ra_tid; + int ret; if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues @@ -1762,7 +1802,9 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, ra_tid = BUILD_RAxTID(sta_id, tid); /* Modify device's station table to Tx this TID */ - iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + if (ret) + return ret; spin_lock_irqsave(&priv->lock, flags); @@ -1870,7 +1912,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", agg->frame_count, agg->start_idx, idx); - info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); + info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); info->status.rates[0].count = tx_resp->failure_frame + 1; info->flags &= ~IEEE80211_TX_CTL_AMPDU; info->flags |= iwl_tx_status_to_mac80211(status); @@ -2026,6 +2068,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, int sta_id; int freed; u8 *qc = NULL; + unsigned long flags; if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " @@ -2035,7 +2078,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, return; } - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); memset(&info->status, 0, sizeof(info->status)); hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); @@ -2050,10 +2093,10 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, return; } + spin_lock_irqsave(&priv->sta_lock, flags); if (txq->sched_retry) { const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); struct iwl_ht_agg *agg = NULL; - WARN_ON(!qc); agg = &priv->stations[sta_id].tid[tid].agg; @@ -2110,6 +2153,8 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); iwl_check_abort_status(priv, tx_resp->frame_count, status); + + spin_unlock_irqrestore(&priv->sta_lock, flags); } static int iwl4965_calc_rssi(struct iwl_priv *priv, @@ -2235,11 +2280,14 @@ static struct iwl_lib_ops iwl4965_lib = { .set_ct_kill = iwl4965_set_ct_threshold, }, .manage_ibss_station = iwlagn_manage_ibss_station, + .update_bcast_station = iwl_update_bcast_station, .debugfs_ops = { .rx_stats_read = iwl_ucode_rx_stats_read, .tx_stats_read = iwl_ucode_tx_stats_read, .general_stats_read = iwl_ucode_general_stats_read, + .bt_stats_read = iwl_ucode_bt_stats_read, }, + .recover_from_tx_stall = iwl_bg_monitor_recover, .check_plcp_health = 
iwl_good_plcp_health, }; @@ -2285,7 +2333,7 @@ struct iwl_cfg iwl4965_agn_cfg = { * Force use of chains B and C for scan RX on 5 GHz band * because the device has off-channel reception on chain A. */ - .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, + .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, }; /* Module firmware */ diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index a28af7eb67eb464775369f7ff0617aed88847547..8093ce2804fb31d3fb5e00cbc7f49da19d9543b6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -179,8 +179,8 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) priv->cfg->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWL5000_STATION_COUNT; - priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + priv->hw_params.max_stations = IWLAGN_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; @@ -208,6 +208,8 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) BIT(IWL_CALIB_TX_IQ_PERD) | BIT(IWL_CALIB_BASE_BAND); + priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; + return 0; } @@ -224,8 +226,8 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) priv->cfg->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWL5000_STATION_COUNT; - priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + priv->hw_params.max_stations = IWLAGN_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; @@ -247,10 +249,13 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) /* Set initial calibration set */ priv->hw_params.sens = &iwl5150_sensitivity; priv->hw_params.calib_init_cfg = - BIT(IWL_CALIB_DC) | BIT(IWL_CALIB_LO) | BIT(IWL_CALIB_TX_IQ) | BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC); + + priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; return 0; } @@ -260,40 +265,76 @@ static void iwl5150_temperature(struct iwl_priv *priv) u32 vt = 0; s32 offset = iwl_temp_calib_to_offset(priv); - vt = le32_to_cpu(priv->statistics.general.temperature); + vt = le32_to_cpu(priv->_agn.statistics.general.common.temperature); vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; /* now vt hold the temperature in Kelvin */ priv->temperature = KELVIN_TO_CELSIUS(vt); iwl_tt_handler(priv); } -static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) +static int iwl5000_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) { struct iwl5000_channel_switch_cmd cmd; const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval); + struct ieee80211_vif *vif = priv->vif; struct iwl_host_cmd hcmd = { .id = REPLY_CHANNEL_SWITCH, .len = sizeof(cmd), - .flags = CMD_SIZE_HUGE, + .flags = CMD_SYNC, .data = &cmd, }; - IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", - priv->active_rxon.channel, channel); cmd.band = priv->band == IEEE80211_BAND_2GHZ; - cmd.channel = cpu_to_le16(channel); + ch = 
ieee80211_frequency_to_channel(ch_switch->channel->center_freq); + IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", + priv->active_rxon.channel, ch); + cmd.channel = cpu_to_le16(ch); cmd.rxon_flags = priv->staging_rxon.flags; cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - ch_info = iwl_get_channel_info(priv, priv->band, channel); + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_get_channel_info(priv, priv->band, ch); if (ch_info) cmd.expect_beacon = is_channel_radar(ch_info); else { IWL_ERR(priv, "invalid channel switch from %u to %u\n", - priv->active_rxon.channel, channel); + priv->active_rxon.channel, ch); return -EFAULT; } - priv->switch_rxon.channel = cpu_to_le16(channel); + priv->switch_rxon.channel = cmd.channel; priv->switch_rxon.switch_in_progress = true; return iwl_send_cmd_sync(priv, &hcmd); @@ -352,14 +393,18 @@ static struct iwl_lib_ops iwl5000_lib = { .set_ct_kill = iwl5000_set_ct_threshold, }, .manage_ibss_station = iwlagn_manage_ibss_station, + .update_bcast_station = iwl_update_bcast_station, .debugfs_ops = { .rx_stats_read = iwl_ucode_rx_stats_read, .tx_stats_read = iwl_ucode_tx_stats_read, .general_stats_read = iwl_ucode_general_stats_read, + .bt_stats_read = iwl_ucode_bt_stats_read, }, .recover_from_tx_stall = iwl_bg_monitor_recover, .check_plcp_health = iwl_good_plcp_health, .check_ack_health = iwl_good_ack_health, + .txfifo_flush = iwlagn_txfifo_flush, + .dev_txfifo_flush = iwlagn_dev_txfifo_flush, }; static struct iwl_lib_ops iwl5150_lib = { @@ -414,6 +459,7 @@ static struct iwl_lib_ops iwl5150_lib = { .set_ct_kill = iwl5150_set_ct_threshold, }, .manage_ibss_station = iwlagn_manage_ibss_station, + .update_bcast_station = iwl_update_bcast_station, .debugfs_ops = { .rx_stats_read = iwl_ucode_rx_stats_read, .tx_stats_read = iwl_ucode_tx_stats_read, @@ -422,6 +468,8 @@ static struct iwl_lib_ops iwl5150_lib = { .recover_from_tx_stall = iwl_bg_monitor_recover, .check_plcp_health = iwl_good_plcp_health, .check_ack_health = iwl_good_ack_health, + .txfifo_flush = iwlagn_txfifo_flush, + .dev_txfifo_flush = iwlagn_dev_txfifo_flush, }; static const struct iwl_ops iwl5000_ops = { @@ -620,6 +668,7 @@ struct iwl_cfg iwl5150_agn_cfg = { .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, + .need_dc_calib = true, }; struct iwl_cfg iwl5150_abg_cfg = { @@ -649,6 +698,7 @@ struct iwl_cfg iwl5150_abg_cfg = { .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, + .need_dc_calib = true, }; MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); diff --git 
a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 9fbf54cd3e1a971b057392d5edec2f2cabcac4c5..58270529a0e4efc4ea3198ce9aa27fc45d1c9707 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -71,6 +71,10 @@ #define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode" #define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api) +#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-" +#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" +#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) + static void iwl6000_set_ct_threshold(struct iwl_priv *priv) { @@ -80,9 +84,10 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv) } /* Indicate calibration version to uCode. */ -static void iwl6050_set_calib_version(struct iwl_priv *priv) +static void iwl6000_set_calib_version(struct iwl_priv *priv) { - if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6) + if (priv->cfg->need_dc_calib && + (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)) iwl_set_bit(priv, CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); } @@ -155,8 +160,8 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) priv->cfg->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWL5000_STATION_COUNT; - priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + priv->hw_params.max_stations = IWLAGN_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; @@ -182,83 +187,77 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) BIT(IWL_CALIB_LO) | BIT(IWL_CALIB_TX_IQ) | BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC); - return 0; -} - -static int iwl6050_hw_set_hw_params(struct iwl_priv *priv) -{ - if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && - priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) - priv->cfg->num_of_queues = - priv->cfg->mod_params->num_of_queues; - - priv->hw_params.max_txq_num = priv->cfg->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; - priv->hw_params.scd_bc_tbls_size = - priv->cfg->num_of_queues * - sizeof(struct iwlagn_scd_bc_tbl); - priv->hw_params.tfd_size = sizeof(struct iwl_tfd); - priv->hw_params.max_stations = IWL5000_STATION_COUNT; - priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; - - priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; - priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; - - priv->hw_params.max_bsm_size = 0; - priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | - BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; - - priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); - priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; - priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; - - if (priv->cfg->ops->lib->temp_ops.set_ct_kill) - priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); - - /* Set initial sensitivity parameters */ - /* Set initial calibration set */ - priv->hw_params.sens = &iwl6000_sensitivity; - priv->hw_params.calib_init_cfg = - BIT(IWL_CALIB_XTAL) | - BIT(IWL_CALIB_DC) | - BIT(IWL_CALIB_LO) | - BIT(IWL_CALIB_TX_IQ) | - BIT(IWL_CALIB_BASE_BAND); + 
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; return 0; } -static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel) +static int iwl6000_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) { struct iwl6000_channel_switch_cmd cmd; const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval); + struct ieee80211_vif *vif = priv->vif; struct iwl_host_cmd hcmd = { .id = REPLY_CHANNEL_SWITCH, .len = sizeof(cmd), - .flags = CMD_SIZE_HUGE, + .flags = CMD_SYNC, .data = &cmd, }; - IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", - priv->active_rxon.channel, channel); - cmd.band = priv->band == IEEE80211_BAND_2GHZ; - cmd.channel = cpu_to_le16(channel); + ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq); + IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", + priv->active_rxon.channel, ch); + cmd.channel = cpu_to_le16(ch); cmd.rxon_flags = priv->staging_rxon.flags; cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; - cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); - ch_info = iwl_get_channel_info(priv, priv->band, channel); + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_get_channel_info(priv, priv->band, ch); if (ch_info) cmd.expect_beacon = is_channel_radar(ch_info); else { IWL_ERR(priv, "invalid channel switch from %u to %u\n", - priv->active_rxon.channel, channel); + priv->active_rxon.channel, ch); return -EFAULT; } - priv->switch_rxon.channel = cpu_to_le16(channel); + priv->switch_rxon.channel = cmd.channel; priv->switch_rxon.switch_in_progress = true; return iwl_send_cmd_sync(priv, &hcmd); @@ -316,16 +315,21 @@ static struct iwl_lib_ops iwl6000_lib = { .temp_ops = { .temperature = iwlagn_temperature, .set_ct_kill = iwl6000_set_ct_threshold, + .set_calib_version = iwl6000_set_calib_version, }, .manage_ibss_station = iwlagn_manage_ibss_station, + .update_bcast_station = iwl_update_bcast_station, .debugfs_ops = { .rx_stats_read = iwl_ucode_rx_stats_read, .tx_stats_read = iwl_ucode_tx_stats_read, .general_stats_read = iwl_ucode_general_stats_read, + .bt_stats_read = iwl_ucode_bt_stats_read, }, .recover_from_tx_stall = iwl_bg_monitor_recover, .check_plcp_health = iwl_good_plcp_health, .check_ack_health = iwl_good_ack_health, + .txfifo_flush = iwlagn_txfifo_flush, + .dev_txfifo_flush = iwlagn_dev_txfifo_flush, }; static const struct iwl_ops iwl6000_ops = { @@ -335,79 +339,25 @@ static const struct iwl_ops iwl6000_ops = { .led = &iwlagn_led_ops, }; -static struct iwl_lib_ops iwl6050_lib = { - 
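The iwl4965/iwl5000/iwl6000 channel-switch hunks above all repeat the same computation: subtract the beacon intervals that have already elapsed (judging the device's uCode beacon time against the low word of the AP timestamp) from the CSA count, switch immediately if one interval or less remains, and otherwise program a switch time that many beacon intervals in the future via iwl_usecs_to_beacons()/iwl_add_beacon_time(). The following standalone sketch models only that decision, in plain microseconds; it is not driver code, its helper name is invented here, and the real helpers additionally pack the result into the device's beacon-time register using hw_params.beacon_time_tsf_bits. One TU is 1024 usec, matching the driver's TIME_UNIT.

	/*
	 * Standalone model of the switch-time decision; not driver code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define TIME_UNIT 1024u			/* usec per TU */

	static uint32_t pick_switch_time(uint32_t ucode_time_usec,
					 uint32_t ap_tsf_low_usec,
					 uint32_t beacon_interval_tu,
					 unsigned int csa_count)
	{
		uint32_t interval_usec = beacon_interval_tu * TIME_UNIT;
		uint32_t elapsed;

		/* drop the beacon intervals that have already passed since
		 * the AP stamped the CSA element */
		if (interval_usec && ucode_time_usec > ap_tsf_low_usec) {
			elapsed = (ucode_time_usec - ap_tsf_low_usec) /
				  interval_usec;
			csa_count = csa_count > elapsed ?
				    csa_count - elapsed : 0;
		}

		if (csa_count <= 1)		/* switch at the next opportunity */
			return ucode_time_usec;

		/* otherwise aim csa_count beacon intervals into the future */
		return ucode_time_usec + csa_count * interval_usec;
	}

	int main(void)
	{
		/* beacon interval 100 TU, CSA count 5, about two intervals elapsed */
		printf("switch at %u usec\n",
		       pick_switch_time(500000, 290000, 100, 5));
		return 0;
	}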
.set_hw_params = iwl6050_hw_set_hw_params, - .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, - .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_agg_enable = iwlagn_txq_agg_enable, - .txq_agg_disable = iwlagn_txq_agg_disable, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_setup_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .load_ucode = iwlagn_load_ucode, - .dump_nic_event_log = iwl_dump_nic_event_log, - .dump_nic_error_log = iwl_dump_nic_error_log, - .dump_csr = iwl_dump_csr, - .dump_fh = iwl_dump_fh, - .init_alive_start = iwlagn_init_alive_start, - .alive_notify = iwlagn_alive_notify, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, - .set_channel_switch = iwl6000_hw_channel_switch, - .apm_ops = { - .init = iwl_apm_init, - .stop = iwl_apm_stop, - .config = iwl6000_nic_config, - .set_pwr_src = iwl_set_pwr_src, - }, - .eeprom_ops = { - .regulatory_bands = { - EEPROM_REG_BAND_1_CHANNELS, - EEPROM_REG_BAND_2_CHANNELS, - EEPROM_REG_BAND_3_CHANNELS, - EEPROM_REG_BAND_4_CHANNELS, - EEPROM_REG_BAND_5_CHANNELS, - EEPROM_6000_REG_BAND_24_HT40_CHANNELS, - EEPROM_REG_BAND_52_HT40_CHANNELS - }, - .verify_signature = iwlcore_eeprom_verify_signature, - .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, - .release_semaphore = iwlcore_eeprom_release_semaphore, - .calib_version = iwlagn_eeprom_calib_version, - .query_addr = iwlagn_eeprom_query_addr, - .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, - }, - .post_associate = iwl_post_associate, - .isr = iwl_isr_ict, - .config_ap = iwl_config_ap, - .temp_ops = { - .temperature = iwlagn_temperature, - .set_ct_kill = iwl6000_set_ct_threshold, - .set_calib_version = iwl6050_set_calib_version, - }, - .manage_ibss_station = iwlagn_manage_ibss_station, - .debugfs_ops = { - .rx_stats_read = iwl_ucode_rx_stats_read, - .tx_stats_read = iwl_ucode_tx_stats_read, - .general_stats_read = iwl_ucode_general_stats_read, - }, - .recover_from_tx_stall = iwl_bg_monitor_recover, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, +static void do_not_send_bt_config(struct iwl_priv *priv) +{ +} + +static struct iwl_hcmd_ops iwl6000g2b_hcmd = { + .rxon_assoc = iwlagn_send_rxon_assoc, + .commit_rxon = iwl_commit_rxon, + .set_rxon_chain = iwl_set_rxon_chain, + .set_tx_ant = iwlagn_send_tx_ant_config, + .send_bt_config = do_not_send_bt_config, }; -static const struct iwl_ops iwl6050_ops = { - .lib = &iwl6050_lib, - .hcmd = &iwlagn_hcmd, +static const struct iwl_ops iwl6000g2b_ops = { + .lib = &iwl6000_lib, + .hcmd = &iwl6000g2b_hcmd, .utils = &iwlagn_hcmd_utils, .led = &iwlagn_led_ops, }; - struct iwl_cfg iwl6000g2a_2agn_cfg = { .name = "6000 Series 2x2 AGN Gen2a", .fw_name_pre = IWL6000G2A_FW_PRE, @@ -443,6 +393,299 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = { .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, + .need_dc_calib = true, +}; + +struct iwl_cfg iwl6000g2a_2abg_cfg = { + .name = "6000 Series 2x2 ABG Gen2a", + .fw_name_pre = IWL6000G2A_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = 
EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, +}; + +struct iwl_cfg iwl6000g2a_2bg_cfg = { + .name = "6000 Series 2x2 BG Gen2a", + .fw_name_pre = IWL6000G2A_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_G, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, +}; + +struct iwl_cfg iwl6000g2b_2agn_cfg = { + .name = "6000 Series 2x2 AGN Gen2b", + .fw_name_pre = IWL6000G2B_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6000g2b_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, + .bt_statistics = true, +}; + +struct iwl_cfg iwl6000g2b_2abg_cfg = { + .name = "6000 Series 2x2 ABG Gen2b", + .fw_name_pre = IWL6000G2B_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .ops = &iwl6000g2b_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + 
.eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, + .bt_statistics = true, +}; + +struct iwl_cfg iwl6000g2b_2bgn_cfg = { + .name = "6000 Series 2x2 BGN Gen2b", + .fw_name_pre = IWL6000G2B_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6000g2b_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, + .bt_statistics = true, +}; + +struct iwl_cfg iwl6000g2b_2bg_cfg = { + .name = "6000 Series 2x2 BG Gen2b", + .fw_name_pre = IWL6000G2B_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_G, + .ops = &iwl6000g2b_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, + .bt_statistics = true, +}; + +struct iwl_cfg iwl6000g2b_bgn_cfg = { + .name = "6000 Series 1x2 BGN Gen2b", + .fw_name_pre = IWL6000G2B_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = 
IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6000g2b_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, + .bt_statistics = true, +}; + +struct iwl_cfg iwl6000g2b_bg_cfg = { + .name = "6000 Series 1x2 BG Gen2b", + .fw_name_pre = IWL6000G2B_FW_PRE, + .ucode_api_max = IWL6000G2_UCODE_API_MAX, + .ucode_api_min = IWL6000G2_UCODE_API_MIN, + .sku = IWL_SKU_G, + .ops = &iwl6000g2b_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 512, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, + .bt_statistics = true, }; /* @@ -561,7 +804,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { .ucode_api_max = IWL6050_UCODE_API_MAX, .ucode_api_min = IWL6050_UCODE_API_MIN, .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, - .ops = &iwl6050_ops, + .ops = &iwl6000_ops, .eeprom_size = OTP_LOW_IMAGE_SIZE, .eeprom_ver = EEPROM_6050_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, @@ -590,6 +833,45 @@ struct iwl_cfg iwl6050_2agn_cfg = { .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, + .need_dc_calib = true, +}; + +struct iwl_cfg iwl6050g2_bgn_cfg = { + .name = "6050 Series 1x2 BGN Gen2", + .fw_name_pre = IWL6050_FW_PRE, + .ucode_api_max = IWL6050_UCODE_API_MAX, + .ucode_api_min = IWL6050_UCODE_API_MIN, + .sku = IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .mod_params = &iwlagn_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x50, + .shadow_ram_support = true, + 
.ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, + .monitor_recover_period = IWL_MONITORING_PERIOD, + .max_event_log_size = 1024, + .ucode_tracing = true, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .need_dc_calib = true, }; struct iwl_cfg iwl6050_2abg_cfg = { @@ -598,7 +880,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { .ucode_api_max = IWL6050_UCODE_API_MAX, .ucode_api_min = IWL6050_UCODE_API_MIN, .sku = IWL_SKU_A|IWL_SKU_G, - .ops = &iwl6050_ops, + .ops = &iwl6000_ops, .eeprom_size = OTP_LOW_IMAGE_SIZE, .eeprom_ver = EEPROM_6050_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, @@ -625,6 +907,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, + .need_dc_calib = true, }; struct iwl_cfg iwl6000_3agn_cfg = { @@ -667,3 +950,4 @@ struct iwl_cfg iwl6000_3agn_cfg = { MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c similarity index 83% rename from drivers/net/wireless/iwlwifi/iwl-calib.c rename to drivers/net/wireless/iwlwifi/iwl-agn-calib.c index 7e822777321331cb5f9ae8303e16613247c2c580..c4c5691032a601a0dcc34e9d7f44fa8f81f1eab2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -96,17 +96,16 @@ int iwl_send_calib_results(struct iwl_priv *priv) hcmd.len = priv->calib_results[i].buf_len; hcmd.data = priv->calib_results[i].buf; ret = iwl_send_cmd_sync(priv, &hcmd); - if (ret) - goto err; + if (ret) { + IWL_ERR(priv, "Error %d iteration %d\n", + ret, i); + break; + } } } - return 0; -err: - IWL_ERR(priv, "Error %d iteration %d\n", ret, i); return ret; } -EXPORT_SYMBOL(iwl_send_calib_results); int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) { @@ -121,7 +120,6 @@ int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) memcpy(res->buf, buf, len); return 0; } -EXPORT_SYMBOL(iwl_calib_set); void iwl_calib_free_results(struct iwl_priv *priv) { @@ -133,7 +131,6 @@ void iwl_calib_free_results(struct iwl_priv *priv) priv->calib_results[i].buf_len = 0; } } -EXPORT_SYMBOL(iwl_calib_free_results); /***************************************************************************** * RUNTIME calibrations framework @@ -412,46 +409,34 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv, return 0; } -/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ -static int iwl_sensitivity_write(struct iwl_priv *priv) +static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, + struct iwl_sensitivity_data *data, + __le16 *tbl) { - struct iwl_sensitivity_cmd cmd ; - struct iwl_sensitivity_data *data = NULL; - struct iwl_host_cmd cmd_out = { - .id = SENSITIVITY_CMD, - .len = sizeof(struct iwl_sensitivity_cmd), - .flags = CMD_ASYNC, - .data = &cmd, - }; - - data = &(priv->sensitivity_data); - - memset(&cmd, 0, sizeof(cmd)); - - 
cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = cpu_to_le16((u16)data->auto_corr_ofdm); - cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = cpu_to_le16((u16)data->auto_corr_ofdm_mrc); - cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = cpu_to_le16((u16)data->auto_corr_ofdm_x1); - cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); - cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = cpu_to_le16((u16)data->auto_corr_cck); - cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = cpu_to_le16((u16)data->auto_corr_cck_mrc); - cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] = + tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = cpu_to_le16((u16)data->nrg_th_cck); - cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] = + tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = cpu_to_le16((u16)data->nrg_th_ofdm); - cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = cpu_to_le16(data->barker_corr_th_min); - cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = cpu_to_le16(data->barker_corr_th_min_mrc); - cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = + tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = cpu_to_le16(data->nrg_th_cca); IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", @@ -462,6 +447,25 @@ static int iwl_sensitivity_write(struct iwl_priv *priv) IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck, data->auto_corr_cck_mrc, data->nrg_th_cck); +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_sensitivity_cmd), + .flags = CMD_ASYNC, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); /* Update uCode's "work" table, and copy it to DSP */ cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; @@ -480,6 +484,70 @@ static int iwl_sensitivity_write(struct iwl_priv *priv) return iwl_send_cmd(priv, &cmd_out); } +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_enhance_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_enhance_sensitivity_cmd), + .flags = CMD_ASYNC, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); + + cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = + HD_INA_NON_SQUARE_DET_OFDM_DATA; + cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = + HD_INA_NON_SQUARE_DET_CCK_DATA; + cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] = + HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] = + HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = + HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] = + 
HD_OFDM_NON_SQUARE_DET_SLOPE_DATA; + cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] = + HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] = + HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] = + HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] = + HD_CCK_NON_SQUARE_DET_SLOPE_DATA; + cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] = + HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA; + + /* Update uCode's "work" table, and copy it to DSP */ + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE) && + !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX], + &(priv->enhance_sensitivity_tbl[0]), + sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]), + sizeof(u16)*HD_TABLE_SIZE); + memcpy(&(priv->enhance_sensitivity_tbl[0]), + &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]), + sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES); + + return iwl_send_cmd(priv, &cmd_out); +} + void iwl_init_sensitivity(struct iwl_priv *priv) { int ret = 0; @@ -530,13 +598,14 @@ void iwl_init_sensitivity(struct iwl_priv *priv) data->last_bad_plcp_cnt_cck = 0; data->last_fa_cnt_cck = 0; - ret |= iwl_sensitivity_write(priv); + if (priv->enhance_sensitivity_table) + ret |= iwl_enhance_sensitivity_write(priv); + else + ret |= iwl_sensitivity_write(priv); IWL_DEBUG_CALIB(priv, "<rx.general); - struct statistics_rx *statistics = &(resp->rx); + struct statistics_rx_non_phy *rx_info; + struct statistics_rx_phy *ofdm, *cck; unsigned long flags; struct statistics_general_data statis; @@ -562,6 +631,16 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, } spin_lock_irqsave(&priv->lock, flags); + if (priv->cfg->bt_statistics) { + rx_info = &(((struct iwl_bt_notif_statistics *)resp)-> + rx.general.common); + ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm); + cck = &(((struct iwl_bt_notif_statistics *)resp)->rx.cck); + } else { + rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); + ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); + cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); + } if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); spin_unlock_irqrestore(&priv->lock, flags); @@ -570,23 +649,23 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, /* Extract Statistics: */ rx_enable_time = le32_to_cpu(rx_info->channel_load); - fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt); - fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt); - bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err); - bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err); + fa_cck = le32_to_cpu(cck->false_alarm_cnt); + fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(cck->plcp_err); + bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); statis.beacon_silence_rssi_a = - le32_to_cpu(statistics->general.beacon_silence_rssi_a); + le32_to_cpu(rx_info->beacon_silence_rssi_a); statis.beacon_silence_rssi_b = - le32_to_cpu(statistics->general.beacon_silence_rssi_b); + le32_to_cpu(rx_info->beacon_silence_rssi_b); statis.beacon_silence_rssi_c = - 
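The calibration refactor above factors the common table fill into iwl_prepare_legacy_sensitivity_tbl() so that the legacy and the enhanced SENSITIVITY_CMD writers can share it, and both writers skip the host command when the freshly built table is identical to the copy cached from the previous send. Below is a standalone sketch of that build/compare/cache/send pattern; it is not driver code and every name in it is local to the example.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define TBL_SIZE 11			/* stand-in for HD_TABLE_SIZE */

	struct sens_state {
		uint16_t last_sent[TBL_SIZE];	/* table as sent the previous time */
	};

	static int send_sensitivity_cmd(const uint16_t *tbl)
	{
		(void)tbl;
		puts("sending SENSITIVITY_CMD");	/* placeholder for the host command */
		return 0;
	}

	static int sensitivity_write(struct sens_state *st, const uint16_t *new_tbl)
	{
		/* don't bother the uCode when nothing changed */
		if (!memcmp(new_tbl, st->last_sent, sizeof(st->last_sent))) {
			puts("no change, skipping command");
			return 0;
		}
		/* remember what we sent so the next call can compare against it */
		memcpy(st->last_sent, new_tbl, sizeof(st->last_sent));
		return send_sensitivity_cmd(new_tbl);
	}

	int main(void)
	{
		struct sens_state st = { { 0 } };
		uint16_t tbl[TBL_SIZE] = { 105, 220, 90 };	/* arbitrary values */

		sensitivity_write(&st, tbl);	/* sent */
		sensitivity_write(&st, tbl);	/* skipped */
		return 0;
	}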
le32_to_cpu(statistics->general.beacon_silence_rssi_c); + le32_to_cpu(rx_info->beacon_silence_rssi_c); statis.beacon_energy_a = - le32_to_cpu(statistics->general.beacon_energy_a); + le32_to_cpu(rx_info->beacon_energy_a); statis.beacon_energy_b = - le32_to_cpu(statistics->general.beacon_energy_b); + le32_to_cpu(rx_info->beacon_energy_b); statis.beacon_energy_c = - le32_to_cpu(statistics->general.beacon_energy_c); + le32_to_cpu(rx_info->beacon_energy_c); spin_unlock_irqrestore(&priv->lock, flags); @@ -637,9 +716,11 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); - iwl_sensitivity_write(priv); + if (priv->enhance_sensitivity_table) + iwl_enhance_sensitivity_write(priv); + else + iwl_sensitivity_write(priv); } -EXPORT_SYMBOL(iwl_sensitivity_calibration); static inline u8 find_first_chain(u8 mask) { @@ -656,8 +737,7 @@ static inline u8 find_first_chain(u8 mask) * 1) Which antennas are connected. * 2) Differential rx gain settings to balance the 3 receivers. */ -void iwl_chain_noise_calibration(struct iwl_priv *priv, - struct iwl_notif_statistics *stat_resp) +void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) { struct iwl_chain_noise_data *data = NULL; @@ -681,7 +761,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, u32 active_chains = 0; u8 num_tx_chains; unsigned long flags; - struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); + struct statistics_rx_non_phy *rx_info; u8 first_chain; if (priv->disable_chain_noise_cal) @@ -700,6 +780,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, } spin_lock_irqsave(&priv->lock, flags); + if (priv->cfg->bt_statistics) { + rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)-> + rx.general.common); + } else { + rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> + rx.general); + } if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); spin_unlock_irqrestore(&priv->lock, flags); @@ -708,8 +795,19 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK); rxon_chnum = le16_to_cpu(priv->staging_rxon.channel); - stat_band24 = !!(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK); - stat_chnum = le32_to_cpu(stat_resp->flag) >> 16; + if (priv->cfg->bt_statistics) { + stat_band24 = !!(((struct iwl_bt_notif_statistics *) + stat_resp)->flag & + STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(((struct iwl_bt_notif_statistics *) + stat_resp)->flag) >> 16; + } else { + stat_band24 = !!(((struct iwl_notif_statistics *) + stat_resp)->flag & + STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) + stat_resp)->flag) >> 16; + } /* Make sure we accumulate data for just the associated channel * (even if scanning). */ @@ -846,6 +944,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, } } + if (active_chains != priv->hw_params.valid_rx_ant && + active_chains != priv->chain_noise_data.active_chains) + IWL_DEBUG_CALIB(priv, + "Detected that not all antennas are connected! " + "Connected: %#x, valid: %#x.\n", + active_chains, priv->hw_params.valid_rx_ant); + /* Save for use within RXON, TX, SCAN commands, etc. 
*/ priv->chain_noise_data.active_chains = active_chains; IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", @@ -890,8 +995,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, data->state = IWL_CHAIN_NOISE_DONE; iwl_power_update_mode(priv, false); } -EXPORT_SYMBOL(iwl_chain_noise_calibration); - void iwl_reset_run_time_calib(struct iwl_priv *priv) { @@ -908,5 +1011,3 @@ void iwl_reset_run_time_calib(struct iwl_priv *priv) * periodically after association */ iwl_send_statistics_request(priv, CMD_ASYNC, true); } -EXPORT_SYMBOL(iwl_reset_run_time_calib); - diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c index 48c023b4ca36d779c2cd1b6ba4100f4659ae12d4..f052c6d09b374da05ca891f0e0a6b2ed8f78671f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c @@ -28,6 +28,30 @@ #include "iwl-agn-debugfs.h" +static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + if (priv->cfg->bt_statistics) + flag = le32_to_cpu(priv->_agn.statistics_bt.flag); + else + flag = le32_to_cpu(priv->_agn.statistics.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATISTICS_NARROW_BAND_MSK) + ? "enabled" : "disabled"); + + return p; +} + ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -58,24 +82,45 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, * the last statistics notification from uCode * might not reflect the current uCode activity */ - ofdm = &priv->statistics.rx.ofdm; - cck = &priv->statistics.rx.cck; - general = &priv->statistics.rx.general; - ht = &priv->statistics.rx.ofdm_ht; - accum_ofdm = &priv->accum_statistics.rx.ofdm; - accum_cck = &priv->accum_statistics.rx.cck; - accum_general = &priv->accum_statistics.rx.general; - accum_ht = &priv->accum_statistics.rx.ofdm_ht; - delta_ofdm = &priv->delta_statistics.rx.ofdm; - delta_cck = &priv->delta_statistics.rx.cck; - delta_general = &priv->delta_statistics.rx.general; - delta_ht = &priv->delta_statistics.rx.ofdm_ht; - max_ofdm = &priv->max_delta.rx.ofdm; - max_cck = &priv->max_delta.rx.cck; - max_general = &priv->max_delta.rx.general; - max_ht = &priv->max_delta.rx.ofdm_ht; - - pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + if (priv->cfg->bt_statistics) { + ofdm = &priv->_agn.statistics_bt.rx.ofdm; + cck = &priv->_agn.statistics_bt.rx.cck; + general = &priv->_agn.statistics_bt.rx.general.common; + ht = &priv->_agn.statistics_bt.rx.ofdm_ht; + accum_ofdm = &priv->_agn.accum_statistics_bt.rx.ofdm; + accum_cck = &priv->_agn.accum_statistics_bt.rx.cck; + accum_general = + &priv->_agn.accum_statistics_bt.rx.general.common; + accum_ht = &priv->_agn.accum_statistics_bt.rx.ofdm_ht; + delta_ofdm = &priv->_agn.delta_statistics_bt.rx.ofdm; + delta_cck = &priv->_agn.delta_statistics_bt.rx.cck; + delta_general = + &priv->_agn.delta_statistics_bt.rx.general.common; + delta_ht = &priv->_agn.delta_statistics_bt.rx.ofdm_ht; + max_ofdm = &priv->_agn.max_delta_bt.rx.ofdm; + max_cck = &priv->_agn.max_delta_bt.rx.cck; + max_general = 
&priv->_agn.max_delta_bt.rx.general.common; + max_ht = &priv->_agn.max_delta_bt.rx.ofdm_ht; + } else { + ofdm = &priv->_agn.statistics.rx.ofdm; + cck = &priv->_agn.statistics.rx.cck; + general = &priv->_agn.statistics.rx.general; + ht = &priv->_agn.statistics.rx.ofdm_ht; + accum_ofdm = &priv->_agn.accum_statistics.rx.ofdm; + accum_cck = &priv->_agn.accum_statistics.rx.cck; + accum_general = &priv->_agn.accum_statistics.rx.general; + accum_ht = &priv->_agn.accum_statistics.rx.ofdm_ht; + delta_ofdm = &priv->_agn.delta_statistics.rx.ofdm; + delta_cck = &priv->_agn.delta_statistics.rx.cck; + delta_general = &priv->_agn.delta_statistics.rx.general; + delta_ht = &priv->_agn.delta_statistics.rx.ofdm_ht; + max_ofdm = &priv->_agn.max_delta.rx.ofdm; + max_cck = &priv->_agn.max_delta.rx.cck; + max_general = &priv->_agn.max_delta.rx.general; + max_ht = &priv->_agn.max_delta.rx.ofdm_ht; + } + + pos += iwl_statistics_flag(priv, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Rx - OFDM:"); @@ -539,11 +584,19 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file, * the last statistics notification from uCode * might not reflect the current uCode activity */ - tx = &priv->statistics.tx; - accum_tx = &priv->accum_statistics.tx; - delta_tx = &priv->delta_statistics.tx; - max_tx = &priv->max_delta.tx; - pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + if (priv->cfg->bt_statistics) { + tx = &priv->_agn.statistics_bt.tx; + accum_tx = &priv->_agn.accum_statistics_bt.tx; + delta_tx = &priv->_agn.delta_statistics_bt.tx; + max_tx = &priv->_agn.max_delta_bt.tx; + } else { + tx = &priv->_agn.statistics.tx; + accum_tx = &priv->_agn.accum_statistics.tx; + delta_tx = &priv->_agn.delta_statistics.tx; + max_tx = &priv->_agn.max_delta.tx; + } + + pos += iwl_statistics_flag(priv, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_Tx:"); @@ -738,8 +791,8 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, char *buf; int bufsz = sizeof(struct statistics_general) * 10 + 300; ssize_t ret; - struct statistics_general *general, *accum_general; - struct statistics_general *delta_general, *max_general; + struct statistics_general_common *general, *accum_general; + struct statistics_general_common *delta_general, *max_general; struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; struct statistics_div *div, *accum_div, *delta_div, *max_div; @@ -756,19 +809,35 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, * the last statistics notification from uCode * might not reflect the current uCode activity */ - general = &priv->statistics.general; - dbg = &priv->statistics.general.dbg; - div = &priv->statistics.general.div; - accum_general = &priv->accum_statistics.general; - delta_general = &priv->delta_statistics.general; - max_general = &priv->max_delta.general; - accum_dbg = &priv->accum_statistics.general.dbg; - delta_dbg = &priv->delta_statistics.general.dbg; - max_dbg = &priv->max_delta.general.dbg; - accum_div = &priv->accum_statistics.general.div; - delta_div = &priv->delta_statistics.general.div; - max_div = &priv->max_delta.general.div; - pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + if (priv->cfg->bt_statistics) { + general = &priv->_agn.statistics_bt.general.common; + dbg = &priv->_agn.statistics_bt.general.common.dbg; + div = &priv->_agn.statistics_bt.general.common.div; + accum_general = 
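The repeated "if (priv->cfg->bt_statistics)" branches above and below exist because BT-capable uCode delivers its statistics in a different notification layout whose rx "general" block embeds the ordinary fields in a common sub-structure; the driver casts the raw response to one layout or the other and then works only on the shared fields. A standalone sketch of that selection follows; every type and field name in it is invented for illustration and does not come from the driver.

	#include <stdint.h>
	#include <stdio.h>

	struct rx_common {			/* fields both layouts provide */
		uint32_t channel_load;
		uint32_t beacon_energy_a;
	};

	struct notif_plain {			/* legacy layout: common block directly */
		uint32_t flag;
		struct rx_common general;
	};

	struct notif_bt {			/* BT layout: common block nested deeper */
		uint32_t flag;
		struct {
			struct rx_common common;
			uint32_t num_bt_kills;
		} general;
	};

	static const struct rx_common *get_rx_common(const void *resp, int bt_statistics)
	{
		if (bt_statistics)
			return &((const struct notif_bt *)resp)->general.common;
		return &((const struct notif_plain *)resp)->general;
	}

	int main(void)
	{
		struct notif_plain plain = { .general = { .channel_load = 7 } };
		struct notif_bt bt = { .general = { .common = { .channel_load = 42 } } };

		printf("plain=%u bt=%u\n",
		       get_rx_common(&plain, 0)->channel_load,
		       get_rx_common(&bt, 1)->channel_load);
		return 0;
	}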
&priv->_agn.accum_statistics_bt.general.common; + accum_dbg = &priv->_agn.accum_statistics_bt.general.common.dbg; + accum_div = &priv->_agn.accum_statistics_bt.general.common.div; + delta_general = &priv->_agn.delta_statistics_bt.general.common; + max_general = &priv->_agn.max_delta_bt.general.common; + delta_dbg = &priv->_agn.delta_statistics_bt.general.common.dbg; + max_dbg = &priv->_agn.max_delta_bt.general.common.dbg; + delta_div = &priv->_agn.delta_statistics_bt.general.common.div; + max_div = &priv->_agn.max_delta_bt.general.common.div; + } else { + general = &priv->_agn.statistics.general.common; + dbg = &priv->_agn.statistics.general.common.dbg; + div = &priv->_agn.statistics.general.common.div; + accum_general = &priv->_agn.accum_statistics.general.common; + accum_dbg = &priv->_agn.accum_statistics.general.common.dbg; + accum_div = &priv->_agn.accum_statistics.general.common.div; + delta_general = &priv->_agn.delta_statistics.general.common; + max_general = &priv->_agn.max_delta.general.common; + delta_dbg = &priv->_agn.delta_statistics.general.common.dbg; + max_dbg = &priv->_agn.max_delta.general.common.dbg; + delta_div = &priv->_agn.delta_statistics.general.common.div; + max_div = &priv->_agn.max_delta.general.common.div; + } + + pos += iwl_statistics_flag(priv, buf, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" "acumulative delta max\n", "Statistics_General:"); @@ -790,6 +859,13 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, le32_to_cpu(dbg->burst_count), accum_dbg->burst_count, delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u %10u %10u %10u\n", "sleep_time:", @@ -848,3 +924,90 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, kfree(buf); return ret; } + +ssize_t iwl_ucode_bt_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200; + ssize_t ret; + struct statistics_bt_activity *bt, *accum_bt; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + ret = iwl_send_statistics_request(priv, CMD_SYNC, false); + mutex_unlock(&priv->mutex); + + if (ret) { + IWL_ERR(priv, + "Error sending statistics request: %zd\n", ret); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + bt = &priv->_agn.statistics_bt.general.activity; + accum_bt = &priv->_agn.accum_statistics_bt.general.activity; + + pos += iwl_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\t\t\tcurrent\t\t\taccumulative\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_tx_req_cnt), + accum_bt->hi_priority_tx_req_cnt); 
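The debugfs readers above and below build their text with repeated "pos += scnprintf(buf + pos, bufsz - pos, ...)": the kernel's scnprintf() returns the number of characters actually stored rather than the would-be length, so the running offset can never move past the end of the buffer even when the bufsz estimate is tight. The standalone model below reproduces that behaviour on top of userspace vsnprintf(); it is illustration only, not the kernel helper.

	#include <stdarg.h>
	#include <stdio.h>

	static int scnprintf_model(char *buf, size_t size, const char *fmt, ...)
	{
		va_list args;
		int n;

		if (!size)
			return 0;
		va_start(args, fmt);
		n = vsnprintf(buf, size, fmt, args);
		va_end(args);
		/* clamp to what actually fit, like the kernel helper does */
		return n >= (int)size ? (int)size - 1 : n;
	}

	int main(void)
	{
		char buf[64];
		int pos = 0;

		pos += scnprintf_model(buf + pos, sizeof(buf) - pos,
				       "Statistics_BT:\n");
		pos += scnprintf_model(buf + pos, sizeof(buf) - pos,
				       "hi_priority_tx_req_cnt:\t%u\n", 17u);
		fwrite(buf, 1, pos, stdout);
		return 0;
	}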
+ pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_tx_denied_cnt), + accum_bt->hi_priority_tx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_tx_req_cnt), + accum_bt->lo_priority_tx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_tx_denied_cnt), + accum_bt->lo_priority_tx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_rx_req_cnt), + accum_bt->hi_priority_rx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->hi_priority_rx_denied_cnt), + accum_bt->hi_priority_rx_denied_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_rx_req_cnt), + accum_bt->lo_priority_rx_req_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n", + le32_to_cpu(bt->lo_priority_rx_denied_cnt), + accum_bt->lo_priority_rx_denied_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, + "(rx)num_bt_kills:\t\t%u\t\t\t%u\n", + le32_to_cpu(priv->_agn.statistics_bt.rx. + general.num_bt_kills), + priv->_agn.accum_statistics_bt.rx. + general.num_bt_kills); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h index 59b1f25f0d85bd9944bfae9a3f47d6c46005a92b..bbdce5913ac77a51d52fb02cbef8a587287b6b8e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h @@ -37,6 +37,8 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); +ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); #else static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -53,4 +55,9 @@ static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user { return 0; } +static ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c index 01658cf82d3904dc7cb1dc91bb7bdbf7f3152f85..a7216dda97868dd576c02ddee55484e9a3121ac4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c @@ -37,7 +37,7 @@ #include "iwl-io.h" #include "iwl-agn.h" -static int iwlagn_send_rxon_assoc(struct iwl_priv *priv) +int iwlagn_send_rxon_assoc(struct iwl_priv *priv) { int ret = 0; struct iwl5000_rxon_assoc_cmd rxon_assoc; @@ -84,7 +84,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv) return ret; } -static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) +int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) { struct iwl_tx_ant_config_cmd tx_ant_cmd = { .valid = cpu_to_le32(valid_tx_ant), @@ -164,7 +164,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD; + 
cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_gain_cmd; cmd.hdr.first_group = 0; cmd.hdr.groups_num = 1; cmd.hdr.data_valid = 1; @@ -176,14 +176,6 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, data->radio_write = 1; data->state = IWL_CHAIN_NOISE_CALIBRATED; } - - data->chain_noise_a = 0; - data->chain_noise_b = 0; - data->chain_noise_c = 0; - data->chain_signal_a = 0; - data->chain_signal_b = 0; - data->chain_signal_c = 0; - data->beacon_count = 0; } static void iwlagn_chain_noise_reset(struct iwl_priv *priv) @@ -191,11 +183,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; - if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && + iwl_is_associated(priv)) { struct iwl_calib_chain_noise_reset_cmd cmd; - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD; + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_reset_cmd; cmd.hdr.first_group = 0; cmd.hdr.groups_num = 1; cmd.hdr.data_valid = 1; @@ -212,7 +214,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info, __le32 *tx_flags) { - *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; + *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; } /* Calc max signal level (dBm) among 3 possible receivers */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h index f9a3fbb6338fe04dd8e082d747631c114c02132a..a52b82c8e7a6969482f1d9524b56c96821322e81 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h @@ -112,7 +112,7 @@ */ struct iwlagn_scd_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; -} __attribute__ ((packed)); +} __packed; #endif /* __iwl_agn_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 0f292a210ed92d1a67153234b56045647807f3f9..a1b6d202d57c0b77b4bd510ada832124486eeceb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -77,7 +77,7 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", agg->frame_count, agg->start_idx, idx); - info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); + info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); info->status.rates[0].count = tx_resp->failure_frame + 1; info->flags &= ~IEEE80211_TX_CTL_AMPDU; info->flags |= iwl_tx_status_to_mac80211(status); @@ -93,6 +93,12 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, } else { /* Two or more frames were attempted; expect block-ack */ u64 bitmap = 0; + + /* + * Start is the lowest frame sent. It may not be the first + * frame in the batch; we figure this out dynamically during + * the following loop. 
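+		 *
+		 * Worked example (illustrative): with the 256-entry circular
+		 * buffer, start = 2 and idx = 250 gives sh = 248 (>= 64).
+		 * The code below then re-bases the window: the bitmap is
+		 * shifted left by 0x100 - 250 + 2 = 8, start becomes 250 and
+		 * the current frame is recorded in bit 0, while the bit for
+		 * frame 2 now sits at position 8 (8 ahead of 250 mod 256).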
+ */ int start = agg->start_idx; /* Construct bit-map of pending frames within Tx window */ @@ -131,25 +137,58 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", i, idx, SEQ_TO_SN(sc)); + /* + * sh -> how many frames ahead of the starting frame is + * the current one? + * + * Note that all frames sent in the batch must be in a + * 64-frame window, so this number should be in [0,63]. + * If outside of this window, then we've found a new + * "first" frame in the batch and need to change start. + */ sh = idx - start; - if (sh > 64) { - sh = (start - idx) + 0xff; + + /* + * If >= 64, out of window. start must be at the front + * of the circular buffer, idx must be near the end of + * the buffer, and idx is the new "first" frame. Shift + * the indices around. + */ + if (sh >= 64) { + /* Shift bitmap by start - idx, wrapped */ + sh = 0x100 - idx + start; bitmap = bitmap << sh; + /* Now idx is the new start so sh = 0 */ sh = 0; start = idx; - } else if (sh < -64) - sh = 0xff - (start - idx); - else if (sh < 0) { + /* + * If <= -64 then wraps the 256-pkt circular buffer + * (e.g., start = 255 and idx = 0, sh should be 1) + */ + } else if (sh <= -64) { + sh = 0x100 - start + idx; + /* + * If < 0 but > -64, out of window. idx is before start + * but not wrapped. Shift the indices around. + */ + } else if (sh < 0) { + /* Shift by how far start is ahead of idx */ sh = start - idx; - start = idx; bitmap = bitmap << sh; + /* Now idx is the new start so sh = 0 */ + start = idx; sh = 0; } + /* Sequence number start + sh was sent in this batch */ bitmap |= 1ULL << sh; IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", start, (unsigned long long)bitmap); } + /* + * Store the bitmap and possibly the new start, if we wrapped + * the buffer above + */ agg->bitmap = bitmap; agg->start_idx = start; IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", @@ -166,7 +205,9 @@ void iwl_check_abort_status(struct iwl_priv *priv, u8 frame_count, u32 status) { if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { - IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n"); + IWL_ERR(priv, "Tx flush command to flush out all frames\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->tx_flush); } } @@ -184,6 +225,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, int tid; int sta_id; int freed; + unsigned long flags; if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " @@ -193,15 +235,16 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, return; } - info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); memset(&info->status, 0, sizeof(info->status)); tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; + spin_lock_irqsave(&priv->sta_lock, flags); if (txq->sched_retry) { const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp); - struct iwl_ht_agg *agg = NULL; + struct iwl_ht_agg *agg; agg = &priv->stations[sta_id].tid[tid].agg; @@ -256,6 +299,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); iwl_check_abort_status(priv, tx_resp->frame_count, status); + spin_unlock_irqrestore(&priv->sta_lock, flags); } void iwlagn_rx_handler_setup(struct iwl_priv *priv) @@ -319,7 +363,8 @@ int 
iwlagn_send_tx_power(struct iwl_priv *priv) void iwlagn_temperature(struct iwl_priv *priv) { /* store temperature from statistics (in Celsius) */ - priv->temperature = le32_to_cpu(priv->statistics.general.temperature); + priv->temperature = + le32_to_cpu(priv->_agn.statistics.general.common.temperature); iwl_tt_handler(priv); } @@ -444,7 +489,7 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) /* Tell device where to find RBD circular buffer in DRAM */ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, - (u32)(rxq->dma_addr >> 8)); + (u32)(rxq->bd_dma >> 8)); /* Tell device where in DRAM to update its Rx status */ iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, @@ -709,7 +754,7 @@ void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) } dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->dma_addr); + rxq->bd_dma); dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), rxq->rb_stts, rxq->rb_stts_dma); rxq->bd = NULL; @@ -755,132 +800,6 @@ static inline int iwlagn_calc_rssi(struct iwl_priv *priv, return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); } -#ifdef CONFIG_IWLWIFI_DEBUG -/** - * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions - * - * You may hack this function to show different aspects of received frames, - * including selective frame dumps. - * group100 parameter selects whether to show 1 out of 100 good data frames. - * All beacon and probe response frames are printed. - */ -static void iwlagn_dbg_report_frame(struct iwl_priv *priv, - struct iwl_rx_phy_res *phy_res, u16 length, - struct ieee80211_hdr *header, int group100) -{ - u32 to_us; - u32 print_summary = 0; - u32 print_dump = 0; /* set to 1 to dump all frames' contents */ - u32 hundred = 0; - u32 dataframe = 0; - __le16 fc; - u16 seq_ctl; - u16 channel; - u16 phy_flags; - u32 rate_n_flags; - u32 tsf_low; - int rssi; - - if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX))) - return; - - /* MAC header */ - fc = header->frame_control; - seq_ctl = le16_to_cpu(header->seq_ctrl); - - /* metadata */ - channel = le16_to_cpu(phy_res->channel); - phy_flags = le16_to_cpu(phy_res->phy_flags); - rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); - - /* signal statistics */ - rssi = iwlagn_calc_rssi(priv, phy_res); - tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff; - - to_us = !compare_ether_addr(header->addr1, priv->mac_addr); - - /* if data frame is to us and all is good, - * (optionally) print summary for only 1 out of every 100 */ - if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == - cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { - dataframe = 1; - if (!group100) - print_summary = 1; /* print each frame */ - else if (priv->framecnt_to_us < 100) { - priv->framecnt_to_us++; - print_summary = 0; - } else { - priv->framecnt_to_us = 0; - print_summary = 1; - hundred = 1; - } - } else { - /* print summary for all other frames */ - print_summary = 1; - } - - if (print_summary) { - char *title; - int rate_idx; - u32 bitrate; - - if (hundred) - title = "100Frames"; - else if (ieee80211_has_retry(fc)) - title = "Retry"; - else if (ieee80211_is_assoc_resp(fc)) - title = "AscRsp"; - else if (ieee80211_is_reassoc_resp(fc)) - title = "RasRsp"; - else if (ieee80211_is_probe_resp(fc)) { - title = "PrbRsp"; - print_dump = 1; /* dump frame contents */ - } else if (ieee80211_is_beacon(fc)) { - title = "Beacon"; - print_dump = 1; /* dump frame contents */ - } else if (ieee80211_is_atim(fc)) - title = "ATIM"; 
- else if (ieee80211_is_auth(fc)) - title = "Auth"; - else if (ieee80211_is_deauth(fc)) - title = "DeAuth"; - else if (ieee80211_is_disassoc(fc)) - title = "DisAssoc"; - else - title = "Frame"; - - rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); - if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) { - bitrate = 0; - WARN_ON_ONCE(1); - } else { - bitrate = iwl_rates[rate_idx].ieee / 2; - } - - /* print frame summary. - * MAC addresses show just the last byte (for brevity), - * but you can hack it to show more, if you'd like to. */ - if (dataframe) - IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " - "len=%u, rssi=%d, chnl=%d, rate=%u,\n", - title, le16_to_cpu(fc), header->addr1[5], - length, rssi, channel, bitrate); - else { - /* src/dst addresses assume managed mode */ - IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, " - "len=%u, rssi=%d, tim=%lu usec, " - "phy=0x%02x, chnl=%d\n", - title, le16_to_cpu(fc), header->addr1[5], - header->addr3[5], length, rssi, - tsf_low - priv->scan_start_tsf, - phy_flags, channel); - } - } - if (print_dump) - iwl_print_hex_dump(priv, IWL_DL_RX, header, length); -} -#endif - static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) { u32 decrypt_out = 0; @@ -988,7 +907,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_phy_res *phy_res; __le32 rx_pkt_status; - struct iwl4965_rx_mpdu_res_start *amsdu; + struct iwl_rx_mpdu_res_start *amsdu; u32 len; u32 ampdu_status; u32 rate_n_flags; @@ -1017,7 +936,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv, return; } phy_res = &priv->_agn.last_phy_res; - amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); len = le16_to_cpu(amsdu->byte_count); rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); @@ -1060,11 +979,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* Find max signal strength (dBm) among 3 antenna/receiver chains */ rx_status.signal = iwlagn_calc_rssi(priv, phy_res); -#ifdef CONFIG_IWLWIFI_DEBUG - /* Set "1" to report good data frames in groups of 100 */ - if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX)) - iwlagn_dbg_report_frame(priv, phy_res, len, header, 1); -#endif iwl_dbg_log_rx_data_frame(priv, len, header); IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", rx_status.signal, (unsigned long long)rx_status.mactime); @@ -1252,6 +1166,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) bool is_active = false; int chan_mod; u8 active_chains; + u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; conf = ieee80211_get_hw_conf(priv->hw); @@ -1319,7 +1234,10 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); spin_lock_irqsave(&priv->lock, flags); - interval = vif ? 
vif->bss_conf.beacon_int : 0; + if (priv->is_internal_short_scan) + interval = 0; + else + interval = vif->bss_conf.beacon_int; spin_unlock_irqrestore(&priv->lock, flags); scan->suspend_time = 0; @@ -1403,11 +1321,14 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; - if (priv->cfg->scan_antennas[band]) - rx_ant = priv->cfg->scan_antennas[band]; + if (priv->cfg->scan_rx_antennas[band]) + rx_ant = priv->cfg->scan_rx_antennas[band]; + + if (priv->cfg->scan_tx_antennas[band]) + scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; - priv->scan_tx_ant[band] = - iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]); + priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band], + scan_tx_antennas); rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); @@ -1433,13 +1354,15 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) if (!priv->is_internal_short_scan) { cmd_len = iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + vif->addr, priv->scan_request->ie, priv->scan_request->ie_len, IWL_MAX_SCAN_SIZE - sizeof(*scan)); } else { + /* use bcast addr, will not be transmitted but must be valid */ cmd_len = iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, - NULL, 0, + iwl_bcast_addr, NULL, 0, IWL_MAX_SCAN_SIZE - sizeof(*scan)); } @@ -1502,3 +1425,96 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv, return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, vif->bss_conf.bssid); } + +void iwl_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed) +{ + WARN_ON(!spin_is_locked(&priv->sta_lock)); + + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + else { + IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", + priv->stations[sta_id].tid[tid].tfds_in_queue, + freed); + priv->stations[sta_id].tid[tid].tfds_in_queue = 0; + } +} + +#define IWL_FLUSH_WAIT_MS 2000 + +int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq; + struct iwl_queue *q; + int cnt; + unsigned long now = jiffies; + int ret = 0; + + /* waiting for all the tx frames complete might take a while */ + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + if (cnt == IWL_CMD_QUEUE_NUM) + continue; + txq = &priv->txq[cnt]; + q = &txq->q; + while (q->read_ptr != q->write_ptr && !time_after(jiffies, + now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) + msleep(1); + + if (q->read_ptr != q->write_ptr) { + IWL_ERR(priv, "fail to flush all tx fifo queues\n"); + ret = -ETIMEDOUT; + break; + } + } + return ret; +} + +#define IWL_TX_QUEUE_MSK 0xfffff + +/** + * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode + * + * pre-requirements: + * 1. acquire mutex before calling + * 2. 
make sure rf is on and not in exit state + */ +int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) +{ + struct iwl_txfifo_flush_cmd flush_cmd; + struct iwl_host_cmd cmd = { + .id = REPLY_TXFIFO_FLUSH, + .len = sizeof(struct iwl_txfifo_flush_cmd), + .flags = CMD_SYNC, + .data = &flush_cmd, + }; + + might_sleep(); + + memset(&flush_cmd, 0, sizeof(flush_cmd)); + flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK | + IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK; + if (priv->cfg->sku & IWL_SKU_N) + flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; + + IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", + flush_cmd.fifo_control); + flush_cmd.flush_control = cpu_to_le16(flush_control); + + return iwl_send_cmd(priv, &cmd); +} + +void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) +{ + mutex_lock(&priv->mutex); + ieee80211_stop_queues(priv->hw); + if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; + } + IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); + iwlagn_wait_tx_queue_empty(priv); +done: + ieee80211_wake_queues(priv->hw); + mutex_unlock(&priv->mutex); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index cf4a95bae4ffb1369ca5a192919a5caf0445f8cc..35c86d22b14bc67e33748854b02a80719315fb9e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -313,8 +313,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, */ IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", tid); - ieee80211_stop_tx_ba_session(sta, tid, - WLAN_BACK_INITIATOR); + ieee80211_stop_tx_ba_session(sta, tid); } } else IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid); @@ -325,18 +324,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, struct iwl_lq_sta *lq_data, struct ieee80211_sta *sta) { - if ((tid < TID_MAX_LOAD_COUNT) && - !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) { - if (priv->cfg->use_rts_for_ht) { - /* - * switch to RTS/CTS if it is the prefer protection - * method for HT traffic - */ - IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n"); - priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; - iwlcore_commit_rxon(priv); - } - } + if (tid < TID_MAX_LOAD_COUNT) + rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", + tid, TID_MAX_LOAD_COUNT); } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c new file mode 100644 index 0000000000000000000000000000000000000000..9490eced1198ad178f6191389156565ceca9cbd1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c @@ -0,0 +1,351 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-calib.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-agn-hw.h" +#include "iwl-agn.h" + +void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + priv->missed_beacon_threshold) { + IWL_DEBUG_CALIB(priv, + "missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(STATUS_SCANNING, &priv->status)) + iwl_init_sensitivity(priv); + } +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. */ +static void iwl_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info; + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a, bcn_silence_b, bcn_silence_c; + int last_rx_noise; + + if (priv->cfg->bt_statistics) + rx_info = &(priv->_agn.statistics_bt.rx.general.common); + else + rx_info = &(priv->_agn.statistics.rx.general); + bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + last_rx_noise = (total_silence / num_active_rx) - 107; + else + last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + last_rx_noise); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +/* + * based on the assumption of all statistics counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. 
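+ *
+ * For each __le32 counter the delta against the previous notification is
+ * computed and added to the accumulative copy, and the largest delta seen
+ * so far is remembered; a counter that did not increase (including one
+ * that wrapped) is simply skipped for that sample.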
+ */ +static void iwl_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ + int i, size; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + struct statistics_general_common *general, *accum_general; + struct statistics_tx *tx, *accum_tx; + + if (priv->cfg->bt_statistics) { + prev_stats = (__le32 *)&priv->_agn.statistics_bt; + accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; + size = sizeof(struct iwl_bt_notif_statistics); + general = &priv->_agn.statistics_bt.general.common; + accum_general = &priv->_agn.accum_statistics_bt.general.common; + tx = &priv->_agn.statistics_bt.tx; + accum_tx = &priv->_agn.accum_statistics_bt.tx; + delta = (u32 *)&priv->_agn.delta_statistics_bt; + max_delta = (u32 *)&priv->_agn.max_delta_bt; + } else { + prev_stats = (__le32 *)&priv->_agn.statistics; + accum_stats = (u32 *)&priv->_agn.accum_statistics; + size = sizeof(struct iwl_notif_statistics); + general = &priv->_agn.statistics.general.common; + accum_general = &priv->_agn.accum_statistics.general.common; + tx = &priv->_agn.statistics.tx; + accum_tx = &priv->_agn.accum_statistics.tx; + delta = (u32 *)&priv->_agn.delta_statistics; + max_delta = (u32 *)&priv->_agn.max_delta; + } + for (i = sizeof(__le32); i < size; + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + accum_general->temperature = general->temperature; + accum_general->temperature_m = general->temperature_m; + accum_general->ttl_timestamp = general->ttl_timestamp; + accum_tx->tx_power.ant_a = tx->tx_power.ant_a; + accum_tx->tx_power.ant_b = tx->tx_power.ant_b; + accum_tx->tx_power.ant_c = tx->tx_power.ant_c; +} +#endif + +#define REG_RECALIB_PERIOD (60) + +/** + * iwl_good_plcp_health - checks for plcp error. + * + * When the plcp error is exceeding the thresholds, reset the radio + * to improve the throughput. + */ +bool iwl_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) +{ + bool rc = true; + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; + + if (priv->cfg->plcp_delta_threshold == + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { + IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); + return rc; + } + + /* + * check for plcp_err and trigger radio reset if it exceeds + * the plcp error threshold plcp_delta. + */ + plcp_received_jiffies = jiffies; + plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - + (long) priv->plcp_jiffies); + priv->plcp_jiffies = plcp_received_jiffies; + /* + * check to make sure plcp_msec is not 0 to prevent division + * by zero. + */ + if (plcp_msec) { + struct statistics_rx_phy *ofdm; + struct statistics_rx_ht_phy *ofdm_ht; + + if (priv->cfg->bt_statistics) { + ofdm = &pkt->u.stats_bt.rx.ofdm; + ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; + combined_plcp_delta = + (le32_to_cpu(ofdm->plcp_err) - + le32_to_cpu(priv->_agn.statistics_bt. + rx.ofdm.plcp_err)) + + (le32_to_cpu(ofdm_ht->plcp_err) - + le32_to_cpu(priv->_agn.statistics_bt. + rx.ofdm_ht.plcp_err)); + } else { + ofdm = &pkt->u.stats.rx.ofdm; + ofdm_ht = &pkt->u.stats.rx.ofdm_ht; + combined_plcp_delta = + (le32_to_cpu(ofdm->plcp_err) - + le32_to_cpu(priv->_agn.statistics. 
+ rx.ofdm.plcp_err)) + + (le32_to_cpu(ofdm_ht->plcp_err) - + le32_to_cpu(priv->_agn.statistics. + rx.ofdm_ht.plcp_err)); + } + + if ((combined_plcp_delta > 0) && + ((combined_plcp_delta * 100) / plcp_msec) > + priv->cfg->plcp_delta_threshold) { + /* + * if plcp_err exceed the threshold, + * the following data is printed in csv format: + * Text: plcp_err exceeded %d, + * Received ofdm.plcp_err, + * Current ofdm.plcp_err, + * Received ofdm_ht.plcp_err, + * Current ofdm_ht.plcp_err, + * combined_plcp_delta, + * plcp_msec + */ + IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " + "%u, %u, %u, %u, %d, %u mSecs\n", + priv->cfg->plcp_delta_threshold, + le32_to_cpu(ofdm->plcp_err), + le32_to_cpu(ofdm->plcp_err), + le32_to_cpu(ofdm_ht->plcp_err), + le32_to_cpu(ofdm_ht->plcp_err), + combined_plcp_delta, plcp_msec); + + rc = false; + } + } + return rc; +} + +void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (priv->cfg->bt_statistics) { + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_bt_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_agn.statistics_bt.general.common.temperature != + pkt->u.stats_bt.general.common.temperature) || + ((priv->_agn.statistics_bt.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats_bt.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt); +#endif + + } else { + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_agn.statistics.general.common.temperature != + pkt->u.stats.general.common.temperature) || + ((priv->_agn.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); +#endif + + } + + iwl_recover_from_statistics(priv, pkt); + + if (priv->cfg->bt_statistics) + memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, + sizeof(priv->_agn.statistics_bt)); + else + memcpy(&priv->_agn.statistics, &pkt->u.stats, + sizeof(priv->_agn.statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl_rx_calc_noise(priv); + queue_work(priv->workqueue, &priv->run_time_calib_work); + } + if (priv->cfg->ops->lib->temp_ops.temperature && change) + priv->cfg->ops->lib->temp_ops.temperature(priv); +} + +void iwl_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + memset(&priv->_agn.accum_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_agn.delta_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_agn.max_delta, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_agn.accum_statistics_bt, 0, + sizeof(struct 
iwl_bt_notif_statistics)); + memset(&priv->_agn.delta_statistics_bt, 0, + sizeof(struct iwl_bt_notif_statistics)); + memset(&priv->_agn.max_delta_bt, 0, + sizeof(struct iwl_bt_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl_rx_statistics(priv, rxb); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 7d614c4d3c6200d5410bba749105f4d371b2e5b7..55a1b31fd09ae058bf805583d508715ab6a12d09 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -233,6 +233,7 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, { unsigned long flags; u16 ra_tid; + int ret; if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues @@ -248,7 +249,9 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, ra_tid = BUILD_RAxTID(sta_id, tid); /* Modify device's station table to Tx this TID */ - iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + if (ret) + return ret; spin_lock_irqsave(&priv->lock, flags); @@ -469,7 +472,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, } /* Set up antennas */ - priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); /* Set the rate in the TX cmd */ @@ -567,10 +571,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); /* Find index into station table for destination station */ - if (!info->control.sta) - sta_id = priv->hw_params.bcast_sta_id; - else - sta_id = iwl_sta_id(info->control.sta); + sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); if (sta_id == IWL_INVALID_STATION) { IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", hdr->addr1); @@ -598,11 +599,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) } txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); + + /* irqs already disabled/saved above when locking priv->lock */ + spin_lock(&priv->sta_lock); + if (ieee80211_is_data_qos(fc)) { qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - if (unlikely(tid >= MAX_TID_COUNT)) + if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { + spin_unlock(&priv->sta_lock); goto drop_unlock; + } seq_number = priv->stations[sta_id].tid[tid].seq_number; seq_number &= IEEE80211_SCTL_SEQ; hdr->seq_ctrl = hdr->seq_ctrl & @@ -620,15 +627,22 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) swq_id = txq->swq_id; q = &txq->q; - if (unlikely(iwl_queue_space(q) < q->high_mark)) + if (unlikely(iwl_queue_space(q) < q->high_mark)) { + spin_unlock(&priv->sta_lock); goto drop_unlock; + } - if (ieee80211_is_data_qos(fc)) + if (ieee80211_is_data_qos(fc)) { priv->stations[sta_id].tid[tid].tfds_in_queue++; + if (!ieee80211_has_morefrags(fc)) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + + spin_unlock(&priv->sta_lock); /* Set up driver data for this TFD */ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); - txq->txb[q->write_ptr].skb[0] = skb; + txq->txb[q->write_ptr].skb = skb; /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_cmd = txq->cmd[q->write_ptr]; @@ -694,8 +708,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, len, 
PCI_DMA_BIDIRECTIONAL); - pci_unmap_addr_set(out_meta, mapping, txcmd_phys); - pci_unmap_len_set(out_meta, len, len); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, len); /* Add buffer containing Tx command and MAC(!) header to TFD's * first entry */ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, @@ -703,8 +717,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) if (!ieee80211_has_morefrags(hdr->frame_control)) { txq->need_update = 1; - if (qc) - priv->stations[sta_id].tid[tid].seq_number = seq_number; } else { wait_write_ptr = 1; txq->need_update = 0; @@ -938,9 +950,12 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv) /* Stop each Tx DMA channel, and wait for it to be idle */ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); - iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, + if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), - 1000); + 1000)) + IWL_ERR(priv, "Failing on timeout while stopping" + " DMA channel %d [0x%08x]", ch, + iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); } spin_unlock_irqrestore(&priv->lock, flags); } @@ -1009,6 +1024,8 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, if (ret) return ret; + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; if (tid_data->tfds_in_queue == 0) { IWL_DEBUG_HT(priv, "HW queue is empty\n"); tid_data->agg.state = IWL_AGG_ON; @@ -1018,6 +1035,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, tid_data->tfds_in_queue); tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; } + spin_unlock_irqrestore(&priv->sta_lock, flags); return ret; } @@ -1040,11 +1058,14 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, return -ENXIO; } + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->stations[sta_id].tid[tid].agg.state == IWL_EMPTYING_HW_QUEUE_ADDBA) { IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } @@ -1062,13 +1083,17 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); priv->stations[sta_id].tid[tid].agg.state = IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } IWL_DEBUG_HT(priv, "HW queue is empty\n"); priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; - spin_lock_irqsave(&priv->lock, flags); + /* do not restore/save irqs */ + spin_unlock(&priv->sta_lock); + spin_lock(&priv->lock); + /* * the only reason this call can fail is queue number out of range, * which can happen if uCode is reloaded and all the station @@ -1092,6 +1117,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv, u8 *addr = priv->stations[sta_id].sta.sta.addr; struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; + WARN_ON(!spin_is_locked(&priv->sta_lock)); + switch (priv->stations[sta_id].tid[tid].agg.state) { case IWL_EMPTYING_HW_QUEUE_DELBA: /* We are reclaiming the last packet of the */ @@ -1116,6 +1143,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv, } break; } + return 0; } @@ -1159,12 +1187,12 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = 
&txq->txb[txq->q.read_ptr]; - iwlagn_tx_status(priv, tx_info->skb[0]); + iwlagn_tx_status(priv, tx_info->skb); - hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; + hdr = (struct ieee80211_hdr *)tx_info->skb->data; if (hdr && ieee80211_is_data_qos(hdr->frame_control)) nfreed++; - tx_info->skb[0] = NULL; + tx_info->skb = NULL; if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); @@ -1188,7 +1216,7 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv, int i, sh, ack; u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); - u64 bitmap; + u64 bitmap, sent_bitmap; int successes = 0; struct ieee80211_tx_info *info; @@ -1216,24 +1244,26 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv, /* check for success or failure according to the * transmitted bitmap and block-ack bitmap */ - bitmap &= agg->bitmap; + sent_bitmap = bitmap & agg->bitmap; /* For each frame attempted in aggregation, * update driver's record of tx frame's status. */ - for (i = 0; i < agg->frame_count ; i++) { - ack = bitmap & (1ULL << i); - successes += !!ack; + i = 0; + while (sent_bitmap) { + ack = sent_bitmap & 1ULL; + successes += ack; IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, agg->start_idx + i); + sent_bitmap >>= 1; + ++i; } - info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); + info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); memset(&info->status, 0, sizeof(info->status)); info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_AMPDU; info->status.ampdu_ack_len = successes; - info->status.ampdu_ack_map = bitmap; info->status.ampdu_len = agg->frame_count; iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); @@ -1281,6 +1311,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, int index; int sta_id; int tid; + unsigned long flags; /* "flow" corresponds to Tx queue */ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); @@ -1308,7 +1339,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, /* Find index just before block-ack window */ index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); - /* TODO: Need to get this copy more safely - now good for debug */ + spin_lock_irqsave(&priv->sta_lock, flags); IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n", @@ -1344,4 +1375,6 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow); } + + spin_unlock_irqrestore(&priv->sta_lock, flags); } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index 637286c396fe9c7cf992ba3c748362f74d130a81..6f77441cb65a3f734260e23b1a38a08574d4a668 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -423,3 +423,126 @@ int iwlagn_alive_notify(struct iwl_priv *priv) return 0; } + + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. 
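+ *
+ * Only one 32-bit word per 100 bytes of image is read back, and the check
+ * gives up after three mismatches, so a bad image is detected cheaply;
+ * iwl_verify_inst_full() below does the exhaustive comparison.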
+ */ +static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int ret = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWLAGN_RTC_INST_LOWER_BOUND); + val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + ret = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return ret; +} + +/** + * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image, + u32 len) +{ + u32 val; + u32 save_len = len; + int ret = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWLAGN_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + ret = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return ret; +} + +/** + * iwl_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +int iwl_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int ret; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwlcore_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + ret = iwlcore_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + ret = iwlcore_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
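+ *
+ * iwl_verify_inst_full() reports the offset, the word read back and the
+ * expected word for up to 20 mismatches before giving up.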
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl_verify_inst_full(priv, image, len); + + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 24aff654fa9c44251b11f3d12ab63d639ea06384..35337b1e7caceeac3b14ee0c4a2ea3a660010989 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -27,6 +27,8 @@ * *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -120,7 +122,7 @@ int iwl_commit_rxon(struct iwl_priv *priv) (priv->switch_rxon.channel != priv->staging_rxon.channel)) { IWL_DEBUG_11H(priv, "abort channel switch on %d\n", le16_to_cpu(priv->switch_rxon.channel)); - priv->switch_rxon.switch_in_progress = false; + iwl_chswitch_done(priv, false); } /* If we don't need to send a full RXON, we can use @@ -292,9 +294,7 @@ static u32 iwl_fill_beacon_frame(struct iwl_priv *priv, struct ieee80211_hdr *hdr, int left) { - if (!iwl_is_associated(priv) || !priv->ibss_beacon || - ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && - (priv->iw_mode != NL80211_IFTYPE_AP))) + if (!priv->ibss_beacon) return 0; if (priv->ibss_beacon->len > left) @@ -367,7 +367,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, /* Set up packet rate and flags */ rate = iwl_rate_get_lowest_plcp(priv); - priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) rate_flags |= RATE_MCS_CCK_MSK; @@ -474,18 +475,25 @@ void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) /* Unmap tx_cmd */ if (num_tbs) pci_unmap_single(dev, - pci_unmap_addr(&txq->meta[index], mapping), - pci_unmap_len(&txq->meta[index], len), + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), PCI_DMA_BIDIRECTIONAL); /* Unmap chunks, if any. 
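 * TB 0 (the Tx command and MAC header) was unmapped above with its
 * bidirectional mapping; the remaining TBs carry frame payload and are
 * unmapped PCI_DMA_TODEVICE below. The attached skb, now a single
 * pointer per TFD, is then freed with dev_kfree_skb_any() because this
 * path may run with interrupts disabled.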
*/ - for (i = 1; i < num_tbs; i++) { + for (i = 1; i < num_tbs; i++) pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); - if (txq->txb) { - dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]); - txq->txb[txq->q.read_ptr].skb[i - 1] = NULL; + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; } } } @@ -851,6 +859,24 @@ int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) return 0; } +static void iwl_bg_tx_flush(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, tx_flush); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* do nothing if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + return; + + if (priv->cfg->ops->lib->txfifo_flush) { + IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); + iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); + } +} + /** * iwl_setup_rx_handlers - Initialize Rx handler callbacks * @@ -933,6 +959,8 @@ void iwl_rx_handle(struct iwl_priv *priv) fill_rx = 1; while (i != r) { + int len; + rxb = rxq->queue[i]; /* If an RXB doesn't have a Rx queue slot associated with it, @@ -947,8 +975,9 @@ void iwl_rx_handle(struct iwl_priv *priv) PCI_DMA_FROMDEVICE); pkt = rxb_addr(rxb); - trace_iwlwifi_dev_rx(priv, pkt, - le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_dev_rx(priv, pkt, len); /* Reclaim a command buffer only if this packet is a response * to a (driver-originated) command. @@ -1450,13 +1479,13 @@ bool iwl_good_ack_health(struct iwl_priv *priv, actual_ack_cnt_delta = le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - - le32_to_cpu(priv->statistics.tx.actual_ack_cnt); + le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt); expected_ack_cnt_delta = le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - - le32_to_cpu(priv->statistics.tx.expected_ack_cnt); + le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt); ba_timeout_delta = le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - - le32_to_cpu(priv->statistics.tx.agg.ba_timeout); + le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout); if ((priv->_agn.agg_tids_count > 0) && (expected_ack_cnt_delta > 0) && (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) @@ -1466,12 +1495,17 @@ bool iwl_good_ack_health(struct iwl_priv *priv, " expected_ack_cnt = %d\n", actual_ack_cnt_delta, expected_ack_cnt_delta); -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* + * This is ifdef'ed on DEBUGFS because otherwise the + * statistics aren't available. If DEBUGFS is set but + * DEBUG is not, these will just compile out. + */ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", - priv->delta_statistics.tx.rx_detected_cnt); + priv->_agn.delta_statistics.tx.rx_detected_cnt); IWL_DEBUG_RADIO(priv, "ack_or_ba_timeout_collision delta = %d\n", - priv->delta_statistics.tx. + priv->_agn.delta_statistics.tx. 
ack_or_ba_timeout_collision); #endif IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", @@ -1658,6 +1692,7 @@ static void iwl_nic_start(struct iwl_priv *priv) struct iwlagn_ucode_capabilities { u32 max_probe_length; + u32 standard_phy_calibration_size; }; static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); @@ -1694,6 +1729,9 @@ struct iwlagn_firmware_pieces { size_t inst_size, data_size, init_size, init_data_size, boot_size; u32 build; + + u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; + u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; }; static int iwlagn_load_legacy_firmware(struct iwl_priv *priv, @@ -1787,12 +1825,20 @@ static int iwlagn_load_firmware(struct iwl_priv *priv, const u8 *data; int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp; u64 alternatives; + u32 tlv_len; + enum iwl_ucode_tlv_type tlv_type; + const u8 *tlv_data; - if (len < sizeof(*ucode)) + if (len < sizeof(*ucode)) { + IWL_ERR(priv, "uCode has invalid length: %zd\n", len); return -EINVAL; + } - if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) + if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { + IWL_ERR(priv, "invalid uCode magic: 0X%x\n", + le32_to_cpu(ucode->magic)); return -EINVAL; + } /* * Check which alternatives are present, and "downgrade" @@ -1818,10 +1864,7 @@ static int iwlagn_load_firmware(struct iwl_priv *priv, len -= sizeof(*ucode); while (len >= sizeof(*tlv)) { - u32 tlv_len; - enum iwl_ucode_tlv_type tlv_type; u16 tlv_alt; - const u8 *tlv_data; len -= sizeof(*tlv); tlv = (void *)data; @@ -1831,8 +1874,11 @@ static int iwlagn_load_firmware(struct iwl_priv *priv, tlv_alt = le16_to_cpu(tlv->alternative); tlv_data = tlv->data; - if (len < tlv_len) + if (len < tlv_len) { + IWL_ERR(priv, "invalid TLV len: %zd/%u\n", + len, tlv_len); return -EINVAL; + } len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); @@ -1866,20 +1912,77 @@ static int iwlagn_load_firmware(struct iwl_priv *priv, pieces->boot_size = tlv_len; break; case IWL_UCODE_TLV_PROBE_MAX_LEN: - if (tlv_len != 4) - return -EINVAL; + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; capa->max_probe_length = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_INIT_EVTLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->init_evtlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->init_evtlog_size = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_INIT_ERRLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->init_errlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->inst_evtlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->inst_evtlog_size = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + pieces->inst_errlog_ptr = + le32_to_cpup((__le32 *)tlv_data); + break; + case IWL_UCODE_TLV_ENHANCE_SENS_TBL: + if (tlv_len) + goto invalid_tlv_len; + priv->enhance_sensitivity_table = true; + break; + case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + capa->standard_phy_calibration_size = + le32_to_cpup((__le32 *)tlv_data); break; 
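+		/*
+		 * Unknown TLV types are only warned about and skipped, so a
+		 * newer firmware image carrying extra TLVs still loads; a
+		 * size mismatch in a recognized TLV, by contrast, aborts the
+		 * parse via the invalid_tlv_len label below.
+		 */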
default: + IWL_WARN(priv, "unknown TLV: %d\n", tlv_type); break; } } - if (len) + if (len) { + IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len); + iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len); return -EINVAL; + } return 0; + + invalid_tlv_len: + IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); + iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len); + + return -EINVAL; } /** @@ -1901,6 +2004,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) u32 build; struct iwlagn_ucode_capabilities ucode_capa = { .max_probe_length = 200, + .standard_phy_calibration_size = + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE, }; memset(&pieces, 0, sizeof(pieces)); @@ -2063,6 +2168,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) goto err_pci_alloc; } + /* Now that we can no longer fail, copy information */ + + /* + * The (size - 16) / 12 formula is based on the information recorded + * for each event, which is of mode 1 (including timestamp) for all + * new microcodes that include this information. + */ + priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr; + if (pieces.init_evtlog_size) + priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12; + else + priv->_agn.init_evtlog_size = priv->cfg->max_event_log_size; + priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr; + priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr; + if (pieces.inst_evtlog_size) + priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; + else + priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size; + priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; + /* Copy images into buffers for card's bus-master reads ... */ /* Runtime instructions (first block of data in file) */ @@ -2102,6 +2227,20 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) pieces.boot_size); memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); + /* + * figure out the offset of chain noise reset and gain commands + * base on the size of standard phy calibration commands table size + */ + if (ucode_capa.standard_phy_calibration_size > + IWL_MAX_PHY_CALIBRATE_TBL_SIZE) + ucode_capa.standard_phy_calibration_size = + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; + + priv->_agn.phy_calib_chain_noise_reset_cmd = + ucode_capa.standard_phy_calibration_size; + priv->_agn.phy_calib_chain_noise_gain_cmd = + ucode_capa.standard_phy_calibration_size + 1; + /************************************************** * This is still part of probe() in a sense... 
* @@ -2172,17 +2311,41 @@ static const char *desc_lookup_text[] = { "DEBUG_1", "DEBUG_2", "DEBUG_3", - "ADVANCED SYSASSERT" }; -static const char *desc_lookup(int i) +static struct { char *name; u8 num; } advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *desc_lookup(u32 num) { - int max = ARRAY_SIZE(desc_lookup_text) - 1; + int i; + int max = ARRAY_SIZE(desc_lookup_text); - if (i < 0 || i > max) - i = max; + if (num < max) + return desc_lookup_text[num]; - return desc_lookup_text[i]; + max = ARRAY_SIZE(advanced_lookup) - 1; + for (i = 0; i < max; i++) { + if (advanced_lookup[i].num == num) + break; + } + return advanced_lookup[i].name; } #define ERROR_START_OFFSET (1 * sizeof(u32)) @@ -2195,10 +2358,15 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) u32 blink1, blink2, ilink1, ilink2; u32 pc, hcmd; - if (priv->ucode_type == UCODE_INIT) + if (priv->ucode_type == UCODE_INIT) { base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); - else + if (!base) + base = priv->_agn.init_errlog_ptr; + } else { base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + if (!base) + base = priv->_agn.inst_errlog_ptr; + } if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { IWL_ERR(priv, @@ -2230,9 +2398,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, blink1, blink2, ilink1, ilink2); - IWL_ERR(priv, "Desc Time " + IWL_ERR(priv, "Desc Time " "data1 data2 line\n"); - IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", + IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", desc_lookup(desc), desc, time, data1, data2, line); IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", @@ -2258,10 +2426,16 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, if (num_events == 0) return pos; - if (priv->ucode_type == UCODE_INIT) + + if (priv->ucode_type == UCODE_INIT) { base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); - else + if (!base) + base = priv->_agn.init_evtlog_ptr; + } else { base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!base) + base = priv->_agn.inst_evtlog_ptr; + } if (mode == 0) event_size = 2 * sizeof(u32); @@ -2363,13 +2537,21 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, u32 num_wraps; /* # times uCode wrapped to top of log */ u32 next_entry; /* index of next entry to be written by uCode */ u32 size; /* # entries that we'll print */ + u32 logsize; int pos = 0; size_t bufsz = 0; - if (priv->ucode_type == UCODE_INIT) + if (priv->ucode_type == UCODE_INIT) { base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); - else + logsize = priv->_agn.init_evtlog_size; + if (!base) + base = priv->_agn.init_evtlog_ptr; + } else { base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + logsize = priv->_agn.inst_evtlog_size; + if (!base) + base = priv->_agn.inst_evtlog_ptr; + } if
(!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { IWL_ERR(priv, @@ -2384,16 +2566,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); - if (capacity > priv->cfg->max_event_log_size) { + if (capacity > logsize) { IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", - capacity, priv->cfg->max_event_log_size); - capacity = priv->cfg->max_event_log_size; + capacity, logsize); + capacity = logsize; } - if (next_entry > priv->cfg->max_event_log_size) { + if (next_entry > logsize) { IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", - next_entry, priv->cfg->max_event_log_size); - next_entry = priv->cfg->max_event_log_size; + next_entry, logsize); + next_entry = logsize; } size = num_wraps ? capacity : next_entry; @@ -2518,8 +2700,6 @@ static void iwl_alive_start(struct iwl_priv *priv) if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv); - - memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); } /* Configure Bluetooth device coexistence support */ @@ -2843,9 +3023,17 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work) } if (priv->start_calib) { - iwl_chain_noise_calibration(priv, &priv->statistics); - - iwl_sensitivity_calibration(priv, &priv->statistics); + if (priv->cfg->bt_statistics) { + iwl_chain_noise_calibration(priv, + (void *)&priv->_agn.statistics_bt); + iwl_sensitivity_calibration(priv, + (void *)&priv->_agn.statistics_bt); + } else { + iwl_chain_noise_calibration(priv, + (void *)&priv->_agn.statistics); + iwl_sensitivity_calibration(priv, + (void *)&priv->_agn.statistics); + } } mutex_unlock(&priv->mutex); @@ -2934,20 +3122,16 @@ void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", vif->bss_conf.aid, vif->bss_conf.beacon_int); - if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + if (vif->bss_conf.use_short_preamble) priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.assoc_capability & - WLAN_CAPABILITY_SHORT_SLOT_TIME) + if (vif->bss_conf.use_short_slot) priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; } iwlcore_commit_rxon(priv); @@ -3173,8 +3357,7 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) priv->staging_rxon.assoc_id = 0; - if (vif->bss_conf.assoc_capability & - WLAN_CAPABILITY_SHORT_PREAMBLE) + if (vif->bss_conf.use_short_preamble) priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else @@ -3182,17 +3365,12 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) ~RXON_FLG_SHORT_PREAMBLE_MSK; if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.assoc_capability & - WLAN_CAPABILITY_SHORT_SLOT_TIME) + if (vif->bss_conf.use_short_slot) priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->staging_rxon.flags &= - ~RXON_FLG_SHORT_SLOT_MSK; } /* restore RXON assoc */ priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; @@ -3238,17 +3416,9 @@ static int iwl_mac_set_key(struct 
ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - if (sta) { - sta_id = iwl_sta_id(sta); - - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", - sta->addr); - return -EINVAL; - } - } else { - sta_id = priv->hw_params.bcast_sta_id; - } + sta_id = iwl_sta_id_or_broadcast(priv, sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; mutex_lock(&priv->mutex); iwl_scan_cancel_timeout(priv, 100); @@ -3294,13 +3464,32 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return ret; } +/* + * switch to RTS/CTS for TX + */ +static void iwl_enable_rts_cts(struct iwl_priv *priv) +{ + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n"); + iwlcore_commit_rxon(priv); + } else { + /* scanning, defer the request until scan completed */ + IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n"); + } +} + static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_priv *priv = hw->priv; - int ret; + int ret = -EINVAL; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); @@ -3308,17 +3497,19 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, if (!(priv->cfg->sku & IWL_SKU_N)) return -EACCES; + mutex_lock(&priv->mutex); + switch (action) { case IEEE80211_AMPDU_RX_START: IWL_DEBUG_HT(priv, "start Rx\n"); - return iwl_sta_rx_agg_start(priv, sta, tid, *ssn); + ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); + break; case IEEE80211_AMPDU_RX_STOP: IWL_DEBUG_HT(priv, "stop Rx\n"); ret = iwl_sta_rx_agg_stop(priv, sta, tid); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return 0; - else - return ret; + ret = 0; + break; case IEEE80211_AMPDU_TX_START: IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); @@ -3327,7 +3518,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", priv->_agn.agg_tids_count); } - return ret; + break; case IEEE80211_AMPDU_TX_STOP: IWL_DEBUG_HT(priv, "stop Tx\n"); ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); @@ -3337,18 +3528,22 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, priv->_agn.agg_tids_count); } if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return 0; - else - return ret; + ret = 0; + break; case IEEE80211_AMPDU_TX_OPERATIONAL: - /* do nothing */ - return -EOPNOTSUPP; - default: - IWL_DEBUG_HT(priv, "unknown\n"); - return -EINVAL; + if (priv->cfg->use_rts_for_ht) { + /* + * switch to RTS/CTS if it is the prefer protection + * method for HT traffic + */ + iwl_enable_rts_cts(priv); + } + ret = 0; break; } - return 0; + mutex_unlock(&priv->mutex); + + return ret; } static void iwl_mac_sta_notify(struct ieee80211_hw *hw, @@ -3423,6 +3618,136 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, return 0; } +static void iwl_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + u16 ch; + unsigned long flags = 0; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwl_is_rfkill(priv)) + goto out_exit; + + if (test_bit(STATUS_EXIT_PENDING, 
&priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + goto out_exit; + + if (!iwl_is_associated(priv)) + goto out_exit; + + /* channel switch in progress */ + if (priv->switch_rxon.switch_in_progress == true) + goto out_exit; + + mutex_lock(&priv->mutex); + if (priv->cfg->ops->lib->set_channel_switch) { + + ch = ieee80211_frequency_to_channel( + ch_switch->channel->center_freq); + if (le16_to_cpu(priv->active_rxon.channel) != ch) { + ch_info = iwl_get_channel_info(priv, + conf->channel->band, + ch); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + spin_lock_irqsave(&priv->lock, flags); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ht_conf->is_ht = conf_is_ht(conf); + if (ht_conf->is_ht) { + if (conf_is_ht40_minus(conf)) { + ht_conf->extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ht_conf->is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ht_conf->extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ht_conf->is_40mhz = true; + } else { + ht_conf->extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ht_conf->is_40mhz = false; + } + } else + ht_conf->is_40mhz = false; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) + priv->staging_rxon.flags = 0; + + iwl_set_rxon_channel(priv, conf->channel); + iwl_set_rxon_ht(priv, ht_conf); + iwl_set_flags_for_band(priv, conf->channel->band, + priv->vif); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + if (priv->cfg->ops->lib->set_channel_switch(priv, + ch_switch)) + priv->switch_rxon.switch_in_progress = false; + } + } +out: + mutex_unlock(&priv->mutex); +out_exit: + if (!priv->switch_rxon.switch_in_progress) + ieee80211_chswitch_done(priv->vif, false); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) +{ + struct iwl_priv *priv = hw->priv; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* do not support "flush" */ + if (!priv->cfg->ops->lib->txfifo_flush) + goto done; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); + goto done; + } + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); + goto done; + } + + /* + * mac80211 will not push any more frames for transmit + * until the flush is completed + */ + if (drop) { + IWL_DEBUG_MAC80211(priv, "send flush command\n"); + if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; + } + } + IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); + iwlagn_wait_tx_queue_empty(priv); +done: + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + /***************************************************************************** * * driver setup and teardown @@ -3439,6 +3764,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); + INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 
@@ -3479,6 +3805,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) cancel_delayed_work(&priv->scan_check); cancel_work_sync(&priv->start_internal_scan); cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->run_time_calib_work); cancel_work_sync(&priv->beacon_update); del_timer_sync(&priv->statistics_periodic); del_timer_sync(&priv->ucode_trace); @@ -3594,6 +3921,8 @@ static struct ieee80211_ops iwl_hw_ops = { .sta_notify = iwl_mac_sta_notify, .sta_add = iwlagn_mac_sta_add, .sta_remove = iwl_mac_sta_remove, + .channel_switch = iwl_mac_channel_switch, + .flush = iwl_mac_flush, }; static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -3603,7 +3932,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ieee80211_hw *hw; struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); unsigned long flags; - u16 pci_cmd; + u16 pci_cmd, num_mac; /************************ * 1. Allocating HW data @@ -3633,9 +3962,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->pci_dev = pdev; priv->inta_mask = CSR_INI_SET_MASK; -#ifdef CONFIG_IWLWIFI_DEBUG - atomic_set(&priv->restrict_refcnt, 0); -#endif if (iwl_alloc_traffic_mem(priv)) IWL_ERR(priv, "Not enough memory to generate traffic log\n"); @@ -3724,9 +4050,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_eeprom; /* extract MAC Address */ - iwl_eeprom_get_mac(priv, priv->mac_addr); - IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); - SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + iwl_eeprom_get_mac(priv, priv->addresses[0].addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); + priv->hw->wiphy->addresses = priv->addresses; + priv->hw->wiphy->n_addresses = 1; + num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); + if (num_mac > 1) { + memcpy(priv->addresses[1].addr, priv->addresses[0].addr, + ETH_ALEN); + priv->addresses[1].addr[5]++; + priv->hw->wiphy->n_addresses++; + } /************************ * 5. 
Setup HW constants @@ -3993,6 +4327,47 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)}, + +/* 6x00 Series Gen2b */ + {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)}, + {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)}, /* 6x50 WiFi/WiMax Series */ {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, @@ -4002,6 +4377,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, +/* 6x50 WiFi/WiMax Series Gen2 */ + {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6050g2_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6050g2_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6050g2_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6050g2_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6050g2_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6050g2_bgn_cfg)}, + /* 1000 Series WiFi */ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, @@ -4036,19 +4419,18 @@ static int __init iwl_init(void) { int ret; - printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); - printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); ret = 
iwlagn_rate_control_register(); if (ret) { - printk(KERN_ERR DRV_NAME - "Unable to register rate control algorithm: %d\n", ret); + pr_err("Unable to register rate control algorithm: %d\n", ret); return ret; } ret = pci_register_driver(&iwl_driver); if (ret) { - printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); + pr_err("Unable to initialize PCI module\n"); goto error_register; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index 2d748053358ec5482ebaee99e2473dd103cedda7..cc6464dc72e592bd57302423b84784f0b4b63d8d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -65,6 +65,34 @@ #include "iwl-dev.h" +/* configuration for the _agn devices */ +extern struct iwl_cfg iwl4965_agn_cfg; +extern struct iwl_cfg iwl5300_agn_cfg; +extern struct iwl_cfg iwl5100_agn_cfg; +extern struct iwl_cfg iwl5350_agn_cfg; +extern struct iwl_cfg iwl5100_bgn_cfg; +extern struct iwl_cfg iwl5100_abg_cfg; +extern struct iwl_cfg iwl5150_agn_cfg; +extern struct iwl_cfg iwl5150_abg_cfg; +extern struct iwl_cfg iwl6000g2a_2agn_cfg; +extern struct iwl_cfg iwl6000g2a_2abg_cfg; +extern struct iwl_cfg iwl6000g2a_2bg_cfg; +extern struct iwl_cfg iwl6000g2b_bgn_cfg; +extern struct iwl_cfg iwl6000g2b_bg_cfg; +extern struct iwl_cfg iwl6000g2b_2agn_cfg; +extern struct iwl_cfg iwl6000g2b_2abg_cfg; +extern struct iwl_cfg iwl6000g2b_2bgn_cfg; +extern struct iwl_cfg iwl6000g2b_2bg_cfg; +extern struct iwl_cfg iwl6000i_2agn_cfg; +extern struct iwl_cfg iwl6000i_2abg_cfg; +extern struct iwl_cfg iwl6000i_2bg_cfg; +extern struct iwl_cfg iwl6000_3agn_cfg; +extern struct iwl_cfg iwl6050_2agn_cfg; +extern struct iwl_cfg iwl6050_2abg_cfg; +extern struct iwl_cfg iwl6050g2_bgn_cfg; +extern struct iwl_cfg iwl1000_bgn_cfg; +extern struct iwl_cfg iwl1000_bg_cfg; + extern struct iwl_mod_params iwlagn_mod_params; extern struct iwl_hcmd_ops iwlagn_hcmd; extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; @@ -93,6 +121,8 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx, u8 tx_fifo); void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask); +void iwl_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed); /* uCode */ int iwlagn_load_ucode(struct iwl_priv *priv); @@ -102,6 +132,7 @@ void iwlagn_rx_calib_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); void iwlagn_init_alive_start(struct iwl_priv *priv); int iwlagn_alive_notify(struct iwl_priv *priv); +int iwl_verify_ucode(struct iwl_priv *priv); /* lib */ void iwl_check_abort_status(struct iwl_priv *priv, @@ -117,6 +148,9 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); int iwlagn_hw_nic_init(struct iwl_priv *priv); +int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv); +int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); +void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); /* rx */ void iwlagn_rx_queue_restock(struct iwl_priv *priv); @@ -171,6 +205,16 @@ static inline bool iwl_is_tx_success(u32 status) (status == TX_STATUS_DIRECT_DONE); } +/* rx */ +void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +bool iwl_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl_rx_statistics(struct iwl_priv *priv, + struct 
iwl_rx_mem_buffer *rxb); +void iwl_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + /* scan */ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); @@ -178,4 +222,8 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); int iwlagn_manage_ibss_station(struct iwl_priv *priv, struct ieee80211_vif *vif, bool add); +/* hcmd */ +int iwlagn_send_rxon_assoc(struct iwl_priv *priv); +int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); + #endif /* __iwl_agn_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h index 2b7b1df83ba0fe1d718aa0a67dd4ec987914c2e9..ba9523fbb300dcc6974f0a526c4ebe91877b37c9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.h +++ b/drivers/net/wireless/iwlwifi/iwl-calib.h @@ -66,10 +66,8 @@ #include "iwl-core.h" #include "iwl-commands.h" -void iwl_chain_noise_calibration(struct iwl_priv *priv, - struct iwl_notif_statistics *stat_resp); -void iwl_sensitivity_calibration(struct iwl_priv *priv, - struct iwl_notif_statistics *resp); +void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp); +void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp); void iwl_init_sensitivity(struct iwl_priv *priv); void iwl_reset_run_time_calib(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 9aab020c474be305dd763f4a088b9c381a71608a..60725a5c1b694b689d42fe6e782c9c141a47ac74 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -95,8 +95,9 @@ enum { /* Multi-Station support */ REPLY_ADD_STA = 0x18, - REPLY_REMOVE_STA = 0x19, /* not used */ + REPLY_REMOVE_STA = 0x19, REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ + REPLY_TXFIFO_FLUSH = 0x1e, /* Security */ REPLY_WEPKEY = 0x20, @@ -227,7 +228,7 @@ struct iwl_cmd_header { /* command or response/notification data follows immediately */ u8 data[0]; -} __attribute__ ((packed)); +} __packed; /** @@ -247,7 +248,7 @@ struct iwl_cmd_header { struct iwl3945_tx_power { u8 tx_gain; /* gain for analog radio */ u8 dsp_atten; /* gain for DSP */ -} __attribute__ ((packed)); +} __packed; /** * struct iwl3945_power_per_rate @@ -258,7 +259,7 @@ struct iwl3945_power_per_rate { u8 rate; /* plcp */ struct iwl3945_tx_power tpc; u8 reserved; -} __attribute__ ((packed)); +} __packed; /** * iwlagn rate_n_flags bit fields @@ -389,7 +390,7 @@ union iwl4965_tx_power_dual_stream { */ struct tx_power_dual_stream { __le32 dw; -} __attribute__ ((packed)); +} __packed; /** * struct iwl4965_tx_power_db @@ -398,7 +399,7 @@ struct tx_power_dual_stream { */ struct iwl4965_tx_power_db { struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; -} __attribute__ ((packed)); +} __packed; /** * Command REPLY_TX_POWER_DBM_CMD = 0x98 @@ -412,7 +413,7 @@ struct iwl5000_tx_power_dbm_cmd { u8 flags; s8 srv_chan_lmt; /*in half-dBm (e.g. 
30 = 15 dBm) */ u8 reserved; -} __attribute__ ((packed)); +} __packed; /** * Command TX_ANT_CONFIGURATION_CMD = 0x98 @@ -422,7 +423,7 @@ struct iwl5000_tx_power_dbm_cmd { */ struct iwl_tx_ant_config_cmd { __le32 valid; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (0a) @@ -478,7 +479,7 @@ struct iwl_init_alive_resp { __le32 therm_r4[2]; /* signed */ __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, * 2 Tx chains */ -} __attribute__ ((packed)); +} __packed; /** @@ -570,7 +571,7 @@ struct iwl_alive_resp { __le32 error_event_table_ptr; /* SRAM address for error log */ __le32 timestamp; __le32 is_valid; -} __attribute__ ((packed)); +} __packed; /* * REPLY_ERROR = 0x2 (response only, not a command) @@ -582,7 +583,7 @@ struct iwl_error_resp { __le16 bad_cmd_seq_num; __le32 error_info; __le64 timestamp; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (1) @@ -718,7 +719,7 @@ struct iwl3945_rxon_cmd { __le32 filter_flags; __le16 channel; __le16 reserved5; -} __attribute__ ((packed)); +} __packed; struct iwl4965_rxon_cmd { u8 node_addr[6]; @@ -738,7 +739,7 @@ struct iwl4965_rxon_cmd { __le16 channel; u8 ofdm_ht_single_stream_basic_rates; u8 ofdm_ht_dual_stream_basic_rates; -} __attribute__ ((packed)); +} __packed; /* 5000 HW just extend this command */ struct iwl_rxon_cmd { @@ -763,7 +764,7 @@ struct iwl_rxon_cmd { u8 reserved5; __le16 acquisition_data; __le16 reserved6; -} __attribute__ ((packed)); +} __packed; /* * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) @@ -774,7 +775,7 @@ struct iwl3945_rxon_assoc_cmd { u8 ofdm_basic_rates; u8 cck_basic_rates; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwl4965_rxon_assoc_cmd { __le32 flags; @@ -785,7 +786,7 @@ struct iwl4965_rxon_assoc_cmd { u8 ofdm_ht_dual_stream_basic_rates; __le16 rx_chain_select_flags; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwl5000_rxon_assoc_cmd { __le32 flags; @@ -800,7 +801,7 @@ struct iwl5000_rxon_assoc_cmd { __le16 rx_chain_select_flags; __le16 acquisition_data; __le32 reserved3; -} __attribute__ ((packed)); +} __packed; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ @@ -816,7 +817,7 @@ struct iwl_rxon_time_cmd { __le32 beacon_init_val; __le16 listen_interval; __le16 reserved; -} __attribute__ ((packed)); +} __packed; /* * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) @@ -829,7 +830,7 @@ struct iwl3945_channel_switch_cmd { __le32 rxon_filter_flags; __le32 switch_time; struct iwl3945_power_per_rate power[IWL_MAX_RATES]; -} __attribute__ ((packed)); +} __packed; struct iwl4965_channel_switch_cmd { u8 band; @@ -839,7 +840,7 @@ struct iwl4965_channel_switch_cmd { __le32 rxon_filter_flags; __le32 switch_time; struct iwl4965_tx_power_db tx_power; -} __attribute__ ((packed)); +} __packed; /** * struct iwl5000_channel_switch_cmd @@ -860,7 +861,7 @@ struct iwl5000_channel_switch_cmd { __le32 rxon_filter_flags; __le32 switch_time; __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; -} __attribute__ ((packed)); +} __packed; /** * struct iwl6000_channel_switch_cmd @@ -881,7 +882,7 @@ struct iwl6000_channel_switch_cmd { __le32 rxon_filter_flags; __le32 switch_time; __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; -} __attribute__ ((packed)); +} __packed; /* * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification 
only, not a command) @@ -890,7 +891,7 @@ struct iwl_csa_notification { __le16 band; __le16 channel; __le32 status; /* 0 - OK, 1 - fail */ -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (2) @@ -920,7 +921,7 @@ struct iwl_ac_qos { u8 aifsn; u8 reserved1; __le16 edca_txop; -} __attribute__ ((packed)); +} __packed; /* QoS flags defines */ #define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) @@ -939,7 +940,7 @@ struct iwl_ac_qos { struct iwl_qosparam_cmd { __le32 qos_flags; struct iwl_ac_qos ac[AC_NUM]; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (3) @@ -952,20 +953,19 @@ struct iwl_qosparam_cmd { /* Special, dedicated locations within device's station table */ #define IWL_AP_ID 0 -#define IWL_MULTICAST_ID 1 #define IWL_STA_ID 2 #define IWL3945_BROADCAST_ID 24 #define IWL3945_STATION_COUNT 25 #define IWL4965_BROADCAST_ID 31 #define IWL4965_STATION_COUNT 32 -#define IWL5000_BROADCAST_ID 15 -#define IWL5000_STATION_COUNT 16 +#define IWLAGN_BROADCAST_ID 15 +#define IWLAGN_STATION_COUNT 16 #define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ #define IWL_INVALID_STATION 255 -#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2); -#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8); +#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) +#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) #define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) #define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) #define STA_FLG_MAX_AGG_SIZE_POS (19) @@ -1015,7 +1015,7 @@ struct iwl4965_keyinfo { u8 key_offset; u8 reserved2; u8 key[16]; /* 16-byte unicast decryption key */ -} __attribute__ ((packed)); +} __packed; /* 5000 */ struct iwl_keyinfo { @@ -1029,7 +1029,7 @@ struct iwl_keyinfo { __le64 tx_secur_seq_cnt; __le64 hw_tkip_mic_rx_key; __le64 hw_tkip_mic_tx_key; -} __attribute__ ((packed)); +} __packed; /** * struct sta_id_modify @@ -1049,7 +1049,7 @@ struct sta_id_modify { u8 sta_id; u8 modify_mask; __le16 reserved2; -} __attribute__ ((packed)); +} __packed; /* * REPLY_ADD_STA = 0x18 (command) @@ -1103,7 +1103,7 @@ struct iwl3945_addsta_cmd { /* Starting Sequence Number for added block-ack support. * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. 
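The trailing semicolons dropped from STA_FLG_TX_RATE_MSK and STA_FLG_PWR_SAVE_MSK above matter because a semicolon inside the macro body makes the definition unusable in an expression. A minimal standalone sketch (hypothetical GOOD_MSK/BAD_MSK names, not from the driver):

	#include <stdint.h>

	#define GOOD_MSK (UINT32_C(1) << 8)	/* mirrors the corrected definition */
	#define BAD_MSK  (UINT32_C(1) << 8);	/* mirrors the old definition */

	static int test_good(uint32_t flags)
	{
		return (flags & GOOD_MSK) != 0;	/* compiles */
	}

	#if 0	/* enabling this fails to build: the expansion leaves a stray ';' */
	static int test_bad(uint32_t flags)
	{
		return (flags & BAD_MSK) != 0;
	}
	#endif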
*/ __le16 add_immediate_ba_ssn; -} __attribute__ ((packed)); +} __packed; struct iwl4965_addsta_cmd { u8 mode; /* 1: modify existing, 0: add new station */ @@ -1140,7 +1140,7 @@ struct iwl4965_addsta_cmd { __le16 sleep_tx_count; __le16 reserved2; -} __attribute__ ((packed)); +} __packed; /* 5000 */ struct iwl_addsta_cmd { @@ -1178,7 +1178,7 @@ struct iwl_addsta_cmd { __le16 sleep_tx_count; __le16 reserved2; -} __attribute__ ((packed)); +} __packed; #define ADD_STA_SUCCESS_MSK 0x1 @@ -1190,7 +1190,7 @@ struct iwl_addsta_cmd { */ struct iwl_add_sta_resp { u8 status; /* ADD_STA_* */ -} __attribute__ ((packed)); +} __packed; #define REM_STA_SUCCESS_MSK 0x1 /* @@ -1198,7 +1198,7 @@ struct iwl_add_sta_resp { */ struct iwl_rem_sta_resp { u8 status; -} __attribute__ ((packed)); +} __packed; /* * REPLY_REM_STA = 0x19 (command) @@ -1208,7 +1208,44 @@ struct iwl_rem_sta_cmd { u8 reserved[3]; u8 addr[ETH_ALEN]; /* MAC addr of the first station */ u8 reserved2[2]; -} __attribute__ ((packed)); +} __packed; + +#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) +#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) +#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) +#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) +#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) + +#define IWL_DROP_SINGLE 0 +#define IWL_DROP_SELECTED 1 +#define IWL_DROP_ALL 2 + +/* + * REPLY_TXFIFO_FLUSH = 0x1e (command and response) + * + * When using the full FIFO flush, this command checks the scheduler HW block WR/RD + * pointers to verify that all the frames were transferred by DMA into the + * relevant TX FIFO queue. Only when the DMA is finished and the queue is + * empty can the command finish. + * This command is used to flush transmit commands from the TXFIFO; it may + * operate on single or multiple queues, but the command queue cannot be flushed by + * this command. The command response is returned when all the queue flush + * operations are done. Each flushed TX command returns a response with the FLUSH + * status set in the TX response status. When the FIFO flush operation is used, + * the flush operation ends when both the scheduler DMA done and TXFIFO empty + * conditions are set. + * + * @fifo_control: bit mask for which queues to flush + * @flush_control: flush controls + * 0: Dump single MSDU + * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
+ * 2: Dump all FIFO + */ +struct iwl_txfifo_flush_cmd { + __le32 fifo_control; + __le16 flush_control; + __le16 reserved; +} __packed; /* * REPLY_WEP_KEY = 0x20 @@ -1220,7 +1257,7 @@ struct iwl_wep_key { u8 key_size; u8 reserved2[3]; u8 key[16]; -} __attribute__ ((packed)); +} __packed; struct iwl_wep_cmd { u8 num_keys; @@ -1228,7 +1265,7 @@ struct iwl_wep_cmd { u8 flags; u8 reserved; struct iwl_wep_key key[0]; -} __attribute__ ((packed)); +} __packed; #define WEP_KEY_WEP_TYPE 1 #define WEP_KEYS_MAX 4 @@ -1282,7 +1319,7 @@ struct iwl3945_rx_frame_stats { __le16 sig_avg; __le16 noise_diff; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct iwl3945_rx_frame_hdr { __le16 channel; @@ -1291,13 +1328,13 @@ struct iwl3945_rx_frame_hdr { u8 rate; __le16 len; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct iwl3945_rx_frame_end { __le32 status; __le64 timestamp; __le32 beacon_timestamp; -} __attribute__ ((packed)); +} __packed; /* * REPLY_3945_RX = 0x1b (response only, not a command) @@ -1311,7 +1348,7 @@ struct iwl3945_rx_frame { struct iwl3945_rx_frame_stats stats; struct iwl3945_rx_frame_hdr hdr; struct iwl3945_rx_frame_end end; -} __attribute__ ((packed)); +} __packed; #define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) @@ -1327,7 +1364,7 @@ struct iwl4965_rx_non_cfg_phy { __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ u8 pad[0]; -} __attribute__ ((packed)); +} __packed; #define IWL50_RX_RES_PHY_CNT 8 @@ -1345,7 +1382,7 @@ struct iwl4965_rx_non_cfg_phy { struct iwl5000_non_cfg_phy { __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ -} __attribute__ ((packed)); +} __packed; /* @@ -1365,12 +1402,12 @@ struct iwl_rx_phy_res { __le32 rate_n_flags; /* RATE_MCS_* */ __le16 byte_count; /* frame's byte-count */ __le16 reserved3; -} __attribute__ ((packed)); +} __packed; -struct iwl4965_rx_mpdu_res_start { +struct iwl_rx_mpdu_res_start { __le16 byte_count; __le16 reserved; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** @@ -1400,18 +1437,27 @@ struct iwl4965_rx_mpdu_res_start { /* REPLY_TX Tx flags field */ -/* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it +/* + * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it * before this frame. if CTS-to-self required check - * RXON_FLG_SELF_CTS_EN status. */ -#define TX_CMD_FLG_RTS_CTS_MSK cpu_to_le32(1 << 0) + * RXON_FLG_SELF_CTS_EN status. + * unused in 3945/4965, used in 5000 series and after + */ +#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0) -/* 1: Use Request-To-Send protocol before this frame. - * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */ +/* + * 1: Use Request-To-Send protocol before this frame. + * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. + * used in 3945/4965, unused in 5000 series and after + */ #define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) -/* 1: Transmit Clear-To-Send to self before this frame. +/* + * 1: Transmit Clear-To-Send to self before this frame. * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. - * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */ + * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. + * used in 3945/4965, unused in 5000 series and after + */ #define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) /* 1: Expect ACK from receiving station @@ -1431,8 +1477,11 @@ struct iwl4965_rx_mpdu_res_start { * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. 
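To make the REPLY_TXFIFO_FLUSH description above concrete, here is a minimal sketch of filling the command for a full flush of the four AC FIFOs; it is an illustration only, and send_cmd() is a hypothetical stand-in for the driver's real host-command path:

	/* illustration only -- send_cmd() is a hypothetical transport helper */
	static int flush_all_tx_fifos(void)
	{
		struct iwl_txfifo_flush_cmd flush_cmd = {
			/* the four AC FIFO masks defined above */
			.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
					IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK,
			/* IWL_DROP_ALL (2): dump every FIFO regardless of PS/TTL/TID */
			.flush_control = cpu_to_le16(IWL_DROP_ALL),
		};

		return send_cmd(REPLY_TXFIFO_FLUSH, sizeof(flush_cmd), &flush_cmd);
	}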
*/ #define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) -/* 1: Frame requires full Tx-Op protection. - * Set this if either RTS or CTS Tx Flag gets set. */ +/* + * 1: Frame requires full Tx-Op protection. + * Set this if either RTS or CTS Tx Flag gets set. + * used in 3945/4965, unused in 5000 series and after + */ #define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) /* Tx antenna selection field; used only for 3945, reserved (0) for 4965. @@ -1557,7 +1606,7 @@ struct iwl3945_tx_cmd { */ u8 payload[0]; struct ieee80211_hdr hdr[0]; -} __attribute__ ((packed)); +} __packed; /* * REPLY_TX = 0x1c (response) @@ -1569,7 +1618,7 @@ struct iwl3945_tx_resp { u8 rate; __le32 wireless_media_time; __le32 status; /* TX status */ -} __attribute__ ((packed)); +} __packed; /* @@ -1581,7 +1630,7 @@ struct iwl_dram_scratch { u8 try_cnt; /* Tx attempts */ u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwl_tx_cmd { /* @@ -1660,7 +1709,7 @@ struct iwl_tx_cmd { */ u8 payload[0]; struct ieee80211_hdr hdr[0]; -} __attribute__ ((packed)); +} __packed; /* TX command response is sent after *3945* transmission attempts. * @@ -1826,7 +1875,7 @@ enum { struct agg_tx_status { __le16 status; __le16 sequence; -} __attribute__ ((packed)); +} __packed; struct iwl4965_tx_resp { u8 frame_count; /* 1 no aggregation, >1 aggregation */ @@ -1863,7 +1912,7 @@ struct iwl4965_tx_resp { __le32 status; struct agg_tx_status agg_status[0]; /* for each agg frame */ } u; -} __attribute__ ((packed)); +} __packed; /* * definitions for initial rate index field @@ -1927,7 +1976,7 @@ struct iwl5000_tx_resp { */ struct agg_tx_status status; /* TX status (in aggregation - * status of 1st frame) */ -} __attribute__ ((packed)); +} __packed; /* * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) * @@ -1945,7 +1994,7 @@ struct iwl_compressed_ba_resp { __le64 bitmap; __le16 scd_flow; __le16 scd_ssn; -} __attribute__ ((packed)); +} __packed; /* * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) @@ -1958,14 +2007,14 @@ struct iwl3945_txpowertable_cmd { u8 reserved; __le16 channel; struct iwl3945_power_per_rate power[IWL_MAX_RATES]; -} __attribute__ ((packed)); +} __packed; struct iwl4965_txpowertable_cmd { u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ u8 reserved; __le16 channel; struct iwl4965_tx_power_db tx_power; -} __attribute__ ((packed)); +} __packed; /** @@ -1987,13 +2036,13 @@ struct iwl3945_rate_scaling_info { __le16 rate_n_flags; u8 try_cnt; u8 next_rate_index; -} __attribute__ ((packed)); +} __packed; struct iwl3945_rate_scaling_cmd { u8 table_id; u8 reserved[3]; struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; -} __attribute__ ((packed)); +} __packed; /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ @@ -2040,7 +2089,7 @@ struct iwl_link_qual_general_params { * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 
*/ u8 start_rate_index[LINK_QUAL_AC_NUM]; -} __attribute__ ((packed)); +} __packed; #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ #define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) @@ -2081,7 +2130,7 @@ struct iwl_link_qual_agg_params { u8 agg_frame_cnt_limit; __le32 reserved; -} __attribute__ ((packed)); +} __packed; /* * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) @@ -2287,7 +2336,7 @@ struct iwl_link_quality_cmd { __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ } rs_table[LINK_QUAL_MAX_RETRY_NUM]; __le32 reserved2; -} __attribute__ ((packed)); +} __packed; /* * BT configuration enable flags: @@ -2328,7 +2377,7 @@ struct iwl_bt_cmd { u8 reserved; __le32 kill_ack_mask; __le32 kill_cts_mask; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (6) @@ -2353,7 +2402,7 @@ struct iwl_measure_channel { u8 channel; /* channel to measure */ u8 type; /* see enum iwl_measure_type */ __le16 reserved; -} __attribute__ ((packed)); +} __packed; /* * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) @@ -2372,7 +2421,7 @@ struct iwl_spectrum_cmd { __le16 channel_count; /* minimum 1, maximum 10 */ __le16 reserved3; struct iwl_measure_channel channels[10]; -} __attribute__ ((packed)); +} __packed; /* * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) @@ -2383,7 +2432,7 @@ struct iwl_spectrum_resp { __le16 status; /* 0 - command will be handled * 1 - cannot handle (conflicts with another * measurement) */ -} __attribute__ ((packed)); +} __packed; enum iwl_measurement_state { IWL_MEASUREMENT_START = 0, @@ -2406,13 +2455,13 @@ enum iwl_measurement_status { struct iwl_measurement_histogram { __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ -} __attribute__ ((packed)); +} __packed; /* clear channel availability counters */ struct iwl_measurement_cca_counters { __le32 ofdm; __le32 cck; -} __attribute__ ((packed)); +} __packed; enum iwl_measure_type { IWL_MEASURE_BASIC = (1 << 0), @@ -2448,7 +2497,7 @@ struct iwl_spectrum_notification { struct iwl_measurement_histogram histogram; __le32 stop_time; /* lower 32-bits of TSF */ __le32 status; /* see iwl_measurement_status */ -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (7) @@ -2504,7 +2553,7 @@ struct iwl3945_powertable_cmd { __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; -} __attribute__ ((packed)); +} __packed; struct iwl_powertable_cmd { __le16 flags; @@ -2514,7 +2563,7 @@ struct iwl_powertable_cmd { __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; __le32 keep_alive_beacons; -} __attribute__ ((packed)); +} __packed; /* * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) @@ -2527,7 +2576,7 @@ struct iwl_sleep_notification { __le32 sleep_time; __le32 tsf_low; __le32 bcon_timer; -} __attribute__ ((packed)); +} __packed; /* Sleep states. 3945 and 4965 identical. 
*/ enum { @@ -2552,14 +2601,14 @@ enum { #define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ struct iwl_card_state_cmd { __le32 status; /* CARD_STATE_CMD_* request new power state */ -} __attribute__ ((packed)); +} __packed; /* * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) */ struct iwl_card_state_notif { __le32 flags; -} __attribute__ ((packed)); +} __packed; #define HW_CARD_DISABLED 0x01 #define SW_CARD_DISABLED 0x02 @@ -2570,14 +2619,14 @@ struct iwl_ct_kill_config { __le32 reserved; __le32 critical_temperature_M; __le32 critical_temperature_R; -} __attribute__ ((packed)); +} __packed; /* 1000, and 6x00 */ struct iwl_ct_kill_throttling_config { __le32 critical_temperature_exit; __le32 reserved; __le32 critical_temperature_enter; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (8) @@ -2622,7 +2671,7 @@ struct iwl3945_scan_channel { struct iwl3945_tx_power tpc; __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ -} __attribute__ ((packed)); +} __packed; /* set number of direct probes u8 type */ #define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) @@ -2641,7 +2690,7 @@ struct iwl_scan_channel { u8 dsp_atten; /* gain for DSP */ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ -} __attribute__ ((packed)); +} __packed; /* set number of direct probes __le32 type */ #define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) @@ -2658,7 +2707,7 @@ struct iwl_ssid_ie { u8 id; u8 len; u8 ssid[32]; -} __attribute__ ((packed)); +} __packed; #define PROBE_OPTION_MAX_3945 4 #define PROBE_OPTION_MAX 20 @@ -2764,7 +2813,7 @@ struct iwl3945_scan_cmd { * before requesting another scan. */ u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct iwl_scan_cmd { __le16 len; @@ -2808,7 +2857,7 @@ struct iwl_scan_cmd { * before requesting another scan. */ u8 data[0]; -} __attribute__ ((packed)); +} __packed; /* Can abort will notify by complete notification with abort status. 
*/ #define CAN_ABORT_STATUS cpu_to_le32(0x1) @@ -2820,7 +2869,7 @@ struct iwl_scan_cmd { */ struct iwl_scanreq_notification { __le32 status; /* 1: okay, 2: cannot fulfill request */ -} __attribute__ ((packed)); +} __packed; /* * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) @@ -2833,7 +2882,7 @@ struct iwl_scanstart_notification { u8 band; u8 reserved[2]; __le32 status; -} __attribute__ ((packed)); +} __packed; #define SCAN_OWNER_STATUS 0x1; #define MEASURE_OWNER_STATUS 0x2; @@ -2849,7 +2898,7 @@ struct iwl_scanresults_notification { __le32 tsf_low; __le32 tsf_high; __le32 statistics[NUMBER_OF_STATISTICS]; -} __attribute__ ((packed)); +} __packed; /* * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) @@ -2861,7 +2910,7 @@ struct iwl_scancomplete_notification { u8 last_channel; __le32 tsf_low; __le32 tsf_high; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** @@ -2879,14 +2928,14 @@ struct iwl3945_beacon_notif { __le32 low_tsf; __le32 high_tsf; __le32 ibss_mgr_status; -} __attribute__ ((packed)); +} __packed; struct iwl4965_beacon_notif { struct iwl4965_tx_resp beacon_notify_hdr; __le32 low_tsf; __le32 high_tsf; __le32 ibss_mgr_status; -} __attribute__ ((packed)); +} __packed; /* * REPLY_TX_BEACON = 0x91 (command, has simple generic response) @@ -2898,7 +2947,7 @@ struct iwl3945_tx_beacon_cmd { u8 tim_size; u8 reserved1; struct ieee80211_hdr frame[0]; /* beacon frame */ -} __attribute__ ((packed)); +} __packed; struct iwl_tx_beacon_cmd { struct iwl_tx_cmd tx; @@ -2906,7 +2955,7 @@ struct iwl_tx_beacon_cmd { u8 tim_size; u8 reserved1; struct ieee80211_hdr frame[0]; /* beacon frame */ -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (10) @@ -2932,7 +2981,7 @@ struct rate_histogram { __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; } failed; -} __attribute__ ((packed)); +} __packed; /* statistics command response */ @@ -2952,7 +3001,7 @@ struct iwl39_statistics_rx_phy { __le32 rxe_frame_limit_overrun; __le32 sent_ack_cnt; __le32 sent_cts_cnt; -} __attribute__ ((packed)); +} __packed; struct iwl39_statistics_rx_non_phy { __le32 bogus_cts; /* CTS received when not expecting CTS */ @@ -2963,13 +3012,13 @@ struct iwl39_statistics_rx_non_phy { * filtering process */ __le32 non_channel_beacons; /* beacons with our bss id but not on * our serving channel */ -} __attribute__ ((packed)); +} __packed; struct iwl39_statistics_rx { struct iwl39_statistics_rx_phy ofdm; struct iwl39_statistics_rx_phy cck; struct iwl39_statistics_rx_non_phy general; -} __attribute__ ((packed)); +} __packed; struct iwl39_statistics_tx { __le32 preamble_cnt; @@ -2981,20 +3030,21 @@ struct iwl39_statistics_tx { __le32 ack_timeout; __le32 expected_ack_cnt; __le32 actual_ack_cnt; -} __attribute__ ((packed)); +} __packed; struct statistics_dbg { __le32 burst_check; __le32 burst_count; - __le32 reserved[4]; -} __attribute__ ((packed)); + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; struct iwl39_statistics_div { __le32 tx_on_a; __le32 tx_on_b; __le32 exec_time; __le32 probe_time; -} __attribute__ ((packed)); +} __packed; struct iwl39_statistics_general { __le32 temperature; @@ -3004,7 +3054,7 @@ struct iwl39_statistics_general { __le32 slots_idle; __le32 ttl_timestamp; struct iwl39_statistics_div div; -} __attribute__ ((packed)); +} __packed; struct statistics_rx_phy { __le32 ina_cnt; @@ 
-3027,7 +3077,7 @@ struct statistics_rx_phy { __le32 mh_format_err; __le32 re_acq_main_rssi_sum; __le32 reserved3; -} __attribute__ ((packed)); +} __packed; struct statistics_rx_ht_phy { __le32 plcp_err; @@ -3040,7 +3090,7 @@ struct statistics_rx_ht_phy { __le32 agg_mpdu_cnt; __le32 agg_cnt; __le32 unsupport_mcs; -} __attribute__ ((packed)); +} __packed; #define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) @@ -3075,14 +3125,28 @@ struct statistics_rx_non_phy { __le32 beacon_energy_a; __le32 beacon_energy_b; __le32 beacon_energy_c; -} __attribute__ ((packed)); +} __packed; + +struct statistics_rx_non_phy_bt { + struct statistics_rx_non_phy common; + /* additional stats for bt */ + __le32 num_bt_kills; + __le32 reserved[2]; +} __packed; struct statistics_rx { struct statistics_rx_phy ofdm; struct statistics_rx_phy cck; struct statistics_rx_non_phy general; struct statistics_rx_ht_phy ofdm_ht; -} __attribute__ ((packed)); +} __packed; + +struct statistics_rx_bt { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy_bt general; + struct statistics_rx_ht_phy ofdm_ht; +} __packed; /** * struct statistics_tx_power - current tx power @@ -3096,7 +3160,7 @@ struct statistics_tx_power { u8 ant_b; u8 ant_c; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct statistics_tx_non_phy_agg { __le32 ba_timeout; @@ -3109,7 +3173,7 @@ struct statistics_tx_non_phy_agg { __le32 underrun; __le32 bt_prio_kill; __le32 rx_ba_rsp_cnt; -} __attribute__ ((packed)); +} __packed; struct statistics_tx { __le32 preamble_cnt; @@ -3134,7 +3198,7 @@ struct statistics_tx { */ struct statistics_tx_power tx_power; __le32 reserved1; -} __attribute__ ((packed)); +} __packed; struct statistics_div { @@ -3144,9 +3208,9 @@ struct statistics_div { __le32 probe_time; __le32 reserved1; __le32 reserved2; -} __attribute__ ((packed)); +} __packed; -struct statistics_general { +struct statistics_general_common { __le32 temperature; /* radio temperature */ __le32 temperature_m; /* for 5000 and up, this is radio voltage */ struct statistics_dbg dbg; @@ -3162,9 +3226,33 @@ struct statistics_general { * in order to get out of bad PHY status */ __le32 num_of_sos_states; +} __packed; + +struct statistics_bt_activity { + /* Tx statistics */ + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + /* Rx statistics */ + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +} __packed; + +struct statistics_general { + struct statistics_general_common common; __le32 reserved2; __le32 reserved3; -} __attribute__ ((packed)); +} __packed; + +struct statistics_general_bt { + struct statistics_general_common common; + struct statistics_bt_activity activity; + __le32 reserved2; + __le32 reserved3; +} __packed; #define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) #define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) @@ -3189,7 +3277,7 @@ struct statistics_general { #define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ struct iwl_statistics_cmd { __le32 configuration_flags; /* IWL_STATS_CONF_* */ -} __attribute__ ((packed)); +} __packed; /* * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) @@ -3214,15 +3302,21 @@ struct iwl3945_notif_statistics { struct iwl39_statistics_rx rx; struct iwl39_statistics_tx tx; struct iwl39_statistics_general general; -} __attribute__ ((packed)); +} __packed; struct iwl_notif_statistics { __le32 
flag; struct statistics_rx rx; struct statistics_tx tx; struct statistics_general general; -} __attribute__ ((packed)); +} __packed; +struct iwl_bt_notif_statistics { + __le32 flag; + struct statistics_rx_bt rx; + struct statistics_tx tx; + struct statistics_general_bt general; +} __packed; /* * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) @@ -3253,7 +3347,7 @@ struct iwl_missed_beacon_notif { __le32 total_missed_becons; __le32 num_expected_beacons; __le32 num_recvd_beacons; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** @@ -3441,6 +3535,41 @@ struct iwl_missed_beacon_notif { #define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) #define HD_OFDM_ENERGY_TH_IN_INDEX (10) +/* + * Additional table entries in enhance SENSITIVITY_CMD + */ +#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11) +#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12) +#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17) +#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19) +#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21) +#define HD_RESERVED (22) + +/* number of entries for enhanced tbl */ +#define ENHANCE_HD_TABLE_SIZE (23) + +/* number of additional entries for enhanced tbl */ +#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE) + +#define HD_INA_NON_SQUARE_DET_OFDM_DATA cpu_to_le16(0) +#define HD_INA_NON_SQUARE_DET_CCK_DATA cpu_to_le16(0) +#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA cpu_to_le16(0) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(668) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4) +#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(486) +#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(37) +#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA cpu_to_le16(853) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA cpu_to_le16(4) +#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA cpu_to_le16(476) +#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA cpu_to_le16(99) + + /* Control field in struct iwl_sensitivity_cmd */ #define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) #define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) @@ -3455,7 +3584,15 @@ struct iwl_missed_beacon_notif { struct iwl_sensitivity_cmd { __le16 control; /* always use "1" */ __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ -} __attribute__ ((packed)); +} __packed; + +/* + * + */ +struct iwl_enhance_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */ +} __packed; /** @@ -3523,10 +3660,10 @@ enum { IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, - IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18, - IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19, + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 18, }; +#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) #define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff) @@ -3536,31 +3673,31 @@ struct iwl_calib_cfg_elmnt_s { __le32 send_res; __le32 apply_res; __le32 reserved; -} __attribute__ ((packed)); +} __packed; struct iwl_calib_cfg_status_s { struct iwl_calib_cfg_elmnt_s once; struct iwl_calib_cfg_elmnt_s perd; __le32 flags; -} 
__attribute__ ((packed)); +} __packed; struct iwl_calib_cfg_cmd { struct iwl_calib_cfg_status_s ucd_calib_cfg; struct iwl_calib_cfg_status_s drv_calib_cfg; __le32 reserved1; -} __attribute__ ((packed)); +} __packed; struct iwl_calib_hdr { u8 op_code; u8 first_group; u8 groups_num; u8 data_valid; -} __attribute__ ((packed)); +} __packed; struct iwl_calib_cmd { struct iwl_calib_hdr hdr; u8 data[0]; -} __attribute__ ((packed)); +} __packed; /* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ struct iwl_calib_diff_gain_cmd { @@ -3569,14 +3706,14 @@ struct iwl_calib_diff_gain_cmd { s8 diff_gain_b; s8 diff_gain_c; u8 reserved1; -} __attribute__ ((packed)); +} __packed; struct iwl_calib_xtal_freq_cmd { struct iwl_calib_hdr hdr; u8 cap_pin1; u8 cap_pin2; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ struct iwl_calib_chain_noise_reset_cmd { @@ -3590,7 +3727,7 @@ struct iwl_calib_chain_noise_gain_cmd { u8 delta_gain_1; u8 delta_gain_2; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** * (12) @@ -3613,7 +3750,7 @@ struct iwl_led_cmd { u8 on; /* # intervals on while blinking; * "0", regardless of "off", turns LED off */ u8 reserved; -} __attribute__ ((packed)); +} __packed; /* * station priority table entries @@ -3749,7 +3886,7 @@ struct iwl_wimax_coex_event_entry { u8 win_medium_prio; u8 reserved; u8 flags; -} __attribute__ ((packed)); +} __packed; /* COEX flag masks */ @@ -3766,7 +3903,7 @@ struct iwl_wimax_coex_cmd { u8 flags; u8 reserved[3]; struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; -} __attribute__ ((packed)); +} __packed; /* * Coexistence MEDIUM NOTIFICATION @@ -3795,7 +3932,7 @@ struct iwl_wimax_coex_cmd { struct iwl_coex_medium_notification { __le32 status; __le32 events; -} __attribute__ ((packed)); +} __packed; /* * Coexistence EVENT Command @@ -3810,11 +3947,11 @@ struct iwl_coex_event_cmd { u8 flags; u8 event; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwl_coex_event_resp { __le32 status; -} __attribute__ ((packed)); +} __packed; /****************************************************************************** @@ -3851,6 +3988,7 @@ struct iwl_rx_packet { struct iwl_sleep_notification sleep_notif; struct iwl_spectrum_resp spectrum; struct iwl_notif_statistics stats; + struct iwl_bt_notif_statistics stats_bt; struct iwl_compressed_ba_resp compressed_ba; struct iwl_missed_beacon_notif missed_beacon; struct iwl_coex_medium_notification coex_medium_notif; @@ -3858,7 +3996,7 @@ struct iwl_rx_packet { __le32 status; u8 raw[0]; } u; -} __attribute__ ((packed)); +} __packed; int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 5bbc5298ef96e50b29206ea3e5ea8ca408e22695..8024d44ce4bb592551e72aaca266614b97d595ab 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -141,13 +141,14 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) } EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); -u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) +u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) { int i; u8 ind = ant; + for (i = 0; i < RATE_ANT_NUM - 1; i++) { ind = (ind + 1) < RATE_ANT_NUM ? 
ind + 1 : 0; - if (priv->hw_params.valid_tx_ant & BIT(ind)) + if (valid & BIT(ind)) return ind; } return ant; @@ -169,7 +170,7 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, struct ieee80211_hw *hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); if (hw == NULL) { - printk(KERN_ERR "%s: Can not allocate network device\n", + pr_err("%s: Can not allocate network device\n", cfg->name); goto out; } @@ -457,7 +458,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, if (!sta_ht_inf->ht_supported) return 0; } -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_DEBUGFS if (priv->disable_ht40) return 0; #endif @@ -506,11 +507,11 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif) } beacon_int = iwl_adjust_beacon_interval(beacon_int, - priv->hw_params.max_beacon_itrvl * 1024); + priv->hw_params.max_beacon_itrvl * TIME_UNIT); priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int); tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ - interval_tm = beacon_int * 1024; + interval_tm = beacon_int * TIME_UNIT; rem = do_div(tsf, interval_tm); priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem); @@ -932,9 +933,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch) } EXPORT_SYMBOL(iwl_set_rxon_channel); -static void iwl_set_flags_for_band(struct iwl_priv *priv, - enum ieee80211_band band, - struct ieee80211_vif *vif) +void iwl_set_flags_for_band(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif) { if (band == IEEE80211_BAND_5GHZ) { priv->staging_rxon.flags &= @@ -943,19 +944,17 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv, priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; } else { /* Copied from iwl_post_associate() */ - if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + if (vif && vif->bss_conf.use_short_slot) priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - if (vif && vif->type == NL80211_IFTYPE_ADHOC) - priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; } } +EXPORT_SYMBOL(iwl_set_flags_for_band); /* * initialize rxon structure with default values from eeprom @@ -1021,15 +1020,17 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, /* clear both MIX and PURE40 mode flag */ priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); - memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); - memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); + + if (vif) + memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN); + priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff; } EXPORT_SYMBOL(iwl_connection_init_rx_config); -static void iwl_set_rate(struct iwl_priv *priv) +void iwl_set_rate(struct iwl_priv *priv) { const struct ieee80211_supported_band *hw = NULL; struct ieee80211_rate *rate; @@ -1057,6 +1058,21 @@ static void iwl_set_rate(struct iwl_priv *priv) priv->staging_rxon.ofdm_basic_rates = (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; } +EXPORT_SYMBOL(iwl_set_rate); + +void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if 
(priv->switch_rxon.switch_in_progress) { + ieee80211_chswitch_done(priv->vif, is_success); + mutex_lock(&priv->mutex); + priv->switch_rxon.switch_in_progress = false; + mutex_unlock(&priv->mutex); + } +} +EXPORT_SYMBOL(iwl_chswitch_done); void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { @@ -1071,11 +1087,12 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) priv->staging_rxon.channel = csa->channel; IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", le16_to_cpu(csa->channel)); - } else + iwl_chswitch_done(priv, true); + } else { IWL_ERR(priv, "CSA notif (fail) : channel %d\n", le16_to_cpu(csa->channel)); - - priv->switch_rxon.switch_in_progress = false; + iwl_chswitch_done(priv, false); + } } } EXPORT_SYMBOL(iwl_rx_csa); @@ -1507,130 +1524,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) } EXPORT_SYMBOL(iwl_send_statistics_request); -/** - * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, - * using sample data 100 bytes apart. If these sample points are good, - * it's a pretty good bet that everything between them is good, too. - */ -static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) -{ - u32 val; - int ret = 0; - u32 errcnt = 0; - u32 i; - - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - - for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, - i + IWL49_RTC_INST_LOWER_BOUND); - val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - ret = -EIO; - errcnt++; - if (errcnt >= 3) - break; - } - } - - return ret; -} - -/** - * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host, - * looking at all data. 
- */ -static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image, - u32 len) -{ - u32 val; - u32 save_len = len; - int ret = 0; - u32 errcnt; - - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - - iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, - IWL49_RTC_INST_LOWER_BOUND); - - errcnt = 0; - for (; len > 0; len -= sizeof(u32), image++) { - /* read data comes through single port, auto-incr addr */ - /* NOTE: Use the debugless read so we don't flood kernel log - * if IWL_DL_IO is set */ - val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); - if (val != le32_to_cpu(*image)) { - IWL_ERR(priv, "uCode INST section is invalid at " - "offset 0x%x, is 0x%x, s/b 0x%x\n", - save_len - len, val, le32_to_cpu(*image)); - ret = -EIO; - errcnt++; - if (errcnt >= 20) - break; - } - } - - if (!errcnt) - IWL_DEBUG_INFO(priv, - "ucode image in INSTRUCTION memory is good\n"); - - return ret; -} - -/** - * iwl_verify_ucode - determine which instruction image is in SRAM, - * and verify its contents - */ -int iwl_verify_ucode(struct iwl_priv *priv) -{ - __le32 *image; - u32 len; - int ret; - - /* Try bootstrap */ - image = (__le32 *)priv->ucode_boot.v_addr; - len = priv->ucode_boot.len; - ret = iwlcore_verify_inst_sparse(priv, image, len); - if (!ret) { - IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); - return 0; - } - - /* Try initialize */ - image = (__le32 *)priv->ucode_init.v_addr; - len = priv->ucode_init.len; - ret = iwlcore_verify_inst_sparse(priv, image, len); - if (!ret) { - IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); - return 0; - } - - /* Try runtime/protocol */ - image = (__le32 *)priv->ucode_code.v_addr; - len = priv->ucode_code.len; - ret = iwlcore_verify_inst_sparse(priv, image, len); - if (!ret) { - IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); - return 0; - } - - IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); - - /* Since nothing seems to match, show first several data entries in - * instruction SRAM, so maybe visual inspection will give a clue. - * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ - image = (__le32 *)priv->ucode_boot.v_addr; - len = priv->ucode_boot.len; - ret = iwl_verify_inst_full(priv, image, len); - - return ret; -} -EXPORT_SYMBOL(iwl_verify_ucode); - - void iwl_rf_kill_ct_config(struct iwl_priv *priv) { struct iwl_ct_kill_config cmd; @@ -1855,6 +1748,37 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv) iwlcore_commit_rxon(priv); } +static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + __le64 timestamp; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = skb; + + timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + priv->timestamp = le64_to_cpu(timestamp); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + + priv->cfg->ops->lib->post_associate(priv, priv->vif); + + return 0; +} + void iwl_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1870,6 +1794,15 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); + if (changes & BSS_CHANGED_QOS) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.qos_active = bss_conf->qos; + iwl_update_qos(priv); + spin_unlock_irqrestore(&priv->lock, flags); + } + if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) { dev_kfree_skb(priv->ibss_beacon); priv->ibss_beacon = ieee80211_beacon_get(hw, vif); @@ -2012,38 +1945,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, } EXPORT_SYMBOL(iwl_bss_info_changed); -int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct iwl_priv *priv = hw->priv; - unsigned long flags; - __le64 timestamp; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - if (!iwl_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return -EIO; - } - - spin_lock_irqsave(&priv->lock, flags); - - if (priv->ibss_beacon) - dev_kfree_skb(priv->ibss_beacon); - - priv->ibss_beacon = skb; - - timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; - priv->timestamp = le64_to_cpu(timestamp); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - spin_unlock_irqrestore(&priv->lock, flags); - - priv->cfg->ops->lib->post_associate(priv, priv->vif); - - return 0; -} -EXPORT_SYMBOL(iwl_mac_beacon_update); - static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif) { iwl_connection_init_rx_config(priv, vif); @@ -2051,8 +1952,6 @@ static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif) if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv); - memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); - return iwlcore_commit_rxon(priv); } @@ -2061,7 +1960,8 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) struct iwl_priv *priv = hw->priv; int err = 0; - IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type); + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + vif->type, vif->addr); mutex_lock(&priv->mutex); @@ -2079,9 +1979,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) priv->vif = vif; priv->iw_mode = vif->type; - IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr); - memcpy(priv->mac_addr, vif->addr, ETH_ALEN); - err = 
iwl_set_mode(priv, vif); if (err) goto out_err; @@ -2115,6 +2012,11 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, } if (priv->vif == vif) { priv->vif = NULL; + if (priv->scan_vif == vif) { + ieee80211_scan_completed(priv->hw, true); + priv->scan_vif = NULL; + priv->scan_request = NULL; + } memset(priv->bssid, 0, ETH_ALEN); } mutex_unlock(&priv->mutex); @@ -2215,22 +2117,10 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) iwl_set_flags_for_band(priv, conf->channel->band, priv->vif); spin_unlock_irqrestore(&priv->lock, flags); - if (iwl_is_associated(priv) && - (le16_to_cpu(priv->active_rxon.channel) != ch) && - priv->cfg->ops->lib->set_channel_switch) { - iwl_set_rate(priv); - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - ret = priv->cfg->ops->lib->set_channel_switch(priv, - ch); - if (!ret) { - iwl_print_rx_config_cmd(priv); - goto out; - } - priv->switch_rxon.switch_in_progress = false; - } + + if (priv->cfg->ops->lib->update_bcast_station) + ret = priv->cfg->ops->lib->update_bcast_station(priv); + set_ch_out: /* The list of supported rates and rate mask can be different * for each band; since the band may have changed, reset @@ -2252,15 +2142,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) iwl_set_tx_power(priv, conf->power_level, false); } - if (changed & IEEE80211_CONF_CHANGE_QOS) { - bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS); - - spin_lock_irqsave(&priv->lock, flags); - priv->qos_data.qos_active = qos_active; - iwl_update_qos(priv); - spin_unlock_irqrestore(&priv->lock, flags); - } - if (!iwl_is_ready(priv)) { IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); goto out; @@ -2588,7 +2469,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) EXPORT_SYMBOL(iwl_update_stats); #endif -const static char *get_csr_string(int cmd) +static const char *get_csr_string(int cmd) { switch (cmd) { IWL_CMD(CSR_HW_IF_CONFIG_REG); @@ -2659,7 +2540,7 @@ void iwl_dump_csr(struct iwl_priv *priv) } EXPORT_SYMBOL(iwl_dump_csr); -const static char *get_fh_string(int cmd) +static const char *get_fh_string(int cmd) { switch (cmd) { IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); @@ -2745,7 +2626,7 @@ static void iwl_force_rf_reset(struct iwl_priv *priv) } -int iwl_force_reset(struct iwl_priv *priv, int mode) +int iwl_force_reset(struct iwl_priv *priv, int mode, bool external) { struct iwl_force_reset *force_reset; @@ -2758,12 +2639,14 @@ int iwl_force_reset(struct iwl_priv *priv, int mode) } force_reset = &priv->force_reset[mode]; force_reset->reset_request_count++; - if (force_reset->last_force_reset_jiffies && - time_after(force_reset->last_force_reset_jiffies + - force_reset->reset_duration, jiffies)) { - IWL_DEBUG_INFO(priv, "force reset rejected\n"); - force_reset->reset_reject_count++; - return -EAGAIN; + if (!external) { + if (force_reset->last_force_reset_jiffies && + time_after(force_reset->last_force_reset_jiffies + + force_reset->reset_duration, jiffies)) { + IWL_DEBUG_INFO(priv, "force reset rejected\n"); + force_reset->reset_reject_count++; + return -EAGAIN; + } } force_reset->reset_success_count++; force_reset->last_force_reset_jiffies = jiffies; @@ -2773,6 +2656,19 @@ int iwl_force_reset(struct iwl_priv *priv, int mode) iwl_force_rf_reset(priv); break; case IWL_FW_RESET: + /* + * if the request is from external(ex: debugfs), + * then always perform the request in regardless the module + * parameter setting + * if the request is from internal (uCode error or driver + * detect failure), then 
fw_restart module parameter + * need to be check before performing firmware reload + */ + if (!external && !priv->cfg->mod_params->restart_fw) { + IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " + "module parameter setting\n"); + break; + } IWL_ERR(priv, "On demand firmware reload\n"); /* Set the FW error flag -- cleared on iwl_down */ set_bit(STATUS_FW_ERROR, &priv->status); @@ -2831,7 +2727,7 @@ static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt) "queue %d stuck %d time. Fw reload.\n", q->id, q->repeat_same_read_ptr); q->repeat_same_read_ptr = 0; - iwl_force_reset(priv, IWL_FW_RESET); + iwl_force_reset(priv, IWL_FW_RESET, false); } else { q->repeat_same_read_ptr++; IWL_DEBUG_RADIO(priv, @@ -2881,6 +2777,61 @@ void iwl_bg_monitor_recover(unsigned long data) } EXPORT_SYMBOL(iwl_bg_monitor_recover); + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & + (iwl_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits) >> + priv->hw_params.beacon_time_tsf_bits); + rem = (usec % interval) & iwl_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + + return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; +} +EXPORT_SYMBOL(iwl_usecs_to_beacons); + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval) +{ + u32 base_low = base & iwl_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 addon_low = addon & iwl_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & iwl_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)) + + (addon & iwl_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << priv->hw_params.beacon_time_tsf_bits); + } else + res += (1 << priv->hw_params.beacon_time_tsf_bits); + + return cpu_to_le32(res); +} +EXPORT_SYMBOL(iwl_add_beacon_time); + #ifdef CONFIG_PM int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) @@ -2908,6 +2859,7 @@ int iwl_pci_resume(struct pci_dev *pdev) { struct iwl_priv *priv = pci_get_drvdata(pdev); int ret; + bool hw_rfkill = false; /* * We disable the RETRY_TIMEOUT register (0x41) to keep @@ -2922,6 +2874,17 @@ int iwl_pci_resume(struct pci_dev *pdev) pci_restore_state(pdev); iwl_enable_interrupts(priv); + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rfkill = true; + + if (hw_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill); + return 0; } EXPORT_SYMBOL(iwl_pci_resume); diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 31775bd9c36170faa839da119456fd4cf747a385..e9d23f2f869d8c72a81c00b4890e215518e058ff 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ 
b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -79,6 +79,8 @@ struct iwl_cmd; .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ .driver_data = (kernel_ulong_t)&(cfg) +#define TIME_UNIT 1024 + #define IWL_SKU_G 0x1 #define IWL_SKU_A 0x2 #define IWL_SKU_N 0x8 @@ -123,6 +125,8 @@ struct iwl_debugfs_ops { size_t count, loff_t *ppos); ssize_t (*general_stats_read)(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); + ssize_t (*bt_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); }; struct iwl_temp_ops { @@ -173,7 +177,8 @@ struct iwl_lib_ops { void (*dump_nic_error_log)(struct iwl_priv *priv); void (*dump_csr)(struct iwl_priv *priv); int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); - int (*set_channel_switch)(struct iwl_priv *priv, u16 channel); + int (*set_channel_switch)(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch); /* power management */ struct iwl_apm_ops apm_ops; @@ -193,6 +198,7 @@ struct iwl_lib_ops { /* station management */ int (*manage_ibss_station)(struct iwl_priv *priv, struct ieee80211_vif *vif, bool add); + int (*update_bcast_station)(struct iwl_priv *priv); /* recover from tx queue stall */ void (*recover_from_tx_stall)(unsigned long data); /* check for plcp health */ @@ -201,6 +207,9 @@ struct iwl_lib_ops { /* check for ack health */ bool (*check_ack_health)(struct iwl_priv *priv, struct iwl_rx_packet *pkt); + int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control); + void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control); + struct iwl_debugfs_ops debugfs_ops; }; @@ -325,7 +334,10 @@ struct iwl_cfg { const bool ucode_tracing; const bool sensitivity_calib_by_driver; const bool chain_noise_calib_by_driver; - u8 scan_antennas[IEEE80211_NUM_BANDS]; + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; + const bool need_dc_calib; + const bool bt_statistics; }; /*************************** @@ -343,6 +355,9 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv); int iwl_full_rxon_required(struct iwl_priv *priv); void iwl_set_rxon_chain(struct iwl_priv *priv); int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); +void iwl_set_flags_for_band(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif); u8 iwl_get_single_channel_number(struct iwl_priv *priv, enum ieee80211_band band); void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); @@ -350,6 +365,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, struct ieee80211_sta_ht_cap *sta_ht_inf); void iwl_connection_init_rx_config(struct iwl_priv *priv, struct ieee80211_vif *vif); +void iwl_set_rate(struct iwl_priv *priv); int iwl_set_decrypted_flag(struct iwl_priv *priv, struct ieee80211_hdr *hdr, u32 decrypt_res, @@ -364,7 +380,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes); -int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb); int iwl_commit_rxon(struct iwl_priv *priv); int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); @@ -447,20 +462,11 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, int iwl_rx_queue_space(const struct iwl_rx_queue *q); void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); /* Handlers */ -void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, struct 
iwl_rx_mem_buffer *rxb); -bool iwl_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); -bool iwl_good_ack_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt); -void iwl_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); +void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); /* TX helpers */ @@ -474,8 +480,6 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, dma_addr_t addr, u16 len, u8 reset, u8 pad); int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq); -void iwl_free_tfds_in_queue(struct iwl_priv *priv, - int sta_id, int tid, int freed); void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, int slots_num, u32 txq_id); @@ -495,7 +499,7 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); -u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx); +u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) { @@ -526,9 +530,9 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); void iwl_bg_start_internal_scan(struct work_struct *work); void iwl_internal_short_hw_scan(struct iwl_priv *priv); -int iwl_force_reset(struct iwl_priv *priv, int mode); +int iwl_force_reset(struct iwl_priv *priv, int mode, bool external); u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, - const u8 *ie, int ie_len, int left); + const u8 *ta, const u8 *ie, int ie_len, int left); void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); u16 iwl_get_active_dwell_time(struct iwl_priv *priv, enum ieee80211_band band, @@ -595,6 +599,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) } void iwl_bg_monitor_recover(unsigned long data); +u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval); +__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval); #ifdef CONFIG_PM int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); @@ -693,7 +700,6 @@ extern void iwl_rf_kill_ct_config(struct iwl_priv *priv); extern void iwl_send_bt_config(struct iwl_priv *priv); extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear); -extern int iwl_verify_ucode(struct iwl_priv *priv); extern int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_link_quality_cmd *lq, u8 flags, bool init); void iwl_apm_stop(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 254c35ae8b38b583472df5254a3b50c36e83825c..ecf98e7ac4edac798b90bc3db4116ff1a7b923dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -298,6 +298,7 @@ #define CSR_HW_REV_TYPE_1000 (0x0000060) #define CSR_HW_REV_TYPE_6x00 (0x0000070) #define CSR_HW_REV_TYPE_6x50 (0x0000080) +#define CSR_HW_REV_TYPE_6x50g2 (0x0000084) #define CSR_HW_REV_TYPE_6x00g2 (0x00000B0) #define CSR_HW_REV_TYPE_NONE (0x00000F0) diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 9659c5d01df9331b87d4e458bb19769a87bef8d8..e96a1bb12783db78efc0d07a2794599525f9f830 100644 --- 
a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -106,27 +106,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .open = iwl_dbgfs_open_file_generic, \ }; -int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) -{ - int p = 0; - - p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", - le32_to_cpu(priv->statistics.flag)); - if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK) - p += scnprintf(buf + p, bufsz - p, - "\tStatistics have been cleared\n"); - p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", - (le32_to_cpu(priv->statistics.flag) & - UCODE_STATISTICS_FREQUENCY_MSK) - ? "2.4 GHz" : "5.2 GHz"); - p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", - (le32_to_cpu(priv->statistics.flag) & - UCODE_STATISTICS_NARROW_BAND_MSK) - ? "enabled" : "disabled"); - return p; -} -EXPORT_SYMBOL(iwl_dbgfs_statistics_flag); - static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -330,45 +309,35 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, for (i = 0; i < max_sta; i++) { station = &priv->stations[i]; - if (station->used) { - pos += scnprintf(buf + pos, bufsz - pos, - "station %d:\ngeneral data:\n", i+1); - pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n", - station->sta.sta.sta_id); - pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n", - station->sta.mode); - pos += scnprintf(buf + pos, bufsz - pos, - "flags: 0x%x\n", - station->sta.station_flags_msk); - pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "seq_num\t\ttxq_id"); - pos += scnprintf(buf + pos, bufsz - pos, - "\tframe_count\twait_for_ba\t"); - pos += scnprintf(buf + pos, bufsz - pos, - "start_idx\tbitmap0\t"); - pos += scnprintf(buf + pos, bufsz - pos, - "bitmap1\trate_n_flags"); - pos += scnprintf(buf + pos, bufsz - pos, "\n"); + if (!station->used) + continue; + pos += scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", + i, station->sta.sta.addr, + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, + "TID\tseq_num\ttxq_id\tframes\ttfds\t"); + pos += scnprintf(buf + pos, bufsz - pos, + "start_idx\tbitmap\t\t\trate_n_flags\n"); - for (j = 0; j < MAX_TID_COUNT; j++) { - pos += scnprintf(buf + pos, bufsz - pos, - "[%d]:\t\t%u", j, - station->tid[j].seq_number); - pos += scnprintf(buf + pos, bufsz - pos, - "\t%u\t\t%u\t\t%u\t\t", - station->tid[j].agg.txq_id, - station->tid[j].agg.frame_count, - station->tid[j].agg.wait_for_ba); + for (j = 0; j < MAX_TID_COUNT; j++) { + pos += scnprintf(buf + pos, bufsz - pos, + "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", + j, station->tid[j].seq_number, + station->tid[j].agg.txq_id, + station->tid[j].agg.frame_count, + station->tid[j].tfds_in_queue, + station->tid[j].agg.start_idx, + station->tid[j].agg.bitmap, + station->tid[j].agg.rate_n_flags); + + if (station->tid[j].agg.wait_for_ba) pos += scnprintf(buf + pos, bufsz - pos, - "%u\t%llu\t%u", - station->tid[j].agg.start_idx, - (unsigned long long)station->tid[j].agg.bitmap, - station->tid[j].agg.rate_n_flags); - pos += scnprintf(buf + pos, bufsz - pos, "\n"); - } + " - waitforba"); pos += scnprintf(buf + pos, bufsz - pos, "\n"); } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); @@ -1049,8 +1018,13 @@ static ssize_t 
iwl_dbgfs_rx_queue_read(struct file *file, rxq->write); pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", rxq->free_count); - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "closed_rb_num: Not Allocated\n"); + } return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -1293,7 +1267,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + struct iwl_priv *priv = file->private_data; int pos = 0; char buf[128]; const size_t bufsz = sizeof(buf); @@ -1343,7 +1317,7 @@ static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + struct iwl_priv *priv = file->private_data; int len = 0; char buf[20]; @@ -1355,7 +1329,7 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + struct iwl_priv *priv = file->private_data; int len = 0; char buf[20]; @@ -1368,7 +1342,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + struct iwl_priv *priv = file->private_data; char *buf; int pos = 0; ssize_t ret = -EFAULT; @@ -1430,7 +1404,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + struct iwl_priv *priv = file->private_data; int pos = 0; char buf[12]; const size_t bufsz = sizeof(buf); @@ -1456,10 +1430,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, return -EFAULT; if (sscanf(buf, "%d", &plcp) != 1) return -EINVAL; - if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || + if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) priv->cfg->plcp_delta_threshold = - IWL_MAX_PLCP_ERR_THRESHOLD_DEF; + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; else priv->cfg->plcp_delta_threshold = plcp; return count; @@ -1513,7 +1487,7 @@ static ssize_t iwl_dbgfs_force_reset_write(struct file *file, switch (reset) { case IWL_RF_RESET: case IWL_FW_RESET: - ret = iwl_force_reset(priv, reset); + ret = iwl_force_reset(priv, reset, true); break; default: return -EINVAL; @@ -1521,6 +1495,40 @@ static ssize_t iwl_dbgfs_force_reset_write(struct file *file, return ret ? 
ret : count; } +static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &flush) != 1) + return -EINVAL; + + if (iwl_is_rfkill(priv)) + return -EFAULT; + + priv->cfg->ops->lib->dev_txfifo_flush(priv, IWL_DROP_ALL); + + return count; +} + +static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + + return priv->cfg->ops->lib->debugfs_ops.bt_stats_read(file, + user_buf, count, ppos); +} + DEBUGFS_READ_FILE_OPS(rx_statistics); DEBUGFS_READ_FILE_OPS(tx_statistics); DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); @@ -1542,6 +1550,8 @@ DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); DEBUGFS_READ_WRITE_FILE_OPS(force_reset); DEBUGFS_READ_FILE_OPS(rxon_flags); DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(txfifo_flush); +DEBUGFS_READ_FILE_OPS(ucode_bt_stats); /* * Create the debugfs files and directories @@ -1600,6 +1610,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + if (priv->cfg->ops->lib->dev_txfifo_flush) + DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); if (priv->cfg->sensitivity_calib_by_driver) DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); @@ -1607,6 +1619,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); if (priv->cfg->ucode_tracing) DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); + if (priv->cfg->bt_statistics) + DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); if (priv->cfg->sensitivity_calib_by_driver) diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index f3f3473c5c7ec852bf73657596354fe99d473e6c..f35bcad56e36bd68779edb3acf930d6b7595359a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -48,25 +48,6 @@ #include "iwl-power.h" #include "iwl-agn-rs.h" -/* configuration for the iwl4965 */ -extern struct iwl_cfg iwl4965_agn_cfg; -extern struct iwl_cfg iwl5300_agn_cfg; -extern struct iwl_cfg iwl5100_agn_cfg; -extern struct iwl_cfg iwl5350_agn_cfg; -extern struct iwl_cfg iwl5100_bgn_cfg; -extern struct iwl_cfg iwl5100_abg_cfg; -extern struct iwl_cfg iwl5150_agn_cfg; -extern struct iwl_cfg iwl5150_abg_cfg; -extern struct iwl_cfg iwl6000g2a_2agn_cfg; -extern struct iwl_cfg iwl6000i_2agn_cfg; -extern struct iwl_cfg iwl6000i_2abg_cfg; -extern struct iwl_cfg iwl6000i_2bg_cfg; -extern struct iwl_cfg iwl6000_3agn_cfg; -extern struct iwl_cfg iwl6050_2agn_cfg; -extern struct iwl_cfg iwl6050_2abg_cfg; -extern struct iwl_cfg iwl1000_bgn_cfg; -extern struct iwl_cfg iwl1000_bg_cfg; - struct iwl_tx_queue; /* CT-KILL constants */ @@ -133,8 +114,8 @@ struct iwl_cmd_meta { * structure is stored at the end of the shared queue memory. 
*/ u32 flags; - DECLARE_PCI_UNMAP_ADDR(mapping) - DECLARE_PCI_UNMAP_LEN(len) + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); }; /* @@ -157,11 +138,11 @@ struct iwl_queue { * space more than this */ int high_mark; /* high watermark, stop queue if free * space less than this */ -} __attribute__ ((packed)); +} __packed; /* One for each TFD */ struct iwl_tx_info { - struct sk_buff *skb[IWL_NUM_OF_TBS - 1]; + struct sk_buff *skb; }; /** @@ -343,8 +324,8 @@ struct iwl_device_cmd { struct iwl_tx_cmd tx; struct iwl6000_channel_switch_cmd chswitch; u8 payload[DEF_CMD_PAYLOAD_SIZE]; - } __attribute__ ((packed)) cmd; -} __attribute__ ((packed)); + } __packed cmd; +} __packed; #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) @@ -367,7 +348,7 @@ struct iwl_host_cmd { /** * struct iwl_rx_queue - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) - * @dma_addr: bus address of buffer of receive buffer descriptors (rbd) + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free @@ -381,7 +362,7 @@ struct iwl_host_cmd { */ struct iwl_rx_queue { __le32 *bd; - dma_addr_t dma_addr; + dma_addr_t bd_dma; struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; @@ -433,7 +414,7 @@ struct iwl_ht_agg { struct iwl_tid_data { - u16 seq_number; + u16 seq_number; /* agn only */ u16 tfds_in_queue; struct iwl_ht_agg agg; }; @@ -583,6 +564,14 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_INIT_DATA = 4, IWL_UCODE_TLV_BOOT = 5, IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ + IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, + IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, + IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, + IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, + IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, + IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, + IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, + IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, }; struct iwl_ucode_tlv { @@ -590,7 +579,7 @@ struct iwl_ucode_tlv { __le16 alternative; /* see comment */ __le32 length; /* not including type/length fields */ u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define IWL_TLV_UCODE_MAGIC 0x0a4c5749 @@ -675,6 +664,7 @@ struct iwl_sensitivity_ranges { * @sw_crypto: 0 for hw, 1 for sw * @max_xxx_size: for ucode uses * @ct_kill_threshold: temperature threshold + * @beacon_time_tsf_bits: number of valid tsf bits for beacon time * @calib_init_cfg: setup initial calibrations for the hw * @struct iwl_sensitivity_ranges: range of sensitivity values */ @@ -701,6 +691,7 @@ struct iwl_hw_params { u32 ct_kill_threshold; /* value in hw-dependent units */ u32 ct_kill_exit_threshold; /* value in hw-dependent units */ /* for 1000, 6000 series and up */ + u16 beacon_time_tsf_bits; u32 calib_init_cfg; const struct iwl_sensitivity_ranges *sens; }; @@ -1047,11 +1038,12 @@ struct iwl_event_log { * This is the threshold value of plcp error rate per 100mSecs. It is * used to set and check for the validity of plcp_delta. 
*/ -#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0) +#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1) #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) +#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0) #define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) @@ -1075,6 +1067,20 @@ struct iwl_force_reset { unsigned long last_force_reset_jiffies; }; +/* extend beacon time format bit shifting */ +/* + * for _3945 devices + * bits 31:24 - extended + * bits 23:0 - interval + */ +#define IWL3945_EXT_BEACON_TIME_POS 24 +/* + * for _agn devices + * bits 31:22 - extended + * bits 21:0 - interval + */ +#define IWLAGN_EXT_BEACON_TIME_POS 22 + struct iwl_priv { /* ieee device used by generic ieee processing code */ @@ -1109,7 +1115,7 @@ struct iwl_priv { /* force reset */ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; - /* we allocate array of iwl4965_channel_info for NIC's valid channels. + /* we allocate array of iwl_channel_info for NIC's valid channels. * Access via channel # using indirect index array */ struct iwl_channel_info *channel_info; /* channel info array */ u8 channel_count; /* # of channels */ @@ -1127,6 +1133,7 @@ struct iwl_priv { void *scan_cmd; enum ieee80211_band scan_band; struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; bool is_internal_short_scan; u8 scan_tx_ant[IEEE80211_NUM_BANDS]; u8 mgmt_tx_ant; @@ -1147,6 +1154,9 @@ struct iwl_priv { u32 hw_wa_rev; u8 rev_id; + /* EEPROM MAC addresses */ + struct mac_address addresses[2]; + /* uCode images, save to reload in case of failure */ int fw_index; /* firmware we're trying to load */ u32 ucode_ver; /* version of ucode, copy of @@ -1174,7 +1184,7 @@ struct iwl_priv { struct iwl_switch_rxon switch_rxon; /* 1st responses from initialize and runtime uCode images. - * 4965's initialize alive response contains some calibration data. */ + * _agn's initialize alive response contains some calibration data. 
*/ struct iwl_init_alive_resp card_alive_init; struct iwl_alive_resp card_alive; @@ -1188,7 +1198,9 @@ struct iwl_priv { u8 start_calib; struct iwl_sensitivity_data sensitivity_data; struct iwl_chain_noise_data chain_noise_data; + bool enhance_sensitivity_table; __le16 sensitivity_tbl[HD_TABLE_SIZE]; + __le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES]; struct iwl_ht_config current_ht_config; @@ -1220,18 +1232,12 @@ struct iwl_priv { struct iwl_power_mgr power_data; struct iwl_tt_mgmt thermal_throttle; - struct iwl_notif_statistics statistics; -#ifdef CONFIG_IWLWIFI_DEBUG - struct iwl_notif_statistics accum_statistics; - struct iwl_notif_statistics delta_statistics; - struct iwl_notif_statistics max_delta; -#endif - /* context information */ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ - u8 mac_addr[ETH_ALEN]; - /*station table variables */ + /* station table variables */ + + /* Note: if lock and sta_lock are needed, lock must be acquired first */ spinlock_t sta_lock; int num_stations; struct iwl_station_entry stations[IWL_STATION_COUNT]; @@ -1273,7 +1279,7 @@ struct iwl_priv { struct delayed_work rfkill_poll; struct iwl3945_notif_statistics statistics; -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_DEBUGFS struct iwl3945_notif_statistics accum_statistics; struct iwl3945_notif_statistics delta_statistics; struct iwl3945_notif_statistics max_delta; @@ -1315,6 +1321,28 @@ struct iwl_priv { bool last_phy_res_valid; struct completion firmware_loading_complete; + + u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; + u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + struct iwl_notif_statistics statistics; + struct iwl_bt_notif_statistics statistics_bt; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_notif_statistics accum_statistics; + struct iwl_notif_statistics delta_statistics; + struct iwl_notif_statistics max_delta; + struct iwl_bt_notif_statistics accum_statistics_bt; + struct iwl_bt_notif_statistics delta_statistics_bt; + struct iwl_bt_notif_statistics max_delta_bt; +#endif } _agn; #endif }; @@ -1336,6 +1364,7 @@ struct iwl_priv { struct work_struct ct_enter; struct work_struct ct_exit; struct work_struct start_internal_scan; + struct work_struct tx_flush; struct tasklet_struct irq_tasklet; @@ -1353,9 +1382,7 @@ struct iwl_priv { /* debugging info */ u32 debug_level; /* per device debugging will override global iwl_debug_level if set */ - u32 framecnt_to_us; - atomic_t restrict_refcnt; - bool disable_ht40; +#endif /* CONFIG_IWLWIFI_DEBUG */ #ifdef CONFIG_IWLWIFI_DEBUGFS /* debugfs */ u16 tx_traffic_idx; @@ -1364,8 +1391,8 @@ struct iwl_priv { u8 *rx_traffic; struct dentry *debugfs_dir; u32 dbgfs_sram_offset, dbgfs_sram_len; + bool disable_ht40; #endif /* CONFIG_IWLWIFI_DEBUGFS */ -#endif /* CONFIG_IWLWIFI_DEBUG */ struct work_struct txpower_work; u32 disable_sens_cal; @@ -1419,9 +1446,9 @@ static inline u32 iwl_get_debug_level(struct iwl_priv *priv) static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv, int txq_id, int idx) { - if (priv->txq[txq_id].txb[idx].skb[0]) + if (priv->txq[txq_id].txb[idx].skb) return (struct ieee80211_hdr *)priv->txq[txq_id]. 
- txb[idx].skb[0]->data; + txb[idx].skb->data; return NULL; } diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index ee11452519e6490a52b8176e445e40edede91e27..a45d02e555cfffbcd3d174deb5dbdc4352ff6bef 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -629,6 +629,9 @@ int iwl_eeprom_check_version(struct iwl_priv *priv) calib_ver < priv->cfg->eeprom_calib_ver) goto err; + IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", + eeprom_ver, calib_ver); + return 0; err: IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 95aa202c85e315a1737301863ebb880c40150c88..a4772aff51fe0479fc5e522a4a1c59df18a68de2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -118,7 +118,7 @@ enum { struct iwl_eeprom_channel { u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ -} __attribute__ ((packed)); +} __packed; /** * iwl_eeprom_enhanced_txpwr structure @@ -144,7 +144,7 @@ struct iwl_eeprom_enhanced_txpwr { s8 reserved; s8 mimo2_max; s8 mimo3_max; -} __attribute__ ((packed)); +} __packed; /* 3945 Specific */ #define EEPROM_3945_EEPROM_VERSION (0x2f) @@ -276,6 +276,10 @@ struct iwl_eeprom_enhanced_txpwr { #define EEPROM_6050_TX_POWER_VERSION (4) #define EEPROM_6050_EEPROM_VERSION (0x532) +/* 6x50g2 Specific */ +#define EEPROM_6050G2_TX_POWER_VERSION (6) +#define EEPROM_6050G2_EEPROM_VERSION (0x553) + /* 6x00g2 Specific */ #define EEPROM_6000G2_TX_POWER_VERSION (6) #define EEPROM_6000G2_EEPROM_VERSION (0x709) @@ -312,7 +316,7 @@ struct iwl_eeprom_calib_measure { u8 gain_idx; /* Index into gain table */ u8 actual_pow; /* Measured RF output power, half-dBm */ s8 pa_det; /* Power amp detector level (not used) */ -} __attribute__ ((packed)); +} __packed; /* @@ -328,7 +332,7 @@ struct iwl_eeprom_calib_ch_info { struct iwl_eeprom_calib_measure measurements[EEPROM_TX_POWER_TX_CHAINS] [EEPROM_TX_POWER_MEASUREMENTS]; -} __attribute__ ((packed)); +} __packed; /* * txpower subband info. 
@@ -345,7 +349,7 @@ struct iwl_eeprom_calib_subband_info { u8 ch_to; /* channel number of highest channel in subband */ struct iwl_eeprom_calib_ch_info ch1; struct iwl_eeprom_calib_ch_info ch2; -} __attribute__ ((packed)); +} __packed; /* @@ -374,7 +378,7 @@ struct iwl_eeprom_calib_info { __le16 voltage; /* signed */ struct iwl_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS]; -} __attribute__ ((packed)); +} __packed; #define ADDRESS_MSK 0x0000FFFF @@ -398,6 +402,7 @@ struct iwl_eeprom_calib_info { #define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ #define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ #define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */ +#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ /* The following masks are to be applied on EEPROM_RADIO_CONFIG */ #define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index 113c3669b9ce407e9165ee4b40b88436b294a514..55b8370bc6d4a6ff7cfe57bad3a18f2cfe094e43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -398,12 +398,7 @@ */ #define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) -#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24) -#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16) - -#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \ - (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \ - FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl)) +#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) /* Tx service channels */ #define FH_SRVC_CHNL (9) @@ -449,7 +444,7 @@ struct iwl_rb_status { __le16 finished_rb_num; __le16 finished_fr_nam; __le32 __unused; /* 3945 only */ -} __attribute__ ((packed)); +} __packed; #define TFD_QUEUE_SIZE_MAX (256) @@ -475,7 +470,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) struct iwl_tfd_tb { __le32 lo; __le16 hi_n_len; -} __attribute__((packed)); +} __packed; /** * struct iwl_tfd @@ -510,7 +505,7 @@ struct iwl_tfd { u8 num_tbs; struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; __le32 __pad; -} __attribute__ ((packed)); +} __packed; /* Keep Warm Size */ #define IWL_KW_SIZE 0x1000 /* 4k */ diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c index 51f89e7ba6814b26e95f128d39315dc637dc5651..258d059ef41f44ae01e4199327133655b5e6df1e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c @@ -49,6 +49,7 @@ const char *get_cmd_string(u8 cmd) IWL_CMD(REPLY_ADD_STA); IWL_CMD(REPLY_REMOVE_STA); IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_TXFIFO_FLUSH); IWL_CMD(REPLY_WEPKEY); IWL_CMD(REPLY_3945_RX); IWL_CMD(REPLY_TX); diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h index 3ff6b9d25a10665397c5b87285207d5591293993..621abe3c5afce3fc2b73b66e37c82c7b4d8fcf66 100644 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -92,6 +92,11 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev, static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) { + if (!desc->len) { + desc->v_addr = NULL; + return -EINVAL; + } + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr, GFP_KERNEL); return (desc->v_addr != NULL) ? 
0 : -ENOMEM; @@ -170,4 +175,26 @@ static inline void iwl_enable_interrupts(struct iwl_priv *priv) iwl_write32(priv, CSR_INT_MASK, priv->inta_mask); } +/** + * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv, + u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv, + u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + #endif /* __iwl_helpers_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 0a5d7cf25196031d635c55f8d5ab23a8ca16e0cf..79773e353baaa56478a6d173fdc5f0f70cf837fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -175,7 +175,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv) INIT_LIST_HEAD(&rxq->rx_used); /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr, + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; @@ -199,32 +199,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv) err_rb: dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->dma_addr); + rxq->bd_dma); err_bd: return -ENOMEM; } EXPORT_SYMBOL(iwl_rx_queue_alloc); -void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) - -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacon_notif *missed_beacon; - - missed_beacon = &pkt->u.missed_beacon; - if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > - priv->missed_beacon_threshold) { - IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n", - le32_to_cpu(missed_beacon->consecutive_missed_beacons), - le32_to_cpu(missed_beacon->total_missed_becons), - le32_to_cpu(missed_beacon->num_recvd_beacons), - le32_to_cpu(missed_beacon->num_expected_beacons)); - if (!test_bit(STATUS_SCANNING, &priv->status)) - iwl_init_sensitivity(priv); - } -} -EXPORT_SYMBOL(iwl_rx_missed_beacon_notif); void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) @@ -243,161 +223,6 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, } EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif); - - -/* Calculate noise level, based on measurements during network silence just - * before arriving beacon. This measurement can be done only if we know - * exactly when to expect beacons, therefore only when we're associated. 
*/ -static void iwl_rx_calc_noise(struct iwl_priv *priv) -{ - struct statistics_rx_non_phy *rx_info - = &(priv->statistics.rx.general); - int num_active_rx = 0; - int total_silence = 0; - int bcn_silence_a = - le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; - int bcn_silence_b = - le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; - int bcn_silence_c = - le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; - int last_rx_noise; - - if (bcn_silence_a) { - total_silence += bcn_silence_a; - num_active_rx++; - } - if (bcn_silence_b) { - total_silence += bcn_silence_b; - num_active_rx++; - } - if (bcn_silence_c) { - total_silence += bcn_silence_c; - num_active_rx++; - } - - /* Average among active antennas */ - if (num_active_rx) - last_rx_noise = (total_silence / num_active_rx) - 107; - else - last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; - - IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", - bcn_silence_a, bcn_silence_b, bcn_silence_c, - last_rx_noise); -} - -#ifdef CONFIG_IWLWIFI_DEBUG -/* - * based on the assumption of all statistics counter are in DWORD - * FIXME: This function is for debugging, do not deal with - * the case of counters roll-over. - */ -static void iwl_accumulative_statistics(struct iwl_priv *priv, - __le32 *stats) -{ - int i; - __le32 *prev_stats; - u32 *accum_stats; - u32 *delta, *max_delta; - - prev_stats = (__le32 *)&priv->statistics; - accum_stats = (u32 *)&priv->accum_statistics; - delta = (u32 *)&priv->delta_statistics; - max_delta = (u32 *)&priv->max_delta; - - for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics); - i += sizeof(__le32), stats++, prev_stats++, delta++, - max_delta++, accum_stats++) { - if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { - *delta = (le32_to_cpu(*stats) - - le32_to_cpu(*prev_stats)); - *accum_stats += *delta; - if (*delta > *max_delta) - *max_delta = *delta; - } - } - - /* reset accumulative statistics for "no-counter" type statistics */ - priv->accum_statistics.general.temperature = - priv->statistics.general.temperature; - priv->accum_statistics.general.temperature_m = - priv->statistics.general.temperature_m; - priv->accum_statistics.general.ttl_timestamp = - priv->statistics.general.ttl_timestamp; - priv->accum_statistics.tx.tx_power.ant_a = - priv->statistics.tx.tx_power.ant_a; - priv->accum_statistics.tx.tx_power.ant_b = - priv->statistics.tx.tx_power.ant_b; - priv->accum_statistics.tx.tx_power.ant_c = - priv->statistics.tx.tx_power.ant_c; -} -#endif - -#define REG_RECALIB_PERIOD (60) - -/** - * iwl_good_plcp_health - checks for plcp error. - * - * When the plcp error is exceeding the thresholds, reset the radio - * to improve the throughput. - */ -bool iwl_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) -{ - bool rc = true; - int combined_plcp_delta; - unsigned int plcp_msec; - unsigned long plcp_received_jiffies; - - /* - * check for plcp_err and trigger radio reset if it exceeds - * the plcp error threshold plcp_delta. - */ - plcp_received_jiffies = jiffies; - plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - - (long) priv->plcp_jiffies); - priv->plcp_jiffies = plcp_received_jiffies; - /* - * check to make sure plcp_msec is not 0 to prevent division - * by zero. 
- */ - if (plcp_msec) { - combined_plcp_delta = - (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) - - le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) + - (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) - - le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); - - if ((combined_plcp_delta > 0) && - ((combined_plcp_delta * 100) / plcp_msec) > - priv->cfg->plcp_delta_threshold) { - /* - * if plcp_err exceed the threshold, - * the following data is printed in csv format: - * Text: plcp_err exceeded %d, - * Received ofdm.plcp_err, - * Current ofdm.plcp_err, - * Received ofdm_ht.plcp_err, - * Current ofdm_ht.plcp_err, - * combined_plcp_delta, - * plcp_msec - */ - IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " - "%u, %u, %u, %u, %d, %u mSecs\n", - priv->cfg->plcp_delta_threshold, - le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), - le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), - le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), - le32_to_cpu( - priv->statistics.rx.ofdm_ht.plcp_err), - combined_plcp_delta, plcp_msec); - rc = false; - } - } - return rc; -} -EXPORT_SYMBOL(iwl_good_plcp_health); - void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt) { @@ -413,7 +238,7 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, */ IWL_ERR(priv, "low ack count detected, " "restart firmware\n"); - if (!iwl_force_reset(priv, IWL_FW_RESET)) + if (!iwl_force_reset(priv, IWL_FW_RESET, false)) return; } } @@ -424,76 +249,13 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, * high plcp error detected * reset Radio */ - iwl_force_reset(priv, IWL_RF_RESET); + iwl_force_reset(priv, IWL_RF_RESET, false); } } } } EXPORT_SYMBOL(iwl_recover_from_statistics); -void iwl_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - int change; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - - IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", - (int)sizeof(priv->statistics), - le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); - - change = ((priv->statistics.general.temperature != - pkt->u.stats.general.temperature) || - ((priv->statistics.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK) != - (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); - -#ifdef CONFIG_IWLWIFI_DEBUG - iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); -#endif - iwl_recover_from_statistics(priv, pkt); - - memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); - - set_bit(STATUS_STATISTICS, &priv->status); - - /* Reschedule the statistics timer to occur in - * REG_RECALIB_PERIOD seconds to ensure we get a - * thermal update even if the uCode doesn't give - * us one */ - mod_timer(&priv->statistics_periodic, jiffies + - msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); - - if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && - (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { - iwl_rx_calc_noise(priv); - queue_work(priv->workqueue, &priv->run_time_calib_work); - } - if (priv->cfg->ops->lib->temp_ops.temperature && change) - priv->cfg->ops->lib->temp_ops.temperature(priv); -} -EXPORT_SYMBOL(iwl_rx_statistics); - -void iwl_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_DEBUG - memset(&priv->accum_statistics, 0, - sizeof(struct iwl_notif_statistics)); - memset(&priv->delta_statistics, 0, - sizeof(struct iwl_notif_statistics)); - memset(&priv->max_delta, 0, - sizeof(struct iwl_notif_statistics)); 
-#endif - IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); - } - iwl_rx_statistics(priv, rxb); -} -EXPORT_SYMBOL(iwl_reply_statistics); - /* * returns non-zero if packet should be dropped */ diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 386c5f96eff81c6467f026bc970c39734b853ffb..b0c6b04739013afec3ab7dcaa015bfeeb5e81b99 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -333,7 +333,8 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, goto out_unlock; } - if (test_bit(STATUS_SCANNING, &priv->status)) { + if (test_bit(STATUS_SCANNING, &priv->status) && + !priv->is_internal_short_scan) { IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); ret = -EAGAIN; goto out_unlock; @@ -348,8 +349,16 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, /* mac80211 will only ask for one band at a time */ priv->scan_band = req->channels[0]->band; priv->scan_request = req; + priv->scan_vif = vif; - ret = iwl_scan_initiate(priv, vif); + /* + * If an internal scan is in progress, just set + * up the scan_request as per above. + */ + if (priv->is_internal_short_scan) + ret = 0; + else + ret = iwl_scan_initiate(priv, vif); IWL_DEBUG_MAC80211(priv, "leave\n"); @@ -420,11 +429,10 @@ void iwl_bg_scan_check(struct work_struct *data) return; mutex_lock(&priv->mutex); - if (test_bit(STATUS_SCANNING, &priv->status) || - test_bit(STATUS_SCAN_ABORTING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting " - "adapter (%dms)\n", - jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + if (test_bit(STATUS_SCANNING, &priv->status) && + !test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) iwl_send_scan_abort(priv); @@ -438,7 +446,7 @@ EXPORT_SYMBOL(iwl_bg_scan_check); */ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, - const u8 *ies, int ie_len, int left) + const u8 *ta, const u8 *ies, int ie_len, int left) { int len = 0; u8 *pos = NULL; @@ -451,7 +459,7 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); memcpy(frame->da, iwl_bcast_addr, ETH_ALEN); - memcpy(frame->sa, priv->mac_addr, ETH_ALEN); + memcpy(frame->sa, ta, ETH_ALEN); memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN); frame->seq_ctrl = 0; @@ -489,12 +497,11 @@ void iwl_bg_abort_scan(struct work_struct *work) !test_bit(STATUS_GEO_CONFIGURED, &priv->status)) return; - mutex_lock(&priv->mutex); - - cancel_delayed_work_sync(&priv->scan_check); - set_bit(STATUS_SCAN_ABORTING, &priv->status); - iwl_send_scan_abort(priv); + cancel_delayed_work(&priv->scan_check); + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) + iwl_send_scan_abort(priv); mutex_unlock(&priv->mutex); } EXPORT_SYMBOL(iwl_bg_abort_scan); @@ -514,7 +521,30 @@ void iwl_bg_scan_completed(struct work_struct *work) priv->is_internal_short_scan = false; IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); internal = true; + } else { + priv->scan_request = NULL; + priv->scan_vif = NULL; } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + + if (internal && priv->scan_request) + iwl_scan_initiate(priv, priv->scan_vif); + + /* Since setting the TXPOWER may have been deferred while + * performing the scan, fire one off */ + iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + + 
/* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&priv->active_rxon, + &priv->staging_rxon, sizeof(priv->staging_rxon))) + iwlcore_commit_rxon(priv); + + out: mutex_unlock(&priv->mutex); /* @@ -524,15 +554,6 @@ void iwl_bg_scan_completed(struct work_struct *work) */ if (!internal) ieee80211_scan_completed(priv->hw, false); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - /* Since setting the TXPOWER may have been deferred while - * performing the scan, fire one off */ - mutex_lock(&priv->mutex); - iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); - mutex_unlock(&priv->mutex); } EXPORT_SYMBOL(iwl_bg_scan_completed); diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h index af6babee28915f1d9a0d81d5b4d8a1818134c8f5..c4ca0b5d77dae456d49b4b5454674cec24e9bdaf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h +++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h @@ -42,7 +42,7 @@ struct ieee80211_basic_report { __le64 start_time; __le16 duration; u8 map; -} __attribute__ ((packed)); +} __packed; enum { /* ieee80211_measurement_request.mode */ /* Bit 0 is reserved */ @@ -63,13 +63,13 @@ struct ieee80211_measurement_params { u8 channel; __le64 start_time; __le16 duration; -} __attribute__ ((packed)); +} __packed; struct ieee80211_info_element { u8 id; u8 len; u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct ieee80211_measurement_request { struct ieee80211_info_element ie; @@ -77,7 +77,7 @@ struct ieee80211_measurement_request { u8 mode; u8 type; struct ieee80211_measurement_params params[0]; -} __attribute__ ((packed)); +} __packed; struct ieee80211_measurement_report { struct ieee80211_info_element ie; @@ -87,6 +87,6 @@ struct ieee80211_measurement_report { union { struct ieee80211_basic_report basic[0]; } u; -} __attribute__ ((packed)); +} __packed; #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index c27c13fbb1aec26bac302623b96d089e1f13c0dd..9511f03f07e053b93af68c83bd534ddfab98122d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "iwl-dev.h" #include "iwl-core.h" @@ -54,18 +55,19 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) } } -static void iwl_process_add_sta_resp(struct iwl_priv *priv, - struct iwl_addsta_cmd *addsta, - struct iwl_rx_packet *pkt, - bool sync) +static int iwl_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_addsta_cmd *addsta, + struct iwl_rx_packet *pkt, + bool sync) { u8 sta_id = addsta->sta.sta_id; unsigned long flags; + int ret = -EIO; if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", pkt->hdr.flags); - return; + return ret; } IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", @@ -77,6 +79,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv, case ADD_STA_SUCCESS_MSK: IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); iwl_sta_ucode_activate(priv, sta_id); + ret = 0; break; case ADD_STA_NO_ROOM_IN_TABLE: IWL_ERR(priv, "Adding station %d failed, no room in table.\n", @@ -114,6 +117,8 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv, STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", addsta->sta.addr); spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; } static void iwl_add_sta_callback(struct iwl_priv *priv, @@ -145,8 +150,10 @@ int iwl_send_add_sta(struct iwl_priv *priv, if (flags & CMD_ASYNC) cmd.callback = iwl_add_sta_callback; - else + else { cmd.flags |= CMD_WANT_SKB; + might_sleep(); + } cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); ret = iwl_send_cmd(priv, &cmd); @@ -156,7 +163,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, if (ret == 0) { pkt = (struct iwl_rx_packet *)cmd.reply_page; - iwl_process_add_sta_resp(priv, sta, pkt, true); + ret = iwl_process_add_sta_resp(priv, sta, pkt, true); } iwl_free_pages(priv, cmd.reply_page); @@ -311,10 +318,10 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr, struct ieee80211_sta_ht_cap *ht_info, u8 *sta_id_r) { - struct iwl_station_entry *station; unsigned long flags_spin; int ret = 0; u8 sta_id; + struct iwl_addsta_cmd sta_cmd; *sta_id_r = 0; spin_lock_irqsave(&priv->sta_lock, flags_spin); @@ -347,14 +354,15 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr, } priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; - station = &priv->stations[sta_id]; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags_spin); /* Add station to device's station table */ - ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC); + ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); if (ret) { - IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr); spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[sta_id].sta.sta.addr); priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&priv->sta_lock, flags_spin); @@ -488,7 +496,7 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) } static int iwl_send_remove_station(struct iwl_priv *priv, - struct iwl_station_entry *station) + const u8 *addr, int sta_id) { struct iwl_rx_packet *pkt; int ret; @@ -505,7 +513,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); rm_sta_cmd.num_sta = 1; - memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN); + memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); cmd.flags |= CMD_WANT_SKB; @@ -525,7 +533,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, switch (pkt->u.rem_sta.status) { case REM_STA_SUCCESS_MSK: spin_lock_irqsave(&priv->sta_lock, flags_spin); - iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id); + iwl_sta_ucode_deactivate(priv, sta_id); spin_unlock_irqrestore(&priv->sta_lock, flags_spin); IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); break; @@ -546,7 +554,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv, int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, const u8 *addr) { - struct iwl_station_entry *station; unsigned long flags; if (!iwl_is_ready(priv)) { @@ -592,10 +599,9 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, BUG_ON(priv->num_stations < 0); - station = &priv->stations[sta_id]; spin_unlock_irqrestore(&priv->sta_lock, flags); - return iwl_send_remove_station(priv, station); + return iwl_send_remove_station(priv, addr, sta_id); out_err: spin_unlock_irqrestore(&priv->sta_lock, flags); return -EINVAL; @@ -643,11 +649,13 @@ EXPORT_SYMBOL(iwl_clear_ucode_stations); */ void iwl_restore_stations(struct 
iwl_priv *priv) { - struct iwl_station_entry *station; + struct iwl_addsta_cmd sta_cmd; + struct iwl_link_quality_cmd lq; unsigned long flags_spin; int i; bool found = false; int ret; + bool send_lq; if (!iwl_is_ready(priv)) { IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n"); @@ -669,13 +677,20 @@ void iwl_restore_stations(struct iwl_priv *priv) for (i = 0; i < priv->hw_params.max_stations; i++) { if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { + memcpy(&sta_cmd, &priv->stations[i].sta, + sizeof(struct iwl_addsta_cmd)); + send_lq = false; + if (priv->stations[i].lq) { + memcpy(&lq, priv->stations[i].lq, + sizeof(struct iwl_link_quality_cmd)); + send_lq = true; + } spin_unlock_irqrestore(&priv->sta_lock, flags_spin); - station = &priv->stations[i]; - ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC); + ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); if (ret) { - IWL_ERR(priv, "Adding station %pM failed.\n", - station->sta.sta.addr); spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[i].sta.sta.addr); priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE; priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; spin_unlock_irqrestore(&priv->sta_lock, flags_spin); @@ -684,8 +699,8 @@ void iwl_restore_stations(struct iwl_priv *priv) * Rate scaling has already been initialized, send * current LQ command */ - if (station->lq) - iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true); + if (send_lq) + iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true); spin_lock_irqsave(&priv->sta_lock, flags_spin); priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; } @@ -823,7 +838,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, { unsigned long flags; __le16 key_flags = 0; - int ret; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; @@ -863,11 +880,10 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); - + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - return ret; + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, @@ -876,7 +892,9 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, { unsigned long flags; __le16 key_flags = 0; - int ret; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); @@ -911,11 +929,10 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); - + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - return ret; + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, @@ -972,24 +989,16 @@ void iwl_update_tkip_key(struct iwl_priv *priv, unsigned long flags; int i; - if (sta) { - sta_id = iwl_sta_id(sta); - - if (sta_id == IWL_INVALID_STATION) { - 
IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n", - sta->addr); - return; - } - } else - sta_id = priv->hw_params.bcast_sta_id; - - if (iwl_scan_cancel(priv)) { /* cancel scan failed, just live w/ bad key and rely briefly on SW decryption */ return; } + sta_id = iwl_sta_id_or_broadcast(priv, sta); + if (sta_id == IWL_INVALID_STATION) + return; + spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; @@ -1013,9 +1022,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id) { unsigned long flags; - int ret = 0; u16 key_flags; u8 keyidx; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); priv->key_mapping_key--; @@ -1062,9 +1073,10 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, spin_unlock_irqrestore(&priv->sta_lock, flags); return 0; } - ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - return ret; + + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } EXPORT_SYMBOL(iwl_remove_dynamic_key); @@ -1073,6 +1085,8 @@ int iwl_set_dynamic_key(struct iwl_priv *priv, { int ret; + lockdep_assert_held(&priv->mutex); + priv->key_mapping_key++; keyconf->hw_key_idx = HW_KEY_DYNAMIC; @@ -1245,6 +1259,36 @@ int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq) } EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station); +/** + * iwl_update_bcast_station - update broadcast station's LQ command + * + * Only used by iwlagn. Placed here to have all bcast station management + * code together. + */ +int iwl_update_bcast_station(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_link_quality_cmd *link_cmd; + u8 sta_id = priv->hw_params.bcast_sta_id; + + link_cmd = iwl_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->stations[sta_id].lq) + kfree(priv->stations[sta_id].lq); + else + IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n"); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(iwl_update_bcast_station); + void iwl_dealloc_bcast_station(struct iwl_priv *priv) { unsigned long flags; @@ -1268,18 +1312,22 @@ EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station); /** * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table */ -void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) +int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) { unsigned long flags; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); /* Remove "disable" flag, to enable Tx for this TID */ spin_lock_irqsave(&priv->sta_lock, flags); priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid); @@ -1288,6 +1336,9 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, { unsigned long flags; int sta_id; + struct 
iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); sta_id = iwl_sta_id(sta); if (sta_id == IWL_INVALID_STATION) @@ -1299,10 +1350,10 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, - CMD_ASYNC); + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } EXPORT_SYMBOL(iwl_sta_rx_agg_start); @@ -1311,6 +1362,9 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, { unsigned long flags; int sta_id; + struct iwl_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); sta_id = iwl_sta_id(sta); if (sta_id == IWL_INVALID_STATION) { @@ -1323,10 +1377,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, - CMD_ASYNC); + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } EXPORT_SYMBOL(iwl_sta_rx_agg_stop); @@ -1340,9 +1394,9 @@ void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) priv->stations[sta_id].sta.sta.modify_mask = 0; priv->stations[sta_id].sta.sleep_tx_count = 0; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags); - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); } EXPORT_SYMBOL(iwl_sta_modify_ps_wake); @@ -1357,9 +1411,9 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) STA_MODIFY_SLEEP_TX_COUNT_MSK; priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags); - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); } EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h index dc43ebd1f1fd2eb3012c900456ec30b35b9fbbb5..d38a350ba0bd46a7d98e09810346243dea2be528 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.h +++ b/drivers/net/wireless/iwlwifi/iwl-sta.h @@ -60,6 +60,7 @@ void iwl_restore_stations(struct iwl_priv *priv); void iwl_clear_ucode_stations(struct iwl_priv *priv); int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq); void iwl_dealloc_bcast_station(struct iwl_priv *priv); +int iwl_update_bcast_station(struct iwl_priv *priv); int iwl_get_free_ucode_key_index(struct iwl_priv *priv); int iwl_send_add_sta(struct iwl_priv *priv, struct iwl_addsta_cmd *sta, u8 flags); @@ -73,7 +74,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, const u8 *addr); int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); +int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int 
sta_id, int tid); int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, int tid, u16 ssn); int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, @@ -118,4 +119,33 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta) return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id; } + +/** + * iwl_sta_id_or_broadcast - return sta_id or broadcast sta + * @priv: iwl priv + * @sta: mac80211 station + * + * In certain circumstances mac80211 passes a station pointer + * that may be %NULL, for example during TX or key setup. In + * that case, we need to use the broadcast station, so this + * inline wraps that pattern. + */ +static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv, + struct ieee80211_sta *sta) +{ + int sta_id; + + if (!sta) + return priv->hw_params.bcast_sta_id; + + sta_id = iwl_sta_id(sta); + + /* + * mac80211 should not be passing a partially + * initialised station! + */ + WARN_ON(sta_id == IWL_INVALID_STATION); + + return sta_id; +} #endif /* __iwl_sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 1ece2ea09773567145a5acde6bf997749856e107..a81989c0698336dab5ebd0127ed01b93de698e57 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -77,21 +77,6 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) } EXPORT_SYMBOL(iwl_txq_update_write_ptr); - -void iwl_free_tfds_in_queue(struct iwl_priv *priv, - int sta_id, int tid, int freed) -{ - if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; - else { - IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", - priv->stations[sta_id].tid[tid].tfds_in_queue, - freed); - priv->stations[sta_id].tid[tid].tfds_in_queue = 0; - } -} -EXPORT_SYMBOL(iwl_free_tfds_in_queue); - /** * iwl_tx_queue_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. @@ -169,15 +154,15 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) } pci_unmap_single(priv->pci_dev, - pci_unmap_addr(&txq->meta[i], mapping), - pci_unmap_len(&txq->meta[i], len), + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), PCI_DMA_BIDIRECTIONAL); } if (huge) { i = q->n_window; pci_unmap_single(priv->pci_dev, - pci_unmap_addr(&txq->meta[i], mapping), - pci_unmap_len(&txq->meta[i], len), + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), PCI_DMA_BIDIRECTIONAL); } @@ -287,7 +272,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv, /* Driver private data, only for Tx (not command) queues, * not shared with device. 
*/ if (id != IWL_CMD_QUEUE_NUM) { - txq->txb = kmalloc(sizeof(txq->txb[0]) * + txq->txb = kzalloc(sizeof(txq->txb[0]) * TFD_QUEUE_SIZE_MAX, GFP_KERNEL); if (!txq->txb) { IWL_ERR(priv, "kmalloc for auxiliary BD " @@ -531,8 +516,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, fix_size, PCI_DMA_BIDIRECTIONAL); - pci_unmap_addr_set(out_meta, mapping, phys_addr); - pci_unmap_len_set(out_meta, len, fix_size); + dma_unmap_addr_set(out_meta, mapping, phys_addr); + dma_unmap_len_set(out_meta, len, fix_size); trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); @@ -626,8 +611,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) meta = &txq->meta[cmd_index]; pci_unmap_single(priv->pci_dev, - pci_unmap_addr(meta, mapping), - pci_unmap_len(meta, len), + dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); /* Input error checking is done when commands are added to queue. */ diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index a27872de41061d62cec0af818aa72f88927396da..d24eb47d370548b73036411c3532443e2d149c28 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -27,6 +27,8 @@ * *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -197,6 +199,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) { unsigned long flags; + struct iwl_addsta_cmd sta_cmd; spin_lock_irqsave(&priv->sta_lock, flags); memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); @@ -205,11 +208,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0); - return 0; + return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } static int iwl3945_set_dynamic_key(struct iwl_priv *priv, @@ -310,9 +313,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, int left) { - if (!iwl_is_associated(priv) || !priv->ibss_beacon || - ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && - (priv->iw_mode != NL80211_IFTYPE_AP))) + if (!iwl_is_associated(priv) || !priv->ibss_beacon) return 0; if (priv->ibss_beacon->len > left) @@ -474,10 +475,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) u8 unicast; u8 sta_id; u8 tid = 0; - u16 seq_number = 0; __le16 fc; u8 wait_write_ptr = 0; - u8 *qc = NULL; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); @@ -510,10 +509,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); /* Find index into station table for destination station */ - if (!info->control.sta) - sta_id = priv->hw_params.bcast_sta_id; - else - sta_id = iwl_sta_id(info->control.sta); + sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); if (sta_id == IWL_INVALID_STATION) { IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", hdr->addr1); 
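For reference on the pci_unmap_addr()/pci_unmap_len() to dma_unmap_addr()/dma_unmap_len() conversion applied in iwl-tx.c and iwl3945-base.c: the generic helpers from <linux/dma-mapping.h> record the DMA address and length of a mapping in per-buffer state so the unmap path can retrieve them later, and the state fields compile away on architectures that do not need them. A minimal sketch of the pattern, separate from the patch itself; the struct and function names below are illustrative only:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Per-buffer DMA bookkeeping; the fields may occupy no space at all. */
struct example_meta {
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void example_map(struct pci_dev *pdev, struct example_meta *meta,
			void *buf, size_t size)
{
	dma_addr_t phys = pci_map_single(pdev, buf, size,
					 PCI_DMA_BIDIRECTIONAL);

	/* stash address and length for the later unmap */
	dma_unmap_addr_set(meta, mapping, phys);
	dma_unmap_len_set(meta, len, size);
}

static void example_unmap(struct pci_dev *pdev, struct example_meta *meta)
{
	pci_unmap_single(pdev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
}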
@@ -523,16 +519,10 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); if (ieee80211_is_data_qos(fc)) { - qc = ieee80211_get_qos_ctl(hdr); + u8 *qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; if (unlikely(tid >= MAX_TID_COUNT)) goto drop; - seq_number = priv->stations[sta_id].tid[tid].seq_number & - IEEE80211_SCTL_SEQ; - hdr->seq_ctrl = cpu_to_le16(seq_number) | - (hdr->seq_ctrl & - cpu_to_le16(IEEE80211_SCTL_FRAG)); - seq_number += 0x10; } /* Descriptor for chosen Tx queue */ @@ -548,7 +538,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Set up driver data for this TFD */ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); - txq->txb[q->write_ptr].skb[0] = skb; + txq->txb[q->write_ptr].skb = skb; /* Init first empty entry in queue's array of Tx/cmd buffers */ out_cmd = txq->cmd[idx]; @@ -591,8 +581,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) if (!ieee80211_has_morefrags(hdr->frame_control)) { txq->need_update = 1; - if (qc) - priv->stations[sta_id].tid[tid].seq_number = seq_number; } else { wait_write_ptr = 1; txq->need_update = 0; @@ -631,8 +619,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) len, PCI_DMA_TODEVICE); /* we do not map meta data ... so we can safely access address to * provide to unmap command*/ - pci_unmap_addr_set(out_meta, mapping, txcmd_phys); - pci_unmap_len_set(out_meta, len, len); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, len); /* Add buffer containing Tx command and MAC(!) header to TFD's * first entry */ @@ -677,55 +665,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) return -1; } -#define BEACON_TIME_MASK_LOW 0x00FFFFFF -#define BEACON_TIME_MASK_HIGH 0xFF000000 -#define TIME_UNIT 1024 - -/* - * extended beacon time format - * time in usec will be changed into a 32-bit value in 8:24 format - * the high 1 byte is the beacon counts - * the lower 3 bytes is the time in usec within one beacon interval - */ - -static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval) -{ - u32 quot; - u32 rem; - u32 interval = beacon_interval * 1024; - - if (!interval || !usec) - return 0; - - quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); - rem = (usec % interval) & BEACON_TIME_MASK_LOW; - - return (quot << 24) + rem; -} - -/* base is usually what we get from ucode with each received frame, - * the same as HW timer counter counting down - */ - -static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) -{ - u32 base_low = base & BEACON_TIME_MASK_LOW; - u32 addon_low = addon & BEACON_TIME_MASK_LOW; - u32 interval = beacon_interval * TIME_UNIT; - u32 res = (base & BEACON_TIME_MASK_HIGH) + - (addon & BEACON_TIME_MASK_HIGH); - - if (base_low > addon_low) - res += base_low - addon_low; - else if (base_low < addon_low) { - res += interval + base_low - addon_low; - res += (1 << 24); - } else - res += (1 << 24); - - return cpu_to_le32(res); -} - static int iwl3945_get_measurement(struct iwl_priv *priv, struct ieee80211_measurement_params *params, u8 type) @@ -743,8 +682,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, int duration = le16_to_cpu(params->duration); if (iwl_is_associated(priv)) - add_time = - iwl3945_usecs_to_beacons( + add_time = iwl_usecs_to_beacons(priv, le64_to_cpu(params->start_time) - priv->_3945.last_tsf, le16_to_cpu(priv->rxon_timing.beacon_interval)); 
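The iwl3945-local helpers iwl3945_usecs_to_beacons() and iwl3945_add_beacon_time() removed above are superseded by the shared iwl_usecs_to_beacons()/iwl_add_beacon_time() helpers, with iwl-helpers.h now deriving the masks from a tsf_bits parameter (iwl_beacon_time_mask_low()/iwl_beacon_time_mask_high()) instead of hard-coding the 8:24 split. The encoding itself is unchanged: the high bits count whole beacon intervals, the low bits hold the microseconds elapsed within the current interval. A minimal standalone sketch of the 8:24 case, with the constants taken from the removed code (the function name is illustrative):

#define BEACON_TIME_MASK_LOW	0x00FFFFFF	/* usec within the beacon interval */
#define BEACON_TIME_MASK_HIGH	0xFF000000	/* count of whole beacon intervals */
#define TIME_UNIT		1024		/* one TU is 1024 usec */

/* Encode an absolute time in usec as <interval count>:<usec remainder>. */
static unsigned int usecs_to_beacon_time(unsigned int usec,
					 unsigned int beacon_interval_tu)
{
	unsigned int interval = beacon_interval_tu * TIME_UNIT;
	unsigned int quot, rem;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
	rem = (usec % interval) & BEACON_TIME_MASK_LOW;

	return (quot << 24) + rem;
}

For example, with a 100 TU beacon interval (102400 usec), usec = 250000 encodes as (2 << 24) + 45200: two full beacon intervals plus 45200 usec.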
@@ -759,8 +697,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, if (iwl_is_associated(priv)) spectrum.start_time = - iwl3945_add_beacon_time(priv->_3945.last_beacon_time, - add_time, + iwl_add_beacon_time(priv, + priv->_3945.last_beacon_time, add_time, le16_to_cpu(priv->rxon_timing.beacon_interval)); else spectrum.start_time = 0; @@ -1233,7 +1171,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx } dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->dma_addr); + rxq->bd_dma); dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), rxq->rb_stts, rxq->rb_stts_dma); rxq->bd = NULL; @@ -1314,6 +1252,8 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); while (i != r) { + int len; + rxb = rxq->queue[i]; /* If an RXB doesn't have a Rx queue slot associated with it, @@ -1328,8 +1268,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) PCI_DMA_FROMDEVICE); pkt = rxb_addr(rxb); - trace_iwlwifi_dev_rx(priv, pkt, - le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_dev_rx(priv, pkt, len); /* Reclaim a command buffer only if this packet is a response * to a (driver-originated) command. @@ -1483,7 +1424,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv) iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); IWL_ERR(priv, - "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", desc_lookup(desc), desc, time, blink1, blink2, ilink1, ilink2, data1); trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, @@ -2942,7 +2883,10 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); spin_lock_irqsave(&priv->lock, flags); - interval = vif ? 
vif->bss_conf.beacon_int : 0; + if (priv->is_internal_short_scan) + interval = 0; + else + interval = vif->bss_conf.beacon_int; spin_unlock_irqrestore(&priv->lock, flags); scan->suspend_time = 0; @@ -3022,14 +2966,16 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->tx_cmd.len = cpu_to_le16( iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + vif->addr, priv->scan_request->ie, priv->scan_request->ie_len, IWL_MAX_SCAN_SIZE - sizeof(*scan))); } else { + /* use bcast addr, will not be transmitted but must be valid */ scan->tx_cmd.len = cpu_to_le16( iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, - NULL, 0, + iwl_bcast_addr, NULL, 0, IWL_MAX_SCAN_SIZE - sizeof(*scan))); } /* select Rx antennas */ @@ -3158,19 +3104,16 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", vif->bss_conf.aid, vif->bss_conf.beacon_int); - if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + if (vif->bss_conf.use_short_preamble) priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + if (vif->bss_conf.use_short_slot) priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; } iwlcore_commit_rxon(priv); @@ -3334,8 +3277,7 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) priv->staging_rxon.assoc_id = 0; - if (vif->bss_conf.assoc_capability & - WLAN_CAPABILITY_SHORT_PREAMBLE) + if (vif->bss_conf.use_short_preamble) priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else @@ -3343,17 +3285,12 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) ~RXON_FLG_SHORT_PREAMBLE_MSK; if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { - if (vif->bss_conf.assoc_capability & - WLAN_CAPABILITY_SHORT_SLOT_TIME) + if (vif->bss_conf.use_short_slot) priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; - - if (vif->type == NL80211_IFTYPE_ADHOC) - priv->staging_rxon.flags &= - ~RXON_FLG_SHORT_SLOT_MSK; } /* restore RXON assoc */ priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; @@ -3386,17 +3323,9 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, static_key = !iwl_is_associated(priv); if (!static_key) { - if (!sta) { - sta_id = priv->hw_params.bcast_sta_id; - } else { - sta_id = iwl_sta_id(sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_MAC80211(priv, - "leave - %pM not in station map.\n", - sta->addr); - return -EINVAL; - } - } + sta_id = iwl_sta_id_or_broadcast(priv, sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; } mutex_lock(&priv->mutex); @@ -4006,7 +3935,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e * space for this driver's private structure */ hw = iwl_alloc_all(cfg, &iwl3945_hw_ops); if (hw == NULL) { - printk(KERN_ERR DRV_NAME "Can not allocate network device\n"); + pr_err("Can not allocate network device\n"); err = -ENOMEM; goto out; } @@ -4028,9 +3957,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e priv->pci_dev = pdev; priv->inta_mask = CSR_INI_SET_MASK; -#ifdef 
CONFIG_IWLWIFI_DEBUG - atomic_set(&priv->restrict_refcnt, 0); -#endif if (iwl_alloc_traffic_mem(priv)) IWL_ERR(priv, "Not enough memory to generate traffic log\n"); @@ -4099,9 +4025,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e } /* MAC Address location in EEPROM same for 3945/4965 */ eeprom = (struct iwl3945_eeprom *)priv->eeprom; - memcpy(priv->mac_addr, eeprom->mac_address, ETH_ALEN); - IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); - SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); + SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); /*********************** * 5. Setup HW Constants @@ -4302,19 +4227,18 @@ static int __init iwl3945_init(void) { int ret; - printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); - printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); ret = iwl3945_rate_control_register(); if (ret) { - printk(KERN_ERR DRV_NAME - "Unable to register rate control algorithm: %d\n", ret); + pr_err("Unable to register rate control algorithm: %d\n", ret); return ret; } ret = pci_register_driver(&iwl3945_driver); if (ret) { - printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); + pr_err("Unable to initialize PCI module\n"); goto error_register; } diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c index 902e95f70f6e7695445d2a3d23d04316bccea1a8..60619678f4ec1cb39c94dd5b97c3977082c168f3 100644 --- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c +++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -670,20 +670,24 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, } static int iwm_cfg80211_set_txpower(struct wiphy *wiphy, - enum tx_power_setting type, int dbm) + enum nl80211_tx_power_setting type, int mbm) { struct iwm_priv *iwm = wiphy_to_iwm(wiphy); int ret; switch (type) { - case TX_POWER_AUTOMATIC: + case NL80211_TX_POWER_AUTOMATIC: return 0; - case TX_POWER_FIXED: + case NL80211_TX_POWER_FIXED: + if (mbm < 0 || (mbm % 100)) + return -EOPNOTSUPP; + if (!test_bit(IWM_STATUS_READY, &iwm->status)) return 0; ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, - CFG_TX_PWR_LIMIT_USR, dbm * 2); + CFG_TX_PWR_LIMIT_USR, + MBM_TO_DBM(mbm) * 2); if (ret < 0) return ret; diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h index 7e16bcf59978a99c6a78f38b4913484447072b0f..6421689f5e8e14855795a5e37b09694be63f0091 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.h +++ b/drivers/net/wireless/iwmc3200wifi/commands.h @@ -56,7 +56,7 @@ struct iwm_umac_cmd_reset { __le32 flags; -} __attribute__ ((packed)); +} __packed; #define UMAC_PARAM_TBL_ORD_FIX 0x0 #define UMAC_PARAM_TBL_ORD_VAR 0x1 @@ -220,37 +220,37 @@ struct iwm_umac_cmd_set_param_fix { __le16 tbl; __le16 key; __le32 value; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_set_param_var { __le16 tbl; __le16 key; __le16 len; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_get_param { __le16 tbl; __le16 key; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_get_param_resp { __le16 tbl; __le16 key; __le16 len; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_eeprom_proxy_hdr { __le32 type; __le32 offset; __le32 len; -} __attribute__ ((packed)); +} __packed; struct 
iwm_umac_cmd_eeprom_proxy { struct iwm_umac_cmd_eeprom_proxy_hdr hdr; u8 buf[0]; -} __attribute__ ((packed)); +} __packed; #define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1 #define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2 @@ -267,13 +267,13 @@ struct iwm_umac_channel_info { u8 reserved; u8 flags; __le32 channels_mask; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_get_channel_list { __le16 count; __le16 reserved; struct iwm_umac_channel_info ch[0]; -} __attribute__ ((packed)); +} __packed; /* UMAC WiFi interface commands */ @@ -304,7 +304,7 @@ struct iwm_umac_ssid { u8 ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_scan_request { struct iwm_umac_wifi_if hdr; @@ -314,7 +314,7 @@ struct iwm_umac_cmd_scan_request { u8 timeout; /* In seconds */ u8 reserved; struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX]; -} __attribute__ ((packed)); +} __packed; #define UMAC_CIPHER_TYPE_NONE 0xFF #define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00 @@ -357,7 +357,7 @@ struct iwm_umac_security { u8 ucast_cipher; u8 mcast_cipher; u8 flags; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_ibss { u8 beacon_interval; /* in millisecond */ @@ -366,7 +366,7 @@ struct iwm_umac_ibss { u8 band; u8 channel; u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; #define UMAC_MODE_BSS 0 #define UMAC_MODE_IBSS 1 @@ -385,13 +385,13 @@ struct iwm_umac_profile { __le16 flags; u8 wireless_mode; u8 bss_num; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_invalidate_profile { struct iwm_umac_wifi_if hdr; u8 reason; u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; /* Encryption key commands */ struct iwm_umac_key_wep40 { @@ -400,7 +400,7 @@ struct iwm_umac_key_wep40 { u8 key[WLAN_KEY_LEN_WEP40]; u8 static_key; u8 reserved[2]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_key_wep104 { struct iwm_umac_wifi_if hdr; @@ -408,7 +408,7 @@ struct iwm_umac_key_wep104 { u8 key[WLAN_KEY_LEN_WEP104]; u8 static_key; u8 reserved[2]; -} __attribute__ ((packed)); +} __packed; #define IWM_TKIP_KEY_SIZE 16 #define IWM_TKIP_MIC_SIZE 8 @@ -420,7 +420,7 @@ struct iwm_umac_key_tkip { u8 tkip_key[IWM_TKIP_KEY_SIZE]; u8 mic_rx_key[IWM_TKIP_MIC_SIZE]; u8 mic_tx_key[IWM_TKIP_MIC_SIZE]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_key_ccmp { struct iwm_umac_wifi_if hdr; @@ -428,27 +428,27 @@ struct iwm_umac_key_ccmp { u8 iv_count[6]; u8 reserved[2]; u8 key[WLAN_KEY_LEN_CCMP]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_key_remove { struct iwm_umac_wifi_if hdr; struct iwm_umac_key_hdr key_hdr; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_tx_key_id { struct iwm_umac_wifi_if hdr; u8 key_idx; u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_pwr_trigger { struct iwm_umac_wifi_if hdr; __le32 reseved; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_stats_req { __le32 flags; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_cmd_stop_resume_tx { u8 flags; @@ -456,7 +456,7 @@ struct iwm_umac_cmd_stop_resume_tx { __le16 stop_resume_tid_msk; __le16 last_seq_num[IWM_UMAC_TID_NR]; u16 reserved; -} __attribute__ ((packed)); +} __packed; #define IWM_CMD_PMKID_ADD 1 #define IWM_CMD_PMKID_DEL 2 @@ -468,7 +468,7 @@ struct iwm_umac_pmkid_update { u8 bssid[ETH_ALEN]; __le16 reserved; u8 pmkid[WLAN_PMKID_LEN]; -} __attribute__ ((packed)); +} __packed; /* LMAC commands */ int iwm_read_mac(struct iwm_priv *iwm, u8 *mac); diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c 
b/drivers/net/wireless/iwmc3200wifi/hal.c index 9531b18cf72a1f8163c3e89b265f70346a4922aa..907ac890997c8da074272c60361fa3f262482afe 100644 --- a/drivers/net/wireless/iwmc3200wifi/hal.c +++ b/drivers/net/wireless/iwmc3200wifi/hal.c @@ -54,7 +54,7 @@ * LMAC. If you look at LMAC commands you'll se that they * are actually regular iwlwifi target commands encapsulated * into a special UMAC command called UMAC passthrough. - * This is due to the fact the the host talks exclusively + * This is due to the fact the host talks exclusively * to the UMAC and so there needs to be a special UMAC * command for talking to the LMAC. * This is how a wifi command is layed out: diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h index 13266c3842f864b072d9f64e61bf9aedee9af70c..51d7efa15ae60a81fcb9dcfeeb011e80d0faa09b 100644 --- a/drivers/net/wireless/iwmc3200wifi/iwm.h +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h @@ -162,7 +162,7 @@ struct iwm_umac_key_hdr { u8 mac[ETH_ALEN]; u8 key_idx; u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */ -} __attribute__ ((packed)); +} __packed; struct iwm_key { struct iwm_umac_key_hdr hdr; diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h index a855a99e49b80ae80bbf79c2994cf9adab3b0e45..5ddcdf8c70c0945f4ba419cd1bf1ab0bd5e82391 100644 --- a/drivers/net/wireless/iwmc3200wifi/lmac.h +++ b/drivers/net/wireless/iwmc3200wifi/lmac.h @@ -43,7 +43,7 @@ struct iwm_lmac_hdr { u8 id; u8 flags; __le16 seq_num; -} __attribute__ ((packed)); +} __packed; /* LMAC commands */ #define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1 @@ -54,23 +54,23 @@ struct iwm_lmac_cal_cfg_elt { __le32 send_res; /* 1 for sending back results */ __le32 apply_res; /* 1 for applying calibration results to HW */ __le32 reserved; -} __attribute__ ((packed)); +} __packed; struct iwm_lmac_cal_cfg_status { struct iwm_lmac_cal_cfg_elt init; struct iwm_lmac_cal_cfg_elt periodic; __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */ -} __attribute__ ((packed)); +} __packed; struct iwm_lmac_cal_cfg_cmd { struct iwm_lmac_cal_cfg_status ucode_cfg; struct iwm_lmac_cal_cfg_status driver_cfg; __le32 reserved; -} __attribute__ ((packed)); +} __packed; struct iwm_lmac_cal_cfg_resp { __le32 status; -} __attribute__ ((packed)); +} __packed; #define IWM_CARD_STATE_SW_HW_ENABLED 0x00 #define IWM_CARD_STATE_HW_DISABLED 0x01 @@ -80,7 +80,7 @@ struct iwm_lmac_cal_cfg_resp { struct iwm_lmac_card_state { __le32 flags; -} __attribute__ ((packed)); +} __packed; /** * COEX_PRIORITY_TABLE_CMD @@ -131,7 +131,7 @@ struct coex_event { u8 win_med_prio; u8 reserved; u8 flags; -} __attribute__ ((packed)); +} __packed; #define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1 #define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4 @@ -142,7 +142,7 @@ struct iwm_coex_prio_table_cmd { u8 flags; u8 reserved[3]; struct coex_event sta_prio[COEX_EVENTS_NUM]; -} __attribute__ ((packed)); +} __packed; /* Coexistence definitions * @@ -192,7 +192,7 @@ struct iwm_ct_kill_cfg_cmd { u32 exit_threshold; u32 reserved; u32 entry_threshold; -} __attribute__ ((packed)); +} __packed; /* LMAC OP CODES */ @@ -428,7 +428,7 @@ struct iwm_lmac_calib_hdr { u8 first_grp; u8 grp_num; u8 all_data_valid; -} __attribute__ ((packed)); +} __packed; #define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7 #define IWM_CALIB_FREQ_GROUPS_NR 5 @@ -437,20 +437,20 @@ struct iwm_lmac_calib_hdr { struct iwm_calib_rxiq_entry { u16 ptam_postdist_ars; u16 ptam_postdist_arc; -} __attribute__ ((packed)); +} __packed; 
struct iwm_calib_rxiq_group { struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR]; -} __attribute__ ((packed)); +} __packed; struct iwm_lmac_calib_rxiq { struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR]; -} __attribute__ ((packed)); +} __packed; struct iwm_calib_rxiq { struct iwm_lmac_calib_hdr hdr; struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR]; -} __attribute__ ((packed)); +} __packed; #define LMAC_STA_ID_SEED 0x0f #define LMAC_STA_ID_POS 0 @@ -463,7 +463,7 @@ struct iwm_lmac_power_report { u8 pa_integ_res_A[3]; u8 pa_integ_res_B[3]; u8 pa_integ_res_C[3]; -} __attribute__ ((packed)); +} __packed; struct iwm_lmac_tx_resp { u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */ @@ -479,6 +479,6 @@ struct iwm_lmac_tx_resp { u8 ra_tid; __le16 frame_ctl; __le32 status; -} __attribute__ ((packed)); +} __packed; #endif diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index e1184deca55900636257525f1b24e4bf0f20be1c..c02fcedea9fafc5e80375832d9e8bc7c70867317 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -321,14 +321,14 @@ iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket) return ERR_PTR(-ENOMEM); } - ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL); + ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket), + GFP_KERNEL); if (!ticket_node->ticket) { IWM_ERR(iwm, "Couldn't allocate RX ticket\n"); kfree(ticket_node); return ERR_PTR(-ENOMEM); } - memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket)); INIT_LIST_HEAD(&ticket_node->node); return ticket_node; diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h index 0cbba3ecc813c717b9fda3d8aea66dab1ff19f70..4a137d334a42b67c541ebde00c6c56c279d8353a 100644 --- a/drivers/net/wireless/iwmc3200wifi/umac.h +++ b/drivers/net/wireless/iwmc3200wifi/umac.h @@ -42,19 +42,19 @@ struct iwm_udma_in_hdr { __le32 cmd; __le32 size; -} __attribute__ ((packed)); +} __packed; struct iwm_udma_out_nonwifi_hdr { __le32 cmd; __le32 addr; __le32 op1_sz; __le32 op2; -} __attribute__ ((packed)); +} __packed; struct iwm_udma_out_wifi_hdr { __le32 cmd; __le32 meta_data; -} __attribute__ ((packed)); +} __packed; /* Sequence numbering */ #define UMAC_WIFI_SEQ_NUM_BASE 1 @@ -408,12 +408,12 @@ struct iwm_rx_ticket { __le16 flags; u8 payload_offset; /* includes: MAC header, pad, IV */ u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */ -} __attribute__ ((packed)); +} __packed; struct iwm_rx_mpdu_hdr { __le16 len; __le16 reserved; -} __attribute__ ((packed)); +} __packed; /* UMAC SW WIFI API */ @@ -421,31 +421,31 @@ struct iwm_dev_cmd_hdr { u8 cmd; u8 flags; __le16 seq_num; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_fw_cmd_hdr { __le32 meta_data; struct iwm_dev_cmd_hdr cmd; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_wifi_out_hdr { struct iwm_udma_out_wifi_hdr hw_hdr; struct iwm_umac_fw_cmd_hdr sw_hdr; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_nonwifi_out_hdr { struct iwm_udma_out_nonwifi_hdr hw_hdr; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_wifi_in_hdr { struct iwm_udma_in_hdr hw_hdr; struct iwm_umac_fw_cmd_hdr sw_hdr; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_nonwifi_in_hdr { struct iwm_udma_in_hdr hw_hdr; __le32 time_stamp; -} __attribute__ ((packed)); +} __packed; #define IWM_UMAC_PAGE_SIZE 0x200 @@ -521,7 +521,7 @@ struct iwm_umac_notif_wifi_if { u8 
status; u8 flags; __le16 buf_size; -} __attribute__ ((packed)); +} __packed; #define UMAC_ROAM_REASON_FIRST_SELECTION 0x1 #define UMAC_ROAM_REASON_AP_DEAUTH 0x2 @@ -535,7 +535,7 @@ struct iwm_umac_notif_assoc_start { __le32 roam_reason; u8 bssid[ETH_ALEN]; u8 reserved[2]; -} __attribute__ ((packed)); +} __packed; #define UMAC_ASSOC_COMPLETE_SUCCESS 0x0 #define UMAC_ASSOC_COMPLETE_FAILURE 0x1 @@ -546,7 +546,7 @@ struct iwm_umac_notif_assoc_complete { u8 bssid[ETH_ALEN]; u8 band; u8 channel; -} __attribute__ ((packed)); +} __packed; #define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0 #define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1 @@ -556,7 +556,7 @@ struct iwm_umac_notif_assoc_complete { struct iwm_umac_notif_profile_invalidate { struct iwm_umac_notif_wifi_if mlme_hdr; __le32 reason; -} __attribute__ ((packed)); +} __packed; #define UMAC_SCAN_RESULT_SUCCESS 0x0 #define UMAC_SCAN_RESULT_ABORTED 0x1 @@ -568,7 +568,7 @@ struct iwm_umac_notif_scan_complete { __le32 type; __le32 result; u8 seq_num; -} __attribute__ ((packed)); +} __packed; #define UMAC_OPCODE_ADD_MODIFY 0x0 #define UMAC_OPCODE_REMOVE 0x1 @@ -582,7 +582,7 @@ struct iwm_umac_notif_sta_info { u8 mac_addr[ETH_ALEN]; u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */ u8 flags; -} __attribute__ ((packed)); +} __packed; #define UMAC_BAND_2GHZ 0 #define UMAC_BAND_5GHZ 1 @@ -601,7 +601,7 @@ struct iwm_umac_notif_bss_info { s8 rssi; u8 reserved; u8 frame_buf[1]; -} __attribute__ ((packed)); +} __packed; #define IWM_BSS_REMOVE_INDEX_MSK 0x0fff #define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00 @@ -614,13 +614,13 @@ struct iwm_umac_notif_bss_removed { struct iwm_umac_notif_wifi_if mlme_hdr; __le32 count; __le16 entries[0]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_notif_mgt_frame { struct iwm_umac_notif_wifi_if mlme_hdr; __le16 len; u8 frame[1]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_notif_alive { struct iwm_umac_wifi_in_hdr hdr; @@ -630,13 +630,13 @@ struct iwm_umac_notif_alive { __le16 reserved2; __le16 page_grp_count; __le32 page_grp_state[IWM_MACS_OUT_GROUPS]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_notif_init_complete { struct iwm_umac_wifi_in_hdr hdr; __le16 status; __le16 reserved; -} __attribute__ ((packed)); +} __packed; /* error categories */ enum { @@ -667,12 +667,12 @@ struct iwm_fw_error_hdr { __le32 dbm_buf_end; __le32 dbm_buf_write_ptr; __le32 dbm_buf_cycle_cnt; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_notif_error { struct iwm_umac_wifi_in_hdr hdr; struct iwm_fw_error_hdr err; -} __attribute__ ((packed)); +} __packed; #define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0 #define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff @@ -687,20 +687,20 @@ struct iwm_umac_notif_page_dealloc { struct iwm_umac_wifi_in_hdr hdr; __le32 changes; __le32 grp_info[IWM_MACS_OUT_GROUPS]; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_notif_wifi_status { struct iwm_umac_wifi_in_hdr hdr; __le16 status; __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct iwm_umac_notif_rx_ticket { struct iwm_umac_wifi_in_hdr hdr; u8 num_tickets; u8 reserved[3]; struct iwm_rx_ticket tickets[1]; -} __attribute__ ((packed)); +} __packed; /* Tx/Rx rates window (number of max of last update window per second) */ #define UMAC_NTF_RATE_SAMPLE_NR 4 @@ -758,7 +758,7 @@ struct iwm_umac_notif_stats { __le32 roam_unassoc; __le32 roam_deauth; __le32 roam_ap_loadblance; -} __attribute__ ((packed)); +} __packed; #define UMAC_STOP_TX_FLAG 0x1 #define UMAC_RESUME_TX_FLAG 0x2 @@ -770,7 +770,7 @@ struct 
iwm_umac_notif_stop_resume_tx { u8 flags; /* UMAC_*_TX_FLAG_* */ u8 sta_id; __le16 stop_resume_tid_msk; /* tid bitmask */ -} __attribute__ ((packed)); +} __packed; #define UMAC_MAX_NUM_PMKIDS 4 @@ -779,7 +779,7 @@ struct iwm_umac_wifi_if { u8 oid; u8 flags; __le16 buf_size; -} __attribute__ ((packed)); +} __packed; #define IWM_SEQ_NUM_HOST_MSK 0x0000 #define IWM_SEQ_NUM_UMAC_MSK 0x4000 diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile index 45e870e331175f30d47edc2ed85091bce1a924ed..f7d01bfa2e4a1ccd8046607457b32639037cb7cc 100644 --- a/drivers/net/wireless/libertas/Makefile +++ b/drivers/net/wireless/libertas/Makefile @@ -1,4 +1,3 @@ -libertas-y += assoc.o libertas-y += cfg.o libertas-y += cmd.o libertas-y += cmdresp.o @@ -6,9 +5,7 @@ libertas-y += debugfs.o libertas-y += ethtool.o libertas-y += main.o libertas-y += rx.o -libertas-y += scan.o libertas-y += tx.o -libertas-y += wext.o libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o usb8xxx-objs += if_usb.o diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README index 2726c044430fb74bedc3fbd87937dc441f277bb8..60fd1afe89ac2ff7eee0c0c7df1f9d15c13ecec3 100644 --- a/drivers/net/wireless/libertas/README +++ b/drivers/net/wireless/libertas/README @@ -226,6 +226,18 @@ setuserscan All entries in the scan table (not just the new scan data when keep=1) will be displayed upon completion by use of the getscantable ioctl. +hostsleep + This command is used to enable/disable host sleep. + Note: Host sleep parameters should be configured using + "ethtool -s ethX wol X" command before enabling host sleep. + + Path: /sys/kernel/debug/libertas_wireless/ethX/ + + Usage: + cat hostsleep: reads the current hostsleep state + echo "1" > hostsleep : enable host sleep. + echo "0" > hostsleep : disable host sleep + ======================== IWCONFIG COMMANDS ======================== diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c deleted file mode 100644 index aa06070e5eabc248ea029e9fbc88c53eed90918e..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/assoc.c +++ /dev/null @@ -1,2264 +0,0 @@ -/* Copyright (C) 2006, Red Hat, Inc. */ - -#include -#include -#include -#include -#include -#include - -#include "assoc.h" -#include "decl.h" -#include "host.h" -#include "scan.h" -#include "cmd.h" - -static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) = - { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; -static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) = - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - -/* The firmware needs the following bits masked out of the beacon-derived - * capability field when associating/joining to a BSS: - * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused) - */ -#define CAPINFO_MASK (~(0xda00)) - -/** - * 802.11b/g supported bitrates (in 500Kb/s units) - */ -u8 lbs_bg_rates[MAX_RATES] = - { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, -0x00, 0x00 }; - - -static int assoc_helper_wep_keys(struct lbs_private *priv, - struct assoc_request *assoc_req); - -/** - * @brief This function finds common rates between rates and card rates. - * - * It will fill common rates in rates as output if found. 
- * - * NOTE: Setting the MSB of the basic rates need to be taken - * care, either before or after calling this function - * - * @param priv A pointer to struct lbs_private structure - * @param rates the buffer which keeps input and output - * @param rates_size the size of rates buffer; new size of buffer on return, - * which will be less than or equal to original rates_size - * - * @return 0 on success, or -1 on error - */ -static int get_common_rates(struct lbs_private *priv, - u8 *rates, - u16 *rates_size) -{ - int i, j; - u8 intersection[MAX_RATES]; - u16 intersection_size; - u16 num_rates = 0; - - intersection_size = min_t(u16, *rates_size, ARRAY_SIZE(intersection)); - - /* Allow each rate from 'rates' that is supported by the hardware */ - for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && lbs_bg_rates[i]; i++) { - for (j = 0; j < intersection_size && rates[j]; j++) { - if (rates[j] == lbs_bg_rates[i]) - intersection[num_rates++] = rates[j]; - } - } - - lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); - lbs_deb_hex(LBS_DEB_JOIN, "card rates ", lbs_bg_rates, - ARRAY_SIZE(lbs_bg_rates)); - lbs_deb_hex(LBS_DEB_JOIN, "common rates", intersection, num_rates); - lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); - - if (!priv->enablehwauto) { - for (i = 0; i < num_rates; i++) { - if (intersection[i] == priv->cur_rate) - goto done; - } - lbs_pr_alert("Previously set fixed data rate %#x isn't " - "compatible with the network.\n", priv->cur_rate); - return -1; - } - -done: - memset(rates, 0, *rates_size); - *rates_size = num_rates; - memcpy(rates, intersection, num_rates); - return 0; -} - - -/** - * @brief Sets the MSB on basic rates as the firmware requires - * - * Scan through an array and set the MSB for basic data rates. - * - * @param rates buffer of data rates - * @param len size of buffer - */ -static void lbs_set_basic_rate_flags(u8 *rates, size_t len) -{ - int i; - - for (i = 0; i < len; i++) { - if (rates[i] == 0x02 || rates[i] == 0x04 || - rates[i] == 0x0b || rates[i] == 0x16) - rates[i] |= 0x80; - } -} - - -static u8 iw_auth_to_ieee_auth(u8 auth) -{ - if (auth == IW_AUTH_ALG_OPEN_SYSTEM) - return 0x00; - else if (auth == IW_AUTH_ALG_SHARED_KEY) - return 0x01; - else if (auth == IW_AUTH_ALG_LEAP) - return 0x80; - - lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth); - return 0; -} - -/** - * @brief This function prepares the authenticate command. AUTHENTICATE only - * sets the authentication suite for future associations, as the firmware - * handles authentication internally during the ASSOCIATE command. 
- * - * @param priv A pointer to struct lbs_private structure - * @param bssid The peer BSSID with which to authenticate - * @param auth The authentication mode to use (from wireless.h) - * - * @return 0 or -1 - */ -static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth) -{ - struct cmd_ds_802_11_authenticate cmd; - int ret = -1; - - lbs_deb_enter(LBS_DEB_JOIN); - - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - memcpy(cmd.bssid, bssid, ETH_ALEN); - - cmd.authtype = iw_auth_to_ieee_auth(auth); - - lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", bssid, cmd.authtype); - - ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd); - - lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); - return ret; -} - - -int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action, - struct assoc_request *assoc) -{ - struct cmd_ds_802_11_set_wep cmd; - int ret = 0; - - lbs_deb_enter(LBS_DEB_CMD); - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - - cmd.action = cpu_to_le16(cmd_action); - - if (cmd_action == CMD_ACT_ADD) { - int i; - - /* default tx key index */ - cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx & - CMD_WEP_KEY_INDEX_MASK); - - /* Copy key types and material to host command structure */ - for (i = 0; i < 4; i++) { - struct enc_key *pkey = &assoc->wep_keys[i]; - - switch (pkey->len) { - case KEY_LEN_WEP_40: - cmd.keytype[i] = CMD_TYPE_WEP_40_BIT; - memmove(cmd.keymaterial[i], pkey->key, pkey->len); - lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i); - break; - case KEY_LEN_WEP_104: - cmd.keytype[i] = CMD_TYPE_WEP_104_BIT; - memmove(cmd.keymaterial[i], pkey->key, pkey->len); - lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i); - break; - case 0: - break; - default: - lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n", - i, pkey->len); - ret = -1; - goto done; - break; - } - } - } else if (cmd_action == CMD_ACT_REMOVE) { - /* ACT_REMOVE clears _all_ WEP keys */ - - /* default tx key index */ - cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx & - CMD_WEP_KEY_INDEX_MASK); - lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx); - } - - ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd); -done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return ret; -} - -int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action, - uint16_t *enable) -{ - struct cmd_ds_802_11_enable_rsn cmd; - int ret; - - lbs_deb_enter(LBS_DEB_CMD); - - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - cmd.action = cpu_to_le16(cmd_action); - - if (cmd_action == CMD_ACT_GET) - cmd.enable = 0; - else { - if (*enable) - cmd.enable = cpu_to_le16(CMD_ENABLE_RSN); - else - cmd.enable = cpu_to_le16(CMD_DISABLE_RSN); - lbs_deb_cmd("ENABLE_RSN: %d\n", *enable); - } - - ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd); - if (!ret && cmd_action == CMD_ACT_GET) - *enable = le16_to_cpu(cmd.enable); - - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return ret; -} - -static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam, - struct enc_key *key) -{ - lbs_deb_enter(LBS_DEB_CMD); - - if (key->flags & KEY_INFO_WPA_ENABLED) - keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED); - if (key->flags & KEY_INFO_WPA_UNICAST) - keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST); - if (key->flags & KEY_INFO_WPA_MCAST) - keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST); - - keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); - keyparam->keytypeid = 
cpu_to_le16(key->type); - keyparam->keylen = cpu_to_le16(key->len); - memcpy(keyparam->key, key->key, key->len); - - /* Length field doesn't include the {type,length} header */ - keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4); - lbs_deb_leave(LBS_DEB_CMD); -} - -int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action, - struct assoc_request *assoc) -{ - struct cmd_ds_802_11_key_material cmd; - int ret = 0; - int index = 0; - - lbs_deb_enter(LBS_DEB_CMD); - - cmd.action = cpu_to_le16(cmd_action); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - - if (cmd_action == CMD_ACT_GET) { - cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2); - } else { - memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet)); - - if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) { - set_one_wpa_key(&cmd.keyParamSet[index], - &assoc->wpa_unicast_key); - index++; - } - - if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) { - set_one_wpa_key(&cmd.keyParamSet[index], - &assoc->wpa_mcast_key); - index++; - } - - /* The common header and as many keys as we included */ - cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd), - keyParamSet[index])); - } - ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd); - /* Copy the returned key to driver private data */ - if (!ret && cmd_action == CMD_ACT_GET) { - void *buf_ptr = cmd.keyParamSet; - void *resp_end = &(&cmd)[1]; - - while (buf_ptr < resp_end) { - struct MrvlIEtype_keyParamSet *keyparam = buf_ptr; - struct enc_key *key; - uint16_t param_set_len = le16_to_cpu(keyparam->length); - uint16_t key_len = le16_to_cpu(keyparam->keylen); - uint16_t key_flags = le16_to_cpu(keyparam->keyinfo); - uint16_t key_type = le16_to_cpu(keyparam->keytypeid); - void *end; - - end = (void *)keyparam + sizeof(keyparam->type) - + sizeof(keyparam->length) + param_set_len; - - /* Make sure we don't access past the end of the IEs */ - if (end > resp_end) - break; - - if (key_flags & KEY_INFO_WPA_UNICAST) - key = &priv->wpa_unicast_key; - else if (key_flags & KEY_INFO_WPA_MCAST) - key = &priv->wpa_mcast_key; - else - break; - - /* Copy returned key into driver */ - memset(key, 0, sizeof(struct enc_key)); - if (key_len > sizeof(key->key)) - break; - key->type = key_type; - key->flags = key_flags; - key->len = key_len; - memcpy(key->key, keyparam->key, key->len); - - buf_ptr = end + 1; - } - } - - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return ret; -} - -static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok) -{ -/* Bit Rate -* 15:13 Reserved -* 12 54 Mbps -* 11 48 Mbps -* 10 36 Mbps -* 9 24 Mbps -* 8 18 Mbps -* 7 12 Mbps -* 6 9 Mbps -* 5 6 Mbps -* 4 Reserved -* 3 11 Mbps -* 2 5.5 Mbps -* 1 2 Mbps -* 0 1 Mbps -**/ - - uint16_t ratemask; - int i = lbs_data_rate_to_fw_index(rate); - if (lower_rates_ok) - ratemask = (0x1fef >> (12 - i)); - else - ratemask = (1 << i); - return cpu_to_le16(ratemask); -} - -int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv, - uint16_t cmd_action) -{ - struct cmd_ds_802_11_rate_adapt_rateset cmd; - int ret; - - lbs_deb_enter(LBS_DEB_CMD); - - if (!priv->cur_rate && !priv->enablehwauto) - return -EINVAL; - - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - - cmd.action = cpu_to_le16(cmd_action); - cmd.enablehwauto = cpu_to_le16(priv->enablehwauto); - cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto); - ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd); - if (!ret && cmd_action == CMD_ACT_GET) - priv->enablehwauto = le16_to_cpu(cmd.enablehwauto); - - 
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return ret; -} - -/** - * @brief Set the data rate - * - * @param priv A pointer to struct lbs_private structure - * @param rate The desired data rate, or 0 to clear a locked rate - * - * @return 0 on success, error on failure - */ -int lbs_set_data_rate(struct lbs_private *priv, u8 rate) -{ - struct cmd_ds_802_11_data_rate cmd; - int ret = 0; - - lbs_deb_enter(LBS_DEB_CMD); - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - - if (rate > 0) { - cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE); - cmd.rates[0] = lbs_data_rate_to_fw_index(rate); - if (cmd.rates[0] == 0) { - lbs_deb_cmd("DATA_RATE: invalid requested rate of" - " 0x%02X\n", rate); - ret = 0; - goto out; - } - lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]); - } else { - cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO); - lbs_deb_cmd("DATA_RATE: setting auto\n"); - } - - ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd); - if (ret) - goto out; - - lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd)); - - /* FIXME: get actual rates FW can do if this command actually returns - * all data rates supported. - */ - priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]); - lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate); - -out: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return ret; -} - - -int lbs_cmd_802_11_rssi(struct lbs_private *priv, - struct cmd_ds_command *cmd) -{ - - lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(CMD_802_11_RSSI); - cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + - sizeof(struct cmd_header)); - cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR); - - /* reset Beacon SNR/NF/RSSI values */ - priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0; - priv->SNR[TYPE_BEACON][TYPE_AVG] = 0; - priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0; - priv->NF[TYPE_BEACON][TYPE_AVG] = 0; - priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0; - priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0; - - lbs_deb_leave(LBS_DEB_CMD); - return 0; -} - -int lbs_ret_802_11_rssi(struct lbs_private *priv, - struct cmd_ds_command *resp) -{ - struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp; - - lbs_deb_enter(LBS_DEB_CMD); - - /* store the non average value */ - priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR); - priv->NF[TYPE_BEACON][TYPE_NOAVG] = - get_unaligned_le16(&rssirsp->noisefloor); - - priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR); - priv->NF[TYPE_BEACON][TYPE_AVG] = - get_unaligned_le16(&rssirsp->avgnoisefloor); - - priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = - CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG], - priv->NF[TYPE_BEACON][TYPE_NOAVG]); - - priv->RSSI[TYPE_BEACON][TYPE_AVG] = - CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE, - priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE); - - lbs_deb_cmd("RSSI: beacon %d, avg %d\n", - priv->RSSI[TYPE_BEACON][TYPE_NOAVG], - priv->RSSI[TYPE_BEACON][TYPE_AVG]); - - lbs_deb_leave(LBS_DEB_CMD); - return 0; -} - - -int lbs_cmd_bcn_ctrl(struct lbs_private *priv, - struct cmd_ds_command *cmd, - u16 cmd_action) -{ - struct cmd_ds_802_11_beacon_control - *bcn_ctrl = &cmd->params.bcn_ctrl; - - lbs_deb_enter(LBS_DEB_CMD); - cmd->size = - cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control) - + sizeof(struct cmd_header)); - cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL); - - bcn_ctrl->action = cpu_to_le16(cmd_action); - bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable); - bcn_ctrl->beacon_period = 
cpu_to_le16(priv->beacon_period); - - lbs_deb_leave(LBS_DEB_CMD); - return 0; -} - -int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv, - struct cmd_ds_command *resp) -{ - struct cmd_ds_802_11_beacon_control *bcn_ctrl = - &resp->params.bcn_ctrl; - - lbs_deb_enter(LBS_DEB_CMD); - - if (bcn_ctrl->action == CMD_ACT_GET) { - priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable); - priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period); - } - - lbs_deb_enter(LBS_DEB_CMD); - return 0; -} - - - -static int lbs_assoc_post(struct lbs_private *priv, - struct cmd_ds_802_11_associate_response *resp) -{ - int ret = 0; - union iwreq_data wrqu; - struct bss_descriptor *bss; - u16 status_code; - - lbs_deb_enter(LBS_DEB_ASSOC); - - if (!priv->in_progress_assoc_req) { - lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n"); - ret = -1; - goto done; - } - bss = &priv->in_progress_assoc_req->bss; - - /* - * Older FW versions map the IEEE 802.11 Status Code in the association - * response to the following values returned in resp->statuscode: - * - * IEEE Status Code Marvell Status Code - * 0 -> 0x0000 ASSOC_RESULT_SUCCESS - * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED - * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED - * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED - * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED - * others -> 0x0003 ASSOC_RESULT_REFUSED - * - * Other response codes: - * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused) - * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for - * association response from the AP) - */ - - status_code = le16_to_cpu(resp->statuscode); - if (priv->fwrelease < 0x09000000) { - switch (status_code) { - case 0x00: - break; - case 0x01: - lbs_deb_assoc("ASSOC_RESP: invalid parameters\n"); - break; - case 0x02: - lbs_deb_assoc("ASSOC_RESP: internal timer " - "expired while waiting for the AP\n"); - break; - case 0x03: - lbs_deb_assoc("ASSOC_RESP: association " - "refused by AP\n"); - break; - case 0x04: - lbs_deb_assoc("ASSOC_RESP: authentication " - "refused by AP\n"); - break; - default: - lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x " - " unknown\n", status_code); - break; - } - } else { - /* v9+ returns the AP's association response */ - lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code); - } - - if (status_code) { - lbs_mac_event_disconnected(priv); - ret = status_code; - goto done; - } - - lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP", - (void *) (resp + sizeof (resp->hdr)), - le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr)); - - /* Send a Media Connected event, according to the Spec */ - priv->connect_status = LBS_CONNECTED; - - /* Update current SSID and BSSID */ - memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN); - priv->curbssparams.ssid_len = bss->ssid_len; - memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN); - - priv->SNR[TYPE_RXPD][TYPE_AVG] = 0; - priv->NF[TYPE_RXPD][TYPE_AVG] = 0; - - memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR)); - memset(priv->rawNF, 0x00, sizeof(priv->rawNF)); - priv->nextSNRNF = 0; - priv->numSNRNF = 0; - - netif_carrier_on(priv->dev); - if (!priv->tx_pending_len) - netif_wake_queue(priv->dev); - - memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); - -done: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -/** - * @brief This function prepares an association-class command. 
- * - * @param priv A pointer to struct lbs_private structure - * @param assoc_req The association request describing the BSS to associate - * or reassociate with - * @param command The actual command, either CMD_802_11_ASSOCIATE or - * CMD_802_11_REASSOCIATE - * - * @return 0 or -1 - */ -static int lbs_associate(struct lbs_private *priv, - struct assoc_request *assoc_req, - u16 command) -{ - struct cmd_ds_802_11_associate cmd; - int ret = 0; - struct bss_descriptor *bss = &assoc_req->bss; - u8 *pos = &(cmd.iebuf[0]); - u16 tmpcap, tmplen, tmpauth; - struct mrvl_ie_ssid_param_set *ssid; - struct mrvl_ie_ds_param_set *ds; - struct mrvl_ie_cf_param_set *cf; - struct mrvl_ie_rates_param_set *rates; - struct mrvl_ie_rsn_param_set *rsn; - struct mrvl_ie_auth_type *auth; - - lbs_deb_enter(LBS_DEB_ASSOC); - - BUG_ON((command != CMD_802_11_ASSOCIATE) && - (command != CMD_802_11_REASSOCIATE)); - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.command = cpu_to_le16(command); - - /* Fill in static fields */ - memcpy(cmd.bssid, bss->bssid, ETH_ALEN); - cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL); - - /* Capability info */ - tmpcap = (bss->capability & CAPINFO_MASK); - if (bss->mode == IW_MODE_INFRA) - tmpcap |= WLAN_CAPABILITY_ESS; - cmd.capability = cpu_to_le16(tmpcap); - lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap); - - /* SSID */ - ssid = (struct mrvl_ie_ssid_param_set *) pos; - ssid->header.type = cpu_to_le16(TLV_TYPE_SSID); - tmplen = bss->ssid_len; - ssid->header.len = cpu_to_le16(tmplen); - memcpy(ssid->ssid, bss->ssid, tmplen); - pos += sizeof(ssid->header) + tmplen; - - ds = (struct mrvl_ie_ds_param_set *) pos; - ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS); - ds->header.len = cpu_to_le16(1); - ds->channel = bss->phy.ds.channel; - pos += sizeof(ds->header) + 1; - - cf = (struct mrvl_ie_cf_param_set *) pos; - cf->header.type = cpu_to_le16(TLV_TYPE_CF); - tmplen = sizeof(*cf) - sizeof (cf->header); - cf->header.len = cpu_to_le16(tmplen); - /* IE payload should be zeroed, firmware fills it in for us */ - pos += sizeof(*cf); - - rates = (struct mrvl_ie_rates_param_set *) pos; - rates->header.type = cpu_to_le16(TLV_TYPE_RATES); - tmplen = min_t(u16, ARRAY_SIZE(bss->rates), MAX_RATES); - memcpy(&rates->rates, &bss->rates, tmplen); - if (get_common_rates(priv, rates->rates, &tmplen)) { - ret = -1; - goto done; - } - pos += sizeof(rates->header) + tmplen; - rates->header.len = cpu_to_le16(tmplen); - lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen); - - /* Copy the infra. association rates into Current BSS state structure */ - memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates)); - memcpy(&priv->curbssparams.rates, &rates->rates, tmplen); - - /* Set MSB on basic rates as the firmware requires, but _after_ - * copying to current bss rates. 
- */ - lbs_set_basic_rate_flags(rates->rates, tmplen); - - /* Firmware v9+ indicate authentication suites as a TLV */ - if (priv->fwrelease >= 0x09000000) { - auth = (struct mrvl_ie_auth_type *) pos; - auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE); - auth->header.len = cpu_to_le16(2); - tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode); - auth->auth = cpu_to_le16(tmpauth); - pos += sizeof(auth->header) + 2; - - lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", - bss->bssid, priv->secinfo.auth_mode); - } - - /* WPA/WPA2 IEs */ - if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) { - rsn = (struct mrvl_ie_rsn_param_set *) pos; - /* WPA_IE or WPA2_IE */ - rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]); - tmplen = (u16) assoc_req->wpa_ie[1]; - rsn->header.len = cpu_to_le16(tmplen); - memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen); - lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn, - sizeof(rsn->header) + tmplen); - pos += sizeof(rsn->header) + tmplen; - } - - cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) + - (u16)(pos - (u8 *) &cmd.iebuf)); - - /* update curbssparams */ - priv->channel = bss->phy.ds.channel; - - ret = lbs_cmd_with_response(priv, command, &cmd); - if (ret == 0) { - ret = lbs_assoc_post(priv, - (struct cmd_ds_802_11_associate_response *) &cmd); - } - -done: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -/** - * @brief Associate to a specific BSS discovered in a scan - * - * @param priv A pointer to struct lbs_private structure - * @param assoc_req The association request describing the BSS to associate with - * - * @return 0-success, otherwise fail - */ -static int lbs_try_associate(struct lbs_private *priv, - struct assoc_request *assoc_req) -{ - int ret; - u8 preamble = RADIO_PREAMBLE_LONG; - - lbs_deb_enter(LBS_DEB_ASSOC); - - /* FW v9 and higher indicate authentication suites as a TLV in the - * association command, not as a separate authentication command. 
- */ - if (priv->fwrelease < 0x09000000) { - ret = lbs_set_authentication(priv, assoc_req->bss.bssid, - priv->secinfo.auth_mode); - if (ret) - goto out; - } - - /* Use short preamble only when both the BSS and firmware support it */ - if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) - preamble = RADIO_PREAMBLE_SHORT; - - ret = lbs_set_radio(priv, preamble, 1); - if (ret) - goto out; - - ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE); - /* If the association fails with current auth mode, let's - * try by changing the auth mode - */ - if ((priv->authtype_auto) && - (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) && - (assoc_req->secinfo.wep_enabled) && - (priv->connect_status != LBS_CONNECTED)) { - if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM) - priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; - else - priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - if (!assoc_helper_wep_keys(priv, assoc_req)) - ret = lbs_associate(priv, assoc_req, - CMD_802_11_ASSOCIATE); - } - - if (ret) - ret = -1; -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -static int lbs_adhoc_post(struct lbs_private *priv, - struct cmd_ds_802_11_ad_hoc_result *resp) -{ - int ret = 0; - u16 command = le16_to_cpu(resp->hdr.command); - u16 result = le16_to_cpu(resp->hdr.result); - union iwreq_data wrqu; - struct bss_descriptor *bss; - DECLARE_SSID_BUF(ssid); - - lbs_deb_enter(LBS_DEB_JOIN); - - if (!priv->in_progress_assoc_req) { - lbs_deb_join("ADHOC_RESP: no in-progress association " - "request\n"); - ret = -1; - goto done; - } - bss = &priv->in_progress_assoc_req->bss; - - /* - * Join result code 0 --> SUCCESS - */ - if (result) { - lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result); - if (priv->connect_status == LBS_CONNECTED) - lbs_mac_event_disconnected(priv); - ret = -1; - goto done; - } - - /* Send a Media Connected event, according to the Spec */ - priv->connect_status = LBS_CONNECTED; - - if (command == CMD_RET(CMD_802_11_AD_HOC_START)) { - /* Update the created network descriptor with the new BSSID */ - memcpy(bss->bssid, resp->bssid, ETH_ALEN); - } - - /* Set the BSSID from the joined/started descriptor */ - memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN); - - /* Set the new SSID to current SSID */ - memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN); - priv->curbssparams.ssid_len = bss->ssid_len; - - netif_carrier_on(priv->dev); - if (!priv->tx_pending_len) - netif_wake_queue(priv->dev); - - memset(&wrqu, 0, sizeof(wrqu)); - memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); - - lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n", - print_ssid(ssid, bss->ssid, bss->ssid_len), - priv->curbssparams.bssid, - priv->channel); - -done: - lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); - return ret; -} - -/** - * @brief Join an adhoc network found in a previous scan - * - * @param priv A pointer to struct lbs_private structure - * @param assoc_req The association request describing the BSS to join - * - * @return 0 on success, error on failure - */ -static int lbs_adhoc_join(struct lbs_private *priv, - struct assoc_request *assoc_req) -{ - struct cmd_ds_802_11_ad_hoc_join cmd; - struct bss_descriptor *bss = &assoc_req->bss; - u8 preamble = RADIO_PREAMBLE_LONG; - DECLARE_SSID_BUF(ssid); - u16 ratesize = 0; - int ret = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - lbs_deb_join("current SSID '%s', ssid 
length %u\n", - print_ssid(ssid, priv->curbssparams.ssid, - priv->curbssparams.ssid_len), - priv->curbssparams.ssid_len); - lbs_deb_join("requested ssid '%s', ssid length %u\n", - print_ssid(ssid, bss->ssid, bss->ssid_len), - bss->ssid_len); - - /* check if the requested SSID is already joined */ - if (priv->curbssparams.ssid_len && - !lbs_ssid_cmp(priv->curbssparams.ssid, - priv->curbssparams.ssid_len, - bss->ssid, bss->ssid_len) && - (priv->mode == IW_MODE_ADHOC) && - (priv->connect_status == LBS_CONNECTED)) { - union iwreq_data wrqu; - - lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as " - "current, not attempting to re-join"); - - /* Send the re-association event though, because the association - * request really was successful, even if just a null-op. - */ - memset(&wrqu, 0, sizeof(wrqu)); - memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, - ETH_ALEN); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); - goto out; - } - - /* Use short preamble only when both the BSS and firmware support it */ - if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { - lbs_deb_join("AdhocJoin: Short preamble\n"); - preamble = RADIO_PREAMBLE_SHORT; - } - - ret = lbs_set_radio(priv, preamble, 1); - if (ret) - goto out; - - lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel); - lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); - - priv->adhoccreate = 0; - priv->channel = bss->channel; - - /* Build the join command */ - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - - cmd.bss.type = CMD_BSS_TYPE_IBSS; - cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod); - - memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN); - memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len); - - memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set)); - - memcpy(&cmd.bss.ibss, &bss->ss.ibss, - sizeof(struct ieee_ie_ibss_param_set)); - - cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK); - lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", - bss->capability, CAPINFO_MASK); - - /* information on BSSID descriptor passed to FW */ - lbs_deb_join("ADHOC_J_CMD: BSSID = %pM, SSID = '%s'\n", - cmd.bss.bssid, cmd.bss.ssid); - - /* Only v8 and below support setting these */ - if (priv->fwrelease < 0x09000000) { - /* failtimeout */ - cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); - /* probedelay */ - cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); - } - - /* Copy Data rates from the rates recorded in scan response */ - memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); - ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), ARRAY_SIZE (bss->rates)); - memcpy(cmd.bss.rates, bss->rates, ratesize); - if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { - lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); - ret = -1; - goto out; - } - - /* Copy the ad-hoc creation rates into Current BSS state structure */ - memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates)); - memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize); - - /* Set MSB on basic rates as the firmware requires, but _after_ - * copying to current bss rates. 
- */ - lbs_set_basic_rate_flags(cmd.bss.rates, ratesize); - - cmd.bss.ibss.atimwindow = bss->atimwindow; - - if (assoc_req->secinfo.wep_enabled) { - u16 tmp = le16_to_cpu(cmd.bss.capability); - tmp |= WLAN_CAPABILITY_PRIVACY; - cmd.bss.capability = cpu_to_le16(tmp); - } - - if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { - __le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM); - - /* wake up first */ - ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE, - CMD_ACT_SET, 0, 0, - &local_ps_mode); - if (ret) { - ret = -1; - goto out; - } - } - - ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); - if (ret == 0) { - ret = lbs_adhoc_post(priv, - (struct cmd_ds_802_11_ad_hoc_result *)&cmd); - } - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -/** - * @brief Start an Adhoc Network - * - * @param priv A pointer to struct lbs_private structure - * @param assoc_req The association request describing the BSS to start - * - * @return 0 on success, error on failure - */ -static int lbs_adhoc_start(struct lbs_private *priv, - struct assoc_request *assoc_req) -{ - struct cmd_ds_802_11_ad_hoc_start cmd; - u8 preamble = RADIO_PREAMBLE_SHORT; - size_t ratesize = 0; - u16 tmpcap = 0; - int ret = 0; - DECLARE_SSID_BUF(ssid); - - lbs_deb_enter(LBS_DEB_ASSOC); - - ret = lbs_set_radio(priv, preamble, 1); - if (ret) - goto out; - - /* Build the start command */ - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - - memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len); - - lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n", - print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len), - assoc_req->ssid_len); - - cmd.bsstype = CMD_BSS_TYPE_IBSS; - - if (priv->beacon_period == 0) - priv->beacon_period = MRVDRV_BEACON_INTERVAL; - cmd.beaconperiod = cpu_to_le16(priv->beacon_period); - - WARN_ON(!assoc_req->channel); - - /* set Physical parameter set */ - cmd.ds.header.id = WLAN_EID_DS_PARAMS; - cmd.ds.header.len = 1; - cmd.ds.channel = assoc_req->channel; - - /* set IBSS parameter set */ - cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS; - cmd.ibss.header.len = 2; - cmd.ibss.atimwindow = cpu_to_le16(0); - - /* set capability info */ - tmpcap = WLAN_CAPABILITY_IBSS; - if (assoc_req->secinfo.wep_enabled || - assoc_req->secinfo.WPAenabled || - assoc_req->secinfo.WPA2enabled) { - lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n"); - tmpcap |= WLAN_CAPABILITY_PRIVACY; - } else - lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n"); - - cmd.capability = cpu_to_le16(tmpcap); - - /* Only v8 and below support setting probe delay */ - if (priv->fwrelease < 0x09000000) - cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); - - ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates)); - memcpy(cmd.rates, lbs_bg_rates, ratesize); - - /* Copy the ad-hoc creating rates into Current BSS state structure */ - memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates)); - memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize); - - /* Set MSB on basic rates as the firmware requires, but _after_ - * copying to current bss rates. 
- */ - lbs_set_basic_rate_flags(cmd.rates, ratesize); - - lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n", - cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]); - - lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n", - assoc_req->channel, assoc_req->band); - - priv->adhoccreate = 1; - priv->mode = IW_MODE_ADHOC; - - ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd); - if (ret == 0) - ret = lbs_adhoc_post(priv, - (struct cmd_ds_802_11_ad_hoc_result *)&cmd); - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -/** - * @brief Stop and Ad-Hoc network and exit Ad-Hoc mode - * - * @param priv A pointer to struct lbs_private structure - * @return 0 on success, or an error - */ -int lbs_adhoc_stop(struct lbs_private *priv) -{ - struct cmd_ds_802_11_ad_hoc_stop cmd; - int ret; - - lbs_deb_enter(LBS_DEB_JOIN); - - memset(&cmd, 0, sizeof (cmd)); - cmd.hdr.size = cpu_to_le16 (sizeof (cmd)); - - ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd); - - /* Clean up everything even if there was an error */ - lbs_mac_event_disconnected(priv); - - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -static inline int match_bss_no_security(struct lbs_802_11_security *secinfo, - struct bss_descriptor *match_bss) -{ - if (!secinfo->wep_enabled && - !secinfo->WPAenabled && !secinfo->WPA2enabled && - match_bss->wpa_ie[0] != WLAN_EID_GENERIC && - match_bss->rsn_ie[0] != WLAN_EID_RSN && - !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) - return 1; - else - return 0; -} - -static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo, - struct bss_descriptor *match_bss) -{ - if (secinfo->wep_enabled && - !secinfo->WPAenabled && !secinfo->WPA2enabled && - (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) - return 1; - else - return 0; -} - -static inline int match_bss_wpa(struct lbs_802_11_security *secinfo, - struct bss_descriptor *match_bss) -{ - if (!secinfo->wep_enabled && secinfo->WPAenabled && - (match_bss->wpa_ie[0] == WLAN_EID_GENERIC) - /* privacy bit may NOT be set in some APs like LinkSys WRT54G - && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ - ) - return 1; - else - return 0; -} - -static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo, - struct bss_descriptor *match_bss) -{ - if (!secinfo->wep_enabled && secinfo->WPA2enabled && - (match_bss->rsn_ie[0] == WLAN_EID_RSN) - /* privacy bit may NOT be set in some APs like LinkSys WRT54G - (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ - ) - return 1; - else - return 0; -} - -static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo, - struct bss_descriptor *match_bss) -{ - if (!secinfo->wep_enabled && - !secinfo->WPAenabled && !secinfo->WPA2enabled && - (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) && - (match_bss->rsn_ie[0] != WLAN_EID_RSN) && - (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) - return 1; - else - return 0; -} - -/** - * @brief Check if a scanned network compatible with the driver settings - * - * WEP WPA WPA2 ad-hoc encrypt Network - * enabled enabled enabled AES mode privacy WPA WPA2 Compatible - * 0 0 0 0 NONE 0 0 0 yes No security - * 1 0 0 0 NONE 1 0 0 yes Static WEP - * 0 1 0 0 x 1x 1 x yes WPA - * 0 0 1 0 x 1x x 1 yes WPA2 - * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES - * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP - * - * - * @param priv A pointer to struct lbs_private - * @param index Index in scantable to check against current driver settings - * @param mode Network mode: 
Infrastructure or IBSS - * - * @return Index in scantable, or error code if negative - */ -static int is_network_compatible(struct lbs_private *priv, - struct bss_descriptor *bss, uint8_t mode) -{ - int matched = 0; - - lbs_deb_enter(LBS_DEB_SCAN); - - if (bss->mode != mode) - goto done; - - matched = match_bss_no_security(&priv->secinfo, bss); - if (matched) - goto done; - matched = match_bss_static_wep(&priv->secinfo, bss); - if (matched) - goto done; - matched = match_bss_wpa(&priv->secinfo, bss); - if (matched) { - lbs_deb_scan("is_network_compatible() WPA: wpa_ie 0x%x " - "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s " - "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0], - priv->secinfo.wep_enabled ? "e" : "d", - priv->secinfo.WPAenabled ? "e" : "d", - priv->secinfo.WPA2enabled ? "e" : "d", - (bss->capability & WLAN_CAPABILITY_PRIVACY)); - goto done; - } - matched = match_bss_wpa2(&priv->secinfo, bss); - if (matched) { - lbs_deb_scan("is_network_compatible() WPA2: wpa_ie 0x%x " - "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s " - "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0], - priv->secinfo.wep_enabled ? "e" : "d", - priv->secinfo.WPAenabled ? "e" : "d", - priv->secinfo.WPA2enabled ? "e" : "d", - (bss->capability & WLAN_CAPABILITY_PRIVACY)); - goto done; - } - matched = match_bss_dynamic_wep(&priv->secinfo, bss); - if (matched) { - lbs_deb_scan("is_network_compatible() dynamic WEP: " - "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n", - bss->wpa_ie[0], bss->rsn_ie[0], - (bss->capability & WLAN_CAPABILITY_PRIVACY)); - goto done; - } - - /* bss security settings don't match those configured on card */ - lbs_deb_scan("is_network_compatible() FAILED: wpa_ie 0x%x " - "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n", - bss->wpa_ie[0], bss->rsn_ie[0], - priv->secinfo.wep_enabled ? "e" : "d", - priv->secinfo.WPAenabled ? "e" : "d", - priv->secinfo.WPA2enabled ? "e" : "d", - (bss->capability & WLAN_CAPABILITY_PRIVACY)); - -done: - lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched); - return matched; -} - -/** - * @brief This function finds a specific compatible BSSID in the scan list - * - * Used in association code - * - * @param priv A pointer to struct lbs_private - * @param bssid BSSID to find in the scan list - * @param mode Network mode: Infrastructure or IBSS - * - * @return index in BSSID list, or error return code (< 0) - */ -static struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv, - uint8_t *bssid, uint8_t mode) -{ - struct bss_descriptor *iter_bss; - struct bss_descriptor *found_bss = NULL; - - lbs_deb_enter(LBS_DEB_SCAN); - - if (!bssid) - goto out; - - lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN); - - /* Look through the scan table for a compatible match. The loop will - * continue past a matched bssid that is not compatible in case there - * is an AP with multiple SSIDs assigned to the same BSSID - */ - mutex_lock(&priv->lock); - list_for_each_entry(iter_bss, &priv->network_list, list) { - if (compare_ether_addr(iter_bss->bssid, bssid)) - continue; /* bssid doesn't match */ - switch (mode) { - case IW_MODE_INFRA: - case IW_MODE_ADHOC: - if (!is_network_compatible(priv, iter_bss, mode)) - break; - found_bss = iter_bss; - break; - default: - found_bss = iter_bss; - break; - } - } - mutex_unlock(&priv->lock); - -out: - lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); - return found_bss; -} - -/** - * @brief This function finds ssid in ssid list. 
- * - * Used in association code - * - * @param priv A pointer to struct lbs_private - * @param ssid SSID to find in the list - * @param bssid BSSID to qualify the SSID selection (if provided) - * @param mode Network mode: Infrastructure or IBSS - * - * @return index in BSSID list - */ -static struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv, - uint8_t *ssid, uint8_t ssid_len, - uint8_t *bssid, uint8_t mode, - int channel) -{ - u32 bestrssi = 0; - struct bss_descriptor *iter_bss = NULL; - struct bss_descriptor *found_bss = NULL; - struct bss_descriptor *tmp_oldest = NULL; - - lbs_deb_enter(LBS_DEB_SCAN); - - mutex_lock(&priv->lock); - - list_for_each_entry(iter_bss, &priv->network_list, list) { - if (!tmp_oldest || - (iter_bss->last_scanned < tmp_oldest->last_scanned)) - tmp_oldest = iter_bss; - - if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len, - ssid, ssid_len) != 0) - continue; /* ssid doesn't match */ - if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0) - continue; /* bssid doesn't match */ - if ((channel > 0) && (iter_bss->channel != channel)) - continue; /* channel doesn't match */ - - switch (mode) { - case IW_MODE_INFRA: - case IW_MODE_ADHOC: - if (!is_network_compatible(priv, iter_bss, mode)) - break; - - if (bssid) { - /* Found requested BSSID */ - found_bss = iter_bss; - goto out; - } - - if (SCAN_RSSI(iter_bss->rssi) > bestrssi) { - bestrssi = SCAN_RSSI(iter_bss->rssi); - found_bss = iter_bss; - } - break; - case IW_MODE_AUTO: - default: - if (SCAN_RSSI(iter_bss->rssi) > bestrssi) { - bestrssi = SCAN_RSSI(iter_bss->rssi); - found_bss = iter_bss; - } - break; - } - } - -out: - mutex_unlock(&priv->lock); - lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); - return found_bss; -} - -static int assoc_helper_essid(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - struct bss_descriptor * bss; - int channel = -1; - DECLARE_SSID_BUF(ssid); - - lbs_deb_enter(LBS_DEB_ASSOC); - - /* FIXME: take channel into account when picking SSIDs if a channel - * is set. - */ - - if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) - channel = assoc_req->channel; - - lbs_deb_assoc("SSID '%s' requested\n", - print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len)); - if (assoc_req->mode == IW_MODE_INFRA) { - lbs_send_specific_ssid_scan(priv, assoc_req->ssid, - assoc_req->ssid_len); - - bss = lbs_find_ssid_in_list(priv, assoc_req->ssid, - assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel); - if (bss != NULL) { - memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); - ret = lbs_try_associate(priv, assoc_req); - } else { - lbs_deb_assoc("SSID not found; cannot associate\n"); - } - } else if (assoc_req->mode == IW_MODE_ADHOC) { - /* Scan for the network, do not save previous results. 
Stale - * scan data will cause us to join a non-existant adhoc network - */ - lbs_send_specific_ssid_scan(priv, assoc_req->ssid, - assoc_req->ssid_len); - - /* Search for the requested SSID in the scan table */ - bss = lbs_find_ssid_in_list(priv, assoc_req->ssid, - assoc_req->ssid_len, NULL, IW_MODE_ADHOC, channel); - if (bss != NULL) { - lbs_deb_assoc("SSID found, will join\n"); - memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); - lbs_adhoc_join(priv, assoc_req); - } else { - /* else send START command */ - lbs_deb_assoc("SSID not found, creating adhoc network\n"); - memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, - IEEE80211_MAX_SSID_LEN); - assoc_req->bss.ssid_len = assoc_req->ssid_len; - lbs_adhoc_start(priv, assoc_req); - } - } - - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int assoc_helper_bssid(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - struct bss_descriptor * bss; - - lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %pM", assoc_req->bssid); - - /* Search for index position in list for requested MAC */ - bss = lbs_find_bssid_in_list(priv, assoc_req->bssid, - assoc_req->mode); - if (bss == NULL) { - lbs_deb_assoc("ASSOC: WAP: BSSID %pM not found, " - "cannot associate.\n", assoc_req->bssid); - goto out; - } - - memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); - if (assoc_req->mode == IW_MODE_INFRA) { - ret = lbs_try_associate(priv, assoc_req); - lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n", - ret); - } else if (assoc_req->mode == IW_MODE_ADHOC) { - lbs_adhoc_join(priv, assoc_req); - } - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int assoc_helper_associate(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0, done = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - /* If we're given and 'any' BSSID, try associating based on SSID */ - - if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { - if (compare_ether_addr(bssid_any, assoc_req->bssid) && - compare_ether_addr(bssid_off, assoc_req->bssid)) { - ret = assoc_helper_bssid(priv, assoc_req); - done = 1; - } - } - - if (!done && test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { - ret = assoc_helper_essid(priv, assoc_req); - } - - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int assoc_helper_mode(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - if (assoc_req->mode == priv->mode) - goto done; - - if (assoc_req->mode == IW_MODE_INFRA) { - if (priv->psstate != PS_STATE_FULL_POWER) - lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); - priv->psmode = LBS802_11POWERMODECAM; - } - - priv->mode = assoc_req->mode; - ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, - assoc_req->mode == IW_MODE_ADHOC ? 
2 : 1); - -done: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -static int assoc_helper_channel(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - ret = lbs_update_channel(priv); - if (ret) { - lbs_deb_assoc("ASSOC: channel: error getting channel.\n"); - goto done; - } - - if (assoc_req->channel == priv->channel) - goto done; - - if (priv->mesh_dev) { - /* Change mesh channel first; 21.p21 firmware won't let - you change channel otherwise (even though it'll return - an error to this */ - lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, - assoc_req->channel); - } - - lbs_deb_assoc("ASSOC: channel: %d -> %d\n", - priv->channel, assoc_req->channel); - - ret = lbs_set_channel(priv, assoc_req->channel); - if (ret < 0) - lbs_deb_assoc("ASSOC: channel: error setting channel.\n"); - - /* FIXME: shouldn't need to grab the channel _again_ after setting - * it since the firmware is supposed to return the new channel, but - * whatever... */ - ret = lbs_update_channel(priv); - if (ret) { - lbs_deb_assoc("ASSOC: channel: error getting channel.\n"); - goto done; - } - - if (assoc_req->channel != priv->channel) { - lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n", - assoc_req->channel); - goto restore_mesh; - } - - if (assoc_req->secinfo.wep_enabled && - (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len || - assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) { - /* Make sure WEP keys are re-sent to firmware */ - set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); - } - - /* Must restart/rejoin adhoc networks after channel change */ - set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); - - restore_mesh: - if (priv->mesh_dev) - lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, - priv->channel); - - done: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int assoc_helper_wep_keys(struct lbs_private *priv, - struct assoc_request *assoc_req) -{ - int i; - int ret = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - /* Set or remove WEP keys */ - if (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len || - assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len) - ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_ADD, assoc_req); - else - ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_REMOVE, assoc_req); - - if (ret) - goto out; - - /* enable/disable the MAC's WEP packet filter */ - if (assoc_req->secinfo.wep_enabled) - priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE; - else - priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE; - - lbs_set_mac_control(priv); - - mutex_lock(&priv->lock); - - /* Copy WEP keys into priv wep key fields */ - for (i = 0; i < 4; i++) { - memcpy(&priv->wep_keys[i], &assoc_req->wep_keys[i], - sizeof(struct enc_key)); - } - priv->wep_tx_keyidx = assoc_req->wep_tx_keyidx; - - mutex_unlock(&priv->lock); - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - -static int assoc_helper_secinfo(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - uint16_t do_wpa; - uint16_t rsn = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - memcpy(&priv->secinfo, &assoc_req->secinfo, - sizeof(struct lbs_802_11_security)); - - lbs_set_mac_control(priv); - - /* If RSN is already enabled, don't try to enable it again, since - * ENABLE_RSN resets internal state machines and will clobber the - * 4-way WPA handshake. 
- */ - - /* Get RSN enabled/disabled */ - ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_GET, &rsn); - if (ret) { - lbs_deb_assoc("Failed to get RSN status: %d\n", ret); - goto out; - } - - /* Don't re-enable RSN if it's already enabled */ - do_wpa = assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled; - if (do_wpa == rsn) - goto out; - - /* Set RSN enabled/disabled */ - ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_SET, &do_wpa); - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int assoc_helper_wpa_keys(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - unsigned int flags = assoc_req->flags; - - lbs_deb_enter(LBS_DEB_ASSOC); - - /* Work around older firmware bug where WPA unicast and multicast - * keys must be set independently. Seen in SDIO parts with firmware - * version 5.0.11p0. - */ - - if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { - clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); - ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req); - assoc_req->flags = flags; - } - - if (ret) - goto out; - - memcpy(&priv->wpa_unicast_key, &assoc_req->wpa_unicast_key, - sizeof(struct enc_key)); - - if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { - clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); - - ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req); - assoc_req->flags = flags; - - memcpy(&priv->wpa_mcast_key, &assoc_req->wpa_mcast_key, - sizeof(struct enc_key)); - } - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int assoc_helper_wpa_ie(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - - if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) { - memcpy(&priv->wpa_ie, &assoc_req->wpa_ie, assoc_req->wpa_ie_len); - priv->wpa_ie_len = assoc_req->wpa_ie_len; - } else { - memset(&priv->wpa_ie, 0, MAX_WPA_IE_LEN); - priv->wpa_ie_len = 0; - } - - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int should_deauth_infrastructure(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - int ret = 0; - - if (priv->connect_status != LBS_CONNECTED) - return 0; - - lbs_deb_enter(LBS_DEB_ASSOC); - if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { - lbs_deb_assoc("Deauthenticating due to new SSID\n"); - ret = 1; - goto out; - } - - if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { - if (priv->secinfo.auth_mode != assoc_req->secinfo.auth_mode) { - lbs_deb_assoc("Deauthenticating due to new security\n"); - ret = 1; - goto out; - } - } - - if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { - lbs_deb_assoc("Deauthenticating due to new BSSID\n"); - ret = 1; - goto out; - } - - if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { - lbs_deb_assoc("Deauthenticating due to channel switch\n"); - ret = 1; - goto out; - } - - /* FIXME: deal with 'auto' mode somehow */ - if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { - if (assoc_req->mode != IW_MODE_INFRA) { - lbs_deb_assoc("Deauthenticating due to leaving " - "infra mode\n"); - ret = 1; - goto out; - } - } - -out: - lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); - return ret; -} - - -static int should_stop_adhoc(struct lbs_private *priv, - struct assoc_request * assoc_req) -{ - lbs_deb_enter(LBS_DEB_ASSOC); - - if (priv->connect_status != LBS_CONNECTED) - return 0; - - if (lbs_ssid_cmp(priv->curbssparams.ssid, - priv->curbssparams.ssid_len, - 
assoc_req->ssid, assoc_req->ssid_len) != 0) - return 1; - - /* FIXME: deal with 'auto' mode somehow */ - if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { - if (assoc_req->mode != IW_MODE_ADHOC) - return 1; - } - - if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { - if (assoc_req->channel != priv->channel) - return 1; - } - - lbs_deb_leave(LBS_DEB_ASSOC); - return 0; -} - - -/** - * @brief This function finds the best SSID in the Scan List - * - * Search the scan table for the best SSID that also matches the current - * adapter network preference (infrastructure or adhoc) - * - * @param priv A pointer to struct lbs_private - * - * @return index in BSSID list - */ -static struct bss_descriptor *lbs_find_best_ssid_in_list( - struct lbs_private *priv, uint8_t mode) -{ - uint8_t bestrssi = 0; - struct bss_descriptor *iter_bss; - struct bss_descriptor *best_bss = NULL; - - lbs_deb_enter(LBS_DEB_SCAN); - - mutex_lock(&priv->lock); - - list_for_each_entry(iter_bss, &priv->network_list, list) { - switch (mode) { - case IW_MODE_INFRA: - case IW_MODE_ADHOC: - if (!is_network_compatible(priv, iter_bss, mode)) - break; - if (SCAN_RSSI(iter_bss->rssi) <= bestrssi) - break; - bestrssi = SCAN_RSSI(iter_bss->rssi); - best_bss = iter_bss; - break; - case IW_MODE_AUTO: - default: - if (SCAN_RSSI(iter_bss->rssi) <= bestrssi) - break; - bestrssi = SCAN_RSSI(iter_bss->rssi); - best_bss = iter_bss; - break; - } - } - - mutex_unlock(&priv->lock); - lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss); - return best_bss; -} - -/** - * @brief Find the best AP - * - * Used from association worker. - * - * @param priv A pointer to struct lbs_private structure - * @param pSSID A pointer to AP's ssid - * - * @return 0--success, otherwise--fail - */ -static int lbs_find_best_network_ssid(struct lbs_private *priv, - uint8_t *out_ssid, uint8_t *out_ssid_len, uint8_t preferred_mode, - uint8_t *out_mode) -{ - int ret = -1; - struct bss_descriptor *found; - - lbs_deb_enter(LBS_DEB_SCAN); - - priv->scan_ssid_len = 0; - lbs_scan_networks(priv, 1); - if (priv->surpriseremoved) - goto out; - - found = lbs_find_best_ssid_in_list(priv, preferred_mode); - if (found && (found->ssid_len > 0)) { - memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN); - *out_ssid_len = found->ssid_len; - *out_mode = found->mode; - ret = 0; - } - -out: - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); - return ret; -} - - -void lbs_association_worker(struct work_struct *work) -{ - struct lbs_private *priv = container_of(work, struct lbs_private, - assoc_work.work); - struct assoc_request * assoc_req = NULL; - int ret = 0; - int find_any_ssid = 0; - DECLARE_SSID_BUF(ssid); - - lbs_deb_enter(LBS_DEB_ASSOC); - - mutex_lock(&priv->lock); - assoc_req = priv->pending_assoc_req; - priv->pending_assoc_req = NULL; - priv->in_progress_assoc_req = assoc_req; - mutex_unlock(&priv->lock); - - if (!assoc_req) - goto done; - - lbs_deb_assoc( - "Association Request:\n" - " flags: 0x%08lx\n" - " SSID: '%s'\n" - " chann: %d\n" - " band: %d\n" - " mode: %d\n" - " BSSID: %pM\n" - " secinfo: %s%s%s\n" - " auth_mode: %d\n", - assoc_req->flags, - print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len), - assoc_req->channel, assoc_req->band, assoc_req->mode, - assoc_req->bssid, - assoc_req->secinfo.WPAenabled ? " WPA" : "", - assoc_req->secinfo.WPA2enabled ? " WPA2" : "", - assoc_req->secinfo.wep_enabled ? 
" WEP" : "", - assoc_req->secinfo.auth_mode); - - /* If 'any' SSID was specified, find an SSID to associate with */ - if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) && - !assoc_req->ssid_len) - find_any_ssid = 1; - - /* But don't use 'any' SSID if there's a valid locked BSSID to use */ - if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { - if (compare_ether_addr(assoc_req->bssid, bssid_any) && - compare_ether_addr(assoc_req->bssid, bssid_off)) - find_any_ssid = 0; - } - - if (find_any_ssid) { - u8 new_mode = assoc_req->mode; - - ret = lbs_find_best_network_ssid(priv, assoc_req->ssid, - &assoc_req->ssid_len, assoc_req->mode, &new_mode); - if (ret) { - lbs_deb_assoc("Could not find best network\n"); - ret = -ENETUNREACH; - goto out; - } - - /* Ensure we switch to the mode of the AP */ - if (assoc_req->mode == IW_MODE_AUTO) { - set_bit(ASSOC_FLAG_MODE, &assoc_req->flags); - assoc_req->mode = new_mode; - } - } - - /* - * Check if the attributes being changing require deauthentication - * from the currently associated infrastructure access point. - */ - if (priv->mode == IW_MODE_INFRA) { - if (should_deauth_infrastructure(priv, assoc_req)) { - ret = lbs_cmd_80211_deauthenticate(priv, - priv->curbssparams.bssid, - WLAN_REASON_DEAUTH_LEAVING); - if (ret) { - lbs_deb_assoc("Deauthentication due to new " - "configuration request failed: %d\n", - ret); - } - } - } else if (priv->mode == IW_MODE_ADHOC) { - if (should_stop_adhoc(priv, assoc_req)) { - ret = lbs_adhoc_stop(priv); - if (ret) { - lbs_deb_assoc("Teardown of AdHoc network due to " - "new configuration request failed: %d\n", - ret); - } - - } - } - - /* Send the various configuration bits to the firmware */ - if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { - ret = assoc_helper_mode(priv, assoc_req); - if (ret) - goto out; - } - - if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { - ret = assoc_helper_channel(priv, assoc_req); - if (ret) - goto out; - } - - if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { - ret = assoc_helper_secinfo(priv, assoc_req); - if (ret) - goto out; - } - - if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) { - ret = assoc_helper_wpa_ie(priv, assoc_req); - if (ret) - goto out; - } - - /* - * v10 FW wants WPA keys to be set/cleared before WEP key operations, - * otherwise it will fail to correctly associate to WEP networks. - * Other firmware versions don't appear to care. - */ - if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) || - test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { - ret = assoc_helper_wpa_keys(priv, assoc_req); - if (ret) - goto out; - } - - if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) || - test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) { - ret = assoc_helper_wep_keys(priv, assoc_req); - if (ret) - goto out; - } - - - /* SSID/BSSID should be the _last_ config option set, because they - * trigger the association attempt. 
- */ - if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) || - test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { - int success = 1; - - ret = assoc_helper_associate(priv, assoc_req); - if (ret) { - lbs_deb_assoc("ASSOC: association unsuccessful: %d\n", - ret); - success = 0; - } - - if (priv->connect_status != LBS_CONNECTED) { - lbs_deb_assoc("ASSOC: association unsuccessful, " - "not connected\n"); - success = 0; - } - - if (success) { - lbs_deb_assoc("associated to %pM\n", - priv->curbssparams.bssid); - lbs_prepare_and_send_command(priv, - CMD_802_11_RSSI, - 0, CMD_OPTION_WAITFORRSP, 0, NULL); - } else { - ret = -1; - } - } - -out: - if (ret) { - lbs_deb_assoc("ASSOC: reconfiguration attempt unsuccessful: %d\n", - ret); - } - - mutex_lock(&priv->lock); - priv->in_progress_assoc_req = NULL; - mutex_unlock(&priv->lock); - kfree(assoc_req); - -done: - lbs_deb_leave(LBS_DEB_ASSOC); -} - - -/* - * Caller MUST hold any necessary locks - */ -struct assoc_request *lbs_get_association_request(struct lbs_private *priv) -{ - struct assoc_request * assoc_req; - - lbs_deb_enter(LBS_DEB_ASSOC); - if (!priv->pending_assoc_req) { - priv->pending_assoc_req = kzalloc(sizeof(struct assoc_request), - GFP_KERNEL); - if (!priv->pending_assoc_req) { - lbs_pr_info("Not enough memory to allocate association" - " request!\n"); - return NULL; - } - } - - /* Copy current configuration attributes to the association request, - * but don't overwrite any that are already set. - */ - assoc_req = priv->pending_assoc_req; - if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { - memcpy(&assoc_req->ssid, &priv->curbssparams.ssid, - IEEE80211_MAX_SSID_LEN); - assoc_req->ssid_len = priv->curbssparams.ssid_len; - } - - if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) - assoc_req->channel = priv->channel; - - if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags)) - assoc_req->band = priv->curbssparams.band; - - if (!test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) - assoc_req->mode = priv->mode; - - if (!test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { - memcpy(&assoc_req->bssid, priv->curbssparams.bssid, - ETH_ALEN); - } - - if (!test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)) { - int i; - for (i = 0; i < 4; i++) { - memcpy(&assoc_req->wep_keys[i], &priv->wep_keys[i], - sizeof(struct enc_key)); - } - } - - if (!test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) - assoc_req->wep_tx_keyidx = priv->wep_tx_keyidx; - - if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { - memcpy(&assoc_req->wpa_mcast_key, &priv->wpa_mcast_key, - sizeof(struct enc_key)); - } - - if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { - memcpy(&assoc_req->wpa_unicast_key, &priv->wpa_unicast_key, - sizeof(struct enc_key)); - } - - if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { - memcpy(&assoc_req->secinfo, &priv->secinfo, - sizeof(struct lbs_802_11_security)); - } - - if (!test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) { - memcpy(&assoc_req->wpa_ie, &priv->wpa_ie, - MAX_WPA_IE_LEN); - assoc_req->wpa_ie_len = priv->wpa_ie_len; - } - - lbs_deb_leave(LBS_DEB_ASSOC); - return assoc_req; -} - - -/** - * @brief Deauthenticate from a specific BSS - * - * @param priv A pointer to struct lbs_private structure - * @param bssid The specific BSS to deauthenticate from - * @param reason The 802.11 sec. 
7.3.1.7 Reason Code for deauthenticating - * - * @return 0 on success, error on failure - */ -int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN], - u16 reason) -{ - struct cmd_ds_802_11_deauthenticate cmd; - int ret; - - lbs_deb_enter(LBS_DEB_JOIN); - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - memcpy(cmd.macaddr, &bssid[0], ETH_ALEN); - cmd.reasoncode = cpu_to_le16(reason); - - ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd); - - /* Clean up everything even if there was an error; can't assume that - * we're still authenticated to the AP after trying to deauth. - */ - lbs_mac_event_disconnected(priv); - - lbs_deb_leave(LBS_DEB_JOIN); - return ret; -} - diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h deleted file mode 100644 index 40621b789fc50badee7356591147a36f07393e7b..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/assoc.h +++ /dev/null @@ -1,155 +0,0 @@ -/* Copyright (C) 2006, Red Hat, Inc. */ - -#ifndef _LBS_ASSOC_H_ -#define _LBS_ASSOC_H_ - - -#include "defs.h" -#include "host.h" - - -struct lbs_private; - -/* - * In theory, the IE is limited to the IE length, 255, - * but in practice 64 bytes are enough. - */ -#define MAX_WPA_IE_LEN 64 - - - -struct lbs_802_11_security { - u8 WPAenabled; - u8 WPA2enabled; - u8 wep_enabled; - u8 auth_mode; - u32 key_mgmt; -}; - -/** Current Basic Service Set State Structure */ -struct current_bss_params { - /** bssid */ - u8 bssid[ETH_ALEN]; - /** ssid */ - u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 ssid_len; - - /** band */ - u8 band; - /** channel is directly in priv->channel */ - /** zero-terminated array of supported data rates */ - u8 rates[MAX_RATES + 1]; -}; - -/** - * @brief Structure used to store information for each beacon/probe response - */ -struct bss_descriptor { - u8 bssid[ETH_ALEN]; - - u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 ssid_len; - - u16 capability; - u32 rssi; - u32 channel; - u16 beaconperiod; - __le16 atimwindow; - - /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */ - u8 mode; - - /* zero-terminated array of supported data rates */ - u8 rates[MAX_RATES + 1]; - - unsigned long last_scanned; - - union ieee_phy_param_set phy; - union ieee_ss_param_set ss; - - u8 wpa_ie[MAX_WPA_IE_LEN]; - size_t wpa_ie_len; - u8 rsn_ie[MAX_WPA_IE_LEN]; - size_t rsn_ie_len; - - u8 mesh; - - struct list_head list; -}; - -/** Association request - * - * Encapsulates all the options that describe a specific assocation request - * or configuration of the wireless card's radio, mode, and security settings. 
- */ -struct assoc_request { -#define ASSOC_FLAG_SSID 1 -#define ASSOC_FLAG_CHANNEL 2 -#define ASSOC_FLAG_BAND 3 -#define ASSOC_FLAG_MODE 4 -#define ASSOC_FLAG_BSSID 5 -#define ASSOC_FLAG_WEP_KEYS 6 -#define ASSOC_FLAG_WEP_TX_KEYIDX 7 -#define ASSOC_FLAG_WPA_MCAST_KEY 8 -#define ASSOC_FLAG_WPA_UCAST_KEY 9 -#define ASSOC_FLAG_SECINFO 10 -#define ASSOC_FLAG_WPA_IE 11 - unsigned long flags; - - u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 ssid_len; - u8 channel; - u8 band; - u8 mode; - u8 bssid[ETH_ALEN] __attribute__ ((aligned (2))); - - /** WEP keys */ - struct enc_key wep_keys[4]; - u16 wep_tx_keyidx; - - /** WPA keys */ - struct enc_key wpa_mcast_key; - struct enc_key wpa_unicast_key; - - struct lbs_802_11_security secinfo; - - /** WPA Information Elements*/ - u8 wpa_ie[MAX_WPA_IE_LEN]; - u8 wpa_ie_len; - - /* BSS to associate with for infrastructure of Ad-Hoc join */ - struct bss_descriptor bss; -}; - - -extern u8 lbs_bg_rates[MAX_RATES]; - -void lbs_association_worker(struct work_struct *work); -struct assoc_request *lbs_get_association_request(struct lbs_private *priv); - -int lbs_adhoc_stop(struct lbs_private *priv); - -int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, - u8 bssid[ETH_ALEN], u16 reason); - -int lbs_cmd_802_11_rssi(struct lbs_private *priv, - struct cmd_ds_command *cmd); -int lbs_ret_802_11_rssi(struct lbs_private *priv, - struct cmd_ds_command *resp); - -int lbs_cmd_bcn_ctrl(struct lbs_private *priv, - struct cmd_ds_command *cmd, - u16 cmd_action); -int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv, - struct cmd_ds_command *resp); - -int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action, - struct assoc_request *assoc); - -int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action, - uint16_t *enable); - -int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action, - struct assoc_request *assoc); - -#endif /* _LBS_ASSOC_H */ diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 9d5d3ccf08c854376e9a22d3364019b37512baf5..25f902760980c3348fe238d33a24331d1d31438f 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -7,8 +7,11 @@ */ #include +#include #include +#include +#include "decl.h" #include "cfg.h" #include "cmd.h" @@ -39,26 +42,27 @@ static struct ieee80211_channel lbs_2ghz_channels[] = { CHAN2G(14, 2484, 0), }; -#define RATETAB_ENT(_rate, _rateid, _flags) { \ - .bitrate = (_rate), \ - .hw_value = (_rateid), \ - .flags = (_flags), \ +#define RATETAB_ENT(_rate, _hw_value, _flags) { \ + .bitrate = (_rate), \ + .hw_value = (_hw_value), \ + .flags = (_flags), \ } +/* Table 6 in section 3.2.1.1 */ static struct ieee80211_rate lbs_rates[] = { - RATETAB_ENT(10, 0x1, 0), - RATETAB_ENT(20, 0x2, 0), - RATETAB_ENT(55, 0x4, 0), - RATETAB_ENT(110, 0x8, 0), - RATETAB_ENT(60, 0x10, 0), - RATETAB_ENT(90, 0x20, 0), - RATETAB_ENT(120, 0x40, 0), - RATETAB_ENT(180, 0x80, 0), - RATETAB_ENT(240, 0x100, 0), - RATETAB_ENT(360, 0x200, 0), - RATETAB_ENT(480, 0x400, 0), - RATETAB_ENT(540, 0x800, 0), + RATETAB_ENT(10, 0, 0), + RATETAB_ENT(20, 1, 0), + RATETAB_ENT(55, 2, 0), + RATETAB_ENT(110, 3, 0), + RATETAB_ENT(60, 9, 0), + RATETAB_ENT(90, 6, 0), + RATETAB_ENT(120, 7, 0), + RATETAB_ENT(180, 8, 0), + RATETAB_ENT(240, 9, 0), + RATETAB_ENT(360, 10, 0), + RATETAB_ENT(480, 11, 0), + RATETAB_ENT(540, 12, 0), }; static struct ieee80211_supported_band lbs_band_2ghz = { @@ -76,22 +80,1741 @@ static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_CCMP, }; +/* 
Time to stay on the channel */ +#define LBS_DWELL_PASSIVE 100 +#define LBS_DWELL_ACTIVE 40 +/*************************************************************************** + * Misc utility functions + * + * TLVs are Marvell specific. They are very similar to IEs, they have the + * same structure: type, length, data*. The only difference: for IEs, the + * type and length are u8, but for TLVs they're __le16. + */ + +/* + * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 + * in the firmware spec + */ +static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) +{ + int ret = -ENOTSUPP; + + switch (auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + case NL80211_AUTHTYPE_SHARED_KEY: + ret = auth_type; + break; + case NL80211_AUTHTYPE_AUTOMATIC: + ret = NL80211_AUTHTYPE_OPEN_SYSTEM; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + ret = 0x80; + break; + default: + /* silence compiler */ + break; + } + return ret; +} + + +/* Various firmware commands need the list of supported rates, but with + the hight-bit set for basic rates */ +static int lbs_add_rates(u8 *rates) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { + u8 rate = lbs_rates[i].bitrate / 5; + if (rate == 0x02 || rate == 0x04 || + rate == 0x0b || rate == 0x16) + rate |= 0x80; + rates[i] = rate; + } + return ARRAY_SIZE(lbs_rates); +} + + +/*************************************************************************** + * TLV utility functions + * + * TLVs are Marvell specific. They are very similar to IEs, they have the + * same structure: type, length, data*. The only difference: for IEs, the + * type and length are u8, but for TLVs they're __le16. + */ + + +/* + * Add ssid TLV + */ +#define LBS_MAX_SSID_TLV_SIZE \ + (sizeof(struct mrvl_ie_header) \ + + IEEE80211_MAX_SSID_LEN) + +static int lbs_add_ssid_tlv(u8 *tlv, const u8 *ssid, int ssid_len) +{ + struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv; + + /* + * TLV-ID SSID 00 00 + * length 06 00 + * ssid 4d 4e 54 45 53 54 + */ + ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID); + ssid_tlv->header.len = cpu_to_le16(ssid_len); + memcpy(ssid_tlv->ssid, ssid, ssid_len); + return sizeof(ssid_tlv->header) + ssid_len; +} + + +/* + * Add channel list TLV (section 8.4.2) + * + * Actual channel data comes from priv->wdev->wiphy->channels. 
+ */ +#define LBS_MAX_CHANNEL_LIST_TLV_SIZE \ + (sizeof(struct mrvl_ie_header) \ + + (LBS_SCAN_BEFORE_NAP * sizeof(struct chanscanparamset))) + +static int lbs_add_channel_list_tlv(struct lbs_private *priv, u8 *tlv, + int last_channel, int active_scan) +{ + int chanscanparamsize = sizeof(struct chanscanparamset) * + (last_channel - priv->scan_channel); + + struct mrvl_ie_header *header = (void *) tlv; + + /* + * TLV-ID CHANLIST 01 01 + * length 0e 00 + * channel 00 01 00 00 00 64 00 + * radio type 00 + * channel 01 + * scan type 00 + * min scan time 00 00 + * max scan time 64 00 + * channel 2 00 02 00 00 00 64 00 + * + */ + + header->type = cpu_to_le16(TLV_TYPE_CHANLIST); + header->len = cpu_to_le16(chanscanparamsize); + tlv += sizeof(struct mrvl_ie_header); + + /* lbs_deb_scan("scan: channels %d to %d\n", priv->scan_channel, + last_channel); */ + memset(tlv, 0, chanscanparamsize); + + while (priv->scan_channel < last_channel) { + struct chanscanparamset *param = (void *) tlv; + + param->radiotype = CMD_SCAN_RADIO_TYPE_BG; + param->channumber = + priv->scan_req->channels[priv->scan_channel]->hw_value; + if (active_scan) { + param->maxscantime = cpu_to_le16(LBS_DWELL_ACTIVE); + } else { + param->chanscanmode.passivescan = 1; + param->maxscantime = cpu_to_le16(LBS_DWELL_PASSIVE); + } + tlv += sizeof(struct chanscanparamset); + priv->scan_channel++; + } + return sizeof(struct mrvl_ie_header) + chanscanparamsize; +} + + +/* + * Add rates TLV + * + * The rates are in lbs_bg_rates[], but for the 802.11b + * rates the high bit is set. We add this TLV only because + * there's a firmware which otherwise doesn't report all + * APs in range. + */ +#define LBS_MAX_RATES_TLV_SIZE \ + (sizeof(struct mrvl_ie_header) \ + + (ARRAY_SIZE(lbs_rates))) + +/* Adds a TLV with all rates the hardware supports */ +static int lbs_add_supported_rates_tlv(u8 *tlv) +{ + size_t i; + struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; + + /* + * TLV-ID RATES 01 00 + * length 0e 00 + * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c + */ + rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); + tlv += sizeof(rate_tlv->header); + i = lbs_add_rates(tlv); + tlv += i; + rate_tlv->header.len = cpu_to_le16(i); + return sizeof(rate_tlv->header) + i; +} + + +/* + * Adds a TLV with all rates the hardware *and* BSS supports. + */ +static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) +{ + struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; + const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); + int n; + + /* + * 01 00 TLV_TYPE_RATES + * 04 00 len + * 82 84 8b 96 rates + */ + rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); + tlv += sizeof(rate_tlv->header); + + if (!rates_eid) { + /* Fallback: add basic 802.11b rates */ + *tlv++ = 0x82; + *tlv++ = 0x84; + *tlv++ = 0x8b; + *tlv++ = 0x96; + n = 4; + } else { + int hw, ap; + u8 ap_max = rates_eid[1]; + n = 0; + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { + u8 hw_rate = lbs_rates[hw].bitrate / 5; + for (ap = 0; ap < ap_max; ap++) { + if (hw_rate == (rates_eid[ap+2] & 0x7f)) { + *tlv++ = rates_eid[ap+2]; + n++; + } + } + } + } + + rate_tlv->header.len = cpu_to_le16(n); + return sizeof(rate_tlv->header) + n; +} + + +/* + * Add auth type TLV. + * + * This is only needed for newer firmware (V9 and up). 
+ */ +#define LBS_MAX_AUTH_TYPE_TLV_SIZE \ + sizeof(struct mrvl_ie_auth_type) + +static int lbs_add_auth_type_tlv(u8 *tlv, enum nl80211_auth_type auth_type) +{ + struct mrvl_ie_auth_type *auth = (void *) tlv; + + /* + * 1f 01 TLV_TYPE_AUTH_TYPE + * 01 00 len + * 01 auth type + */ + auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE); + auth->header.len = cpu_to_le16(sizeof(*auth)-sizeof(auth->header)); + auth->auth = cpu_to_le16(lbs_auth_to_authtype(auth_type)); + return sizeof(*auth); +} + + +/* + * Add channel (phy ds) TLV + */ +#define LBS_MAX_CHANNEL_TLV_SIZE \ + sizeof(struct mrvl_ie_header) + +static int lbs_add_channel_tlv(u8 *tlv, u8 channel) +{ + struct mrvl_ie_ds_param_set *ds = (void *) tlv; + + /* + * 03 00 TLV_TYPE_PHY_DS + * 01 00 len + * 06 channel + */ + ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS); + ds->header.len = cpu_to_le16(sizeof(*ds)-sizeof(ds->header)); + ds->channel = channel; + return sizeof(*ds); +} + + +/* + * Add (empty) CF param TLV of the form: + */ +#define LBS_MAX_CF_PARAM_TLV_SIZE \ + sizeof(struct mrvl_ie_header) + +static int lbs_add_cf_param_tlv(u8 *tlv) +{ + struct mrvl_ie_cf_param_set *cf = (void *)tlv; + + /* + * 04 00 TLV_TYPE_CF + * 06 00 len + * 00 cfpcnt + * 00 cfpperiod + * 00 00 cfpmaxduration + * 00 00 cfpdurationremaining + */ + cf->header.type = cpu_to_le16(TLV_TYPE_CF); + cf->header.len = cpu_to_le16(sizeof(*cf)-sizeof(cf->header)); + return sizeof(*cf); +} + +/* + * Add WPA TLV + */ +#define LBS_MAX_WPA_TLV_SIZE \ + (sizeof(struct mrvl_ie_header) \ + + 128 /* TODO: I guessed the size */) + +static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len) +{ + size_t tlv_len; + + /* + * We need just convert an IE to an TLV. IEs use u8 for the header, + * u8 type + * u8 len + * u8[] data + * but TLVs use __le16 instead: + * __le16 type + * __le16 len + * u8[] data + */ + *tlv++ = *ie++; + *tlv++ = 0; + tlv_len = *tlv++ = *ie++; + *tlv++ = 0; + while (tlv_len--) + *tlv++ = *ie++; + /* the TLV is two bytes larger than the IE */ + return ie_len + 2; +} + +/*************************************************************************** + * Set Channel + */ + static int lbs_cfg_set_channel(struct wiphy *wiphy, struct net_device *netdev, - struct ieee80211_channel *chan, + struct ieee80211_channel *channel, enum nl80211_channel_type channel_type) { struct lbs_private *priv = wiphy_priv(wiphy); - int ret = -ENOTSUPP; + int ret = -ENOTSUPP; + + lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", + channel->center_freq, channel_type); + + if (channel_type != NL80211_CHAN_NO_HT) + goto out; + + ret = lbs_set_channel(priv, channel->hw_value); + + out: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + +/*************************************************************************** + * Scanning + */ + +/* + * When scanning, the firmware doesn't send a nul packet with the power-safe + * bit to the AP. So we cannot stay away from our current channel too long, + * otherwise we loose data. So take a "nap" while scanning every other + * while. + */ +#define LBS_SCAN_BEFORE_NAP 4 + + +/* + * When the firmware reports back a scan-result, it gives us an "u8 rssi", + * which isn't really an RSSI, as it becomes larger when moving away from + * the AP. Anyway, we need to convert that into mBm. 
+ */ +#define LBS_SCAN_RSSI_TO_MBM(rssi) \ + ((-(int)rssi + 3)*100) + +static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, + struct cmd_header *resp) +{ + struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; + int bsssize; + const u8 *pos; + u16 nr_sets; + const u8 *tsfdesc; + int tsfsize; + int i; + int ret = -EILSEQ; + + lbs_deb_enter(LBS_DEB_CFG80211); + + bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); + nr_sets = le16_to_cpu(resp->size); + + /* + * The general layout of the scan response is described in chapter + * 5.7.1. Basically we have a common part, then any number of BSS + * descriptor sections. Finally we have section with the same number + * of TSFs. + * + * cmd_ds_802_11_scan_rsp + * cmd_header + * pos_size + * nr_sets + * bssdesc 1 + * bssid + * rssi + * timestamp + * intvl + * capa + * IEs + * bssdesc 2 + * bssdesc n + * MrvlIEtypes_TsfFimestamp_t + * TSF for BSS 1 + * TSF for BSS 2 + * TSF for BSS n + */ + + pos = scanresp->bssdesc_and_tlvbuffer; + + tsfdesc = pos + bsssize; + tsfsize = 4 + 8 * scanresp->nr_sets; + + /* Validity check: we expect a Marvell-Local TLV */ + i = get_unaligned_le16(tsfdesc); + tsfdesc += 2; + if (i != TLV_TYPE_TSFTIMESTAMP) + goto done; + /* Validity check: the TLV holds TSF values with 8 bytes each, so + * the size in the TLV must match the nr_sets value */ + i = get_unaligned_le16(tsfdesc); + tsfdesc += 2; + if (i / 8 != scanresp->nr_sets) + goto done; + + for (i = 0; i < scanresp->nr_sets; i++) { + const u8 *bssid; + const u8 *ie; + int left; + int ielen; + int rssi; + u16 intvl; + u16 capa; + int chan_no = -1; + const u8 *ssid = NULL; + u8 ssid_len = 0; + DECLARE_SSID_BUF(ssid_buf); + + int len = get_unaligned_le16(pos); + pos += 2; + + /* BSSID */ + bssid = pos; + pos += ETH_ALEN; + /* RSSI */ + rssi = *pos++; + /* Packet time stamp */ + pos += 8; + /* Beacon interval */ + intvl = get_unaligned_le16(pos); + pos += 2; + /* Capabilities */ + capa = get_unaligned_le16(pos); + pos += 2; + + /* To find out the channel, we must parse the IEs */ + ie = pos; + /* 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon + interval, capabilities */ + ielen = left = len - (6 + 1 + 8 + 2 + 2); + while (left >= 2) { + u8 id, elen; + id = *pos++; + elen = *pos++; + left -= 2; + if (elen > left || elen == 0) + goto done; + if (id == WLAN_EID_DS_PARAMS) + chan_no = *pos; + if (id == WLAN_EID_SSID) { + ssid = pos; + ssid_len = elen; + } + left -= elen; + pos += elen; + } + + /* No channel, no luck */ + if (chan_no != -1) { + struct wiphy *wiphy = priv->wdev->wiphy; + int freq = ieee80211_channel_to_frequency(chan_no); + struct ieee80211_channel *channel = + ieee80211_get_channel(wiphy, freq); + + lbs_deb_scan("scan: %pM, capa %04x, chan %2d, %s, " + "%d dBm\n", + bssid, capa, chan_no, + print_ssid(ssid_buf, ssid, ssid_len), + LBS_SCAN_RSSI_TO_MBM(rssi)/100); + + if (channel || + !(channel->flags & IEEE80211_CHAN_DISABLED)) + cfg80211_inform_bss(wiphy, channel, + bssid, le64_to_cpu(*(__le64 *)tsfdesc), + capa, intvl, ie, ielen, + LBS_SCAN_RSSI_TO_MBM(rssi), + GFP_KERNEL); + } + tsfdesc += 8; + } + ret = 0; + + done: + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} + + +/* + * Our scan command contains a TLV, consting of a SSID TLV, a channel list + * TLV and a rates TLV. 
Determine the maximum size of them: + */ +#define LBS_SCAN_MAX_CMD_SIZE \ + (sizeof(struct cmd_ds_802_11_scan) \ + + LBS_MAX_SSID_TLV_SIZE \ + + LBS_MAX_CHANNEL_LIST_TLV_SIZE \ + + LBS_MAX_RATES_TLV_SIZE) + +/* + * Assumes priv->scan_req is initialized and valid + * Assumes priv->scan_channel is initialized + */ +static void lbs_scan_worker(struct work_struct *work) +{ + struct lbs_private *priv = + container_of(work, struct lbs_private, scan_work.work); + struct cmd_ds_802_11_scan *scan_cmd; + u8 *tlv; /* pointer into our current, growing TLV storage area */ + int last_channel; + int running, carrier; + + lbs_deb_enter(LBS_DEB_SCAN); + + scan_cmd = kzalloc(LBS_SCAN_MAX_CMD_SIZE, GFP_KERNEL); + if (scan_cmd == NULL) + goto out_no_scan_cmd; + + /* prepare fixed part of scan command */ + scan_cmd->bsstype = CMD_BSS_TYPE_ANY; + + /* stop network while we're away from our main channel */ + running = !netif_queue_stopped(priv->dev); + carrier = netif_carrier_ok(priv->dev); + if (running) + netif_stop_queue(priv->dev); + if (carrier) + netif_carrier_off(priv->dev); + + /* prepare fixed part of scan command */ + tlv = scan_cmd->tlvbuffer; + + /* add SSID TLV */ + if (priv->scan_req->n_ssids) + tlv += lbs_add_ssid_tlv(tlv, + priv->scan_req->ssids[0].ssid, + priv->scan_req->ssids[0].ssid_len); + + /* add channel TLVs */ + last_channel = priv->scan_channel + LBS_SCAN_BEFORE_NAP; + if (last_channel > priv->scan_req->n_channels) + last_channel = priv->scan_req->n_channels; + tlv += lbs_add_channel_list_tlv(priv, tlv, last_channel, + priv->scan_req->n_ssids); + + /* add rates TLV */ + tlv += lbs_add_supported_rates_tlv(tlv); + + if (priv->scan_channel < priv->scan_req->n_channels) { + cancel_delayed_work(&priv->scan_work); + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(300)); + } + + /* This is the final data we are about to send */ + scan_cmd->hdr.size = cpu_to_le16(tlv - (u8 *)scan_cmd); + lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd, + sizeof(*scan_cmd)); + lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer, + tlv - scan_cmd->tlvbuffer); + + __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr, + le16_to_cpu(scan_cmd->hdr.size), + lbs_ret_scan, 0); + + if (priv->scan_channel >= priv->scan_req->n_channels) { + /* Mark scan done */ + cfg80211_scan_done(priv->scan_req, false); + priv->scan_req = NULL; + } + + /* Restart network */ + if (carrier) + netif_carrier_on(priv->dev); + if (running && !priv->tx_pending_len) + netif_wake_queue(priv->dev); + + kfree(scan_cmd); + + out_no_scan_cmd: + lbs_deb_leave(LBS_DEB_SCAN); +} + + +static int lbs_cfg_scan(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_scan_request *request) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + int ret = 0; + + lbs_deb_enter(LBS_DEB_CFG80211); + + if (priv->scan_req || delayed_work_pending(&priv->scan_work)) { + /* old scan request not yet processed */ + ret = -EAGAIN; + goto out; + } + + lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", + request->n_ssids, request->n_channels, request->ie_len); + + priv->scan_channel = 0; + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(50)); + + if (priv->surpriseremoved) + ret = -EIO; + + priv->scan_req = request; + + out: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + + +/*************************************************************************** + * Events + */ + +void lbs_send_disconnect_notification(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_CFG80211); 
+ + cfg80211_disconnected(priv->dev, + 0, + NULL, 0, + GFP_KERNEL); + + lbs_deb_leave(LBS_DEB_CFG80211); +} + +void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event) +{ + lbs_deb_enter(LBS_DEB_CFG80211); + + cfg80211_michael_mic_failure(priv->dev, + priv->assoc_bss, + event == MACREG_INT_CODE_MIC_ERR_MULTICAST ? + NL80211_KEYTYPE_GROUP : + NL80211_KEYTYPE_PAIRWISE, + -1, + NULL, + GFP_KERNEL); + + lbs_deb_leave(LBS_DEB_CFG80211); +} + + + + +/*************************************************************************** + * Connect/disconnect + */ + + +/* + * This removes all WEP keys + */ +static int lbs_remove_wep_keys(struct lbs_private *priv) +{ + struct cmd_ds_802_11_set_wep cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CFG80211); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.keyindex = cpu_to_le16(priv->wep_tx_key); + cmd.action = cpu_to_le16(CMD_ACT_REMOVE); + + ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd); + + lbs_deb_leave(LBS_DEB_CFG80211); + return ret; +} + +/* + * Set WEP keys + */ +static int lbs_set_wep_keys(struct lbs_private *priv) +{ + struct cmd_ds_802_11_set_wep cmd; + int i; + int ret; + + lbs_deb_enter(LBS_DEB_CFG80211); + + /* + * command 13 00 + * size 50 00 + * sequence xx xx + * result 00 00 + * action 02 00 ACT_ADD + * transmit key 00 00 + * type for key 1 01 WEP40 + * type for key 2 00 + * type for key 3 00 + * type for key 4 00 + * key 1 39 39 39 39 39 00 00 00 + * 00 00 00 00 00 00 00 00 + * key 2 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * key 3 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * key 4 00 00 00 00 00 00 00 00 + */ + if (priv->wep_key_len[0] || priv->wep_key_len[1] || + priv->wep_key_len[2] || priv->wep_key_len[3]) { + /* Only set wep keys if we have at least one of them */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.keyindex = cpu_to_le16(priv->wep_tx_key); + cmd.action = cpu_to_le16(CMD_ACT_ADD); + + for (i = 0; i < 4; i++) { + switch (priv->wep_key_len[i]) { + case WLAN_KEY_LEN_WEP40: + cmd.keytype[i] = CMD_TYPE_WEP_40_BIT; + break; + case WLAN_KEY_LEN_WEP104: + cmd.keytype[i] = CMD_TYPE_WEP_104_BIT; + break; + default: + cmd.keytype[i] = 0; + break; + } + memcpy(cmd.keymaterial[i], priv->wep_key[i], + priv->wep_key_len[i]); + } + + ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd); + } else { + /* Otherwise remove all wep keys */ + ret = lbs_remove_wep_keys(priv); + } + + lbs_deb_leave(LBS_DEB_CFG80211); + return ret; +} + + +/* + * Enable/Disable RSN status + */ +static int lbs_enable_rsn(struct lbs_private *priv, int enable) +{ + struct cmd_ds_802_11_enable_rsn cmd; + int ret; + + lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", enable); + + /* + * cmd 2f 00 + * size 0c 00 + * sequence xx xx + * result 00 00 + * action 01 00 ACT_SET + * enable 01 00 + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.enable = cpu_to_le16(enable); + + ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd); + + lbs_deb_leave(LBS_DEB_CFG80211); + return ret; +} + + +/* + * Set WPA/WPA key material + */ + +/* like "struct cmd_ds_802_11_key_material", but with cmd_header. 
Once we + * get rid of WEXT, this should go into host.h */ + +struct cmd_key_material { + struct cmd_header hdr; + + __le16 action; + struct MrvlIEtype_keyParamSet param; +} __packed; + +static int lbs_set_key_material(struct lbs_private *priv, + int key_type, + int key_info, + u8 *key, u16 key_len) +{ + struct cmd_key_material cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CFG80211); + + /* + * Example for WPA (TKIP): + * + * cmd 5e 00 + * size 34 00 + * sequence xx xx + * result 00 00 + * action 01 00 + * TLV type 00 01 key param + * length 00 26 + * key type 01 00 TKIP + * key info 06 00 UNICAST | ENABLED + * key len 20 00 + * key 32 bytes + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.param.type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); + cmd.param.length = cpu_to_le16(sizeof(cmd.param) - 4); + cmd.param.keytypeid = cpu_to_le16(key_type); + cmd.param.keyinfo = cpu_to_le16(key_info); + cmd.param.keylen = cpu_to_le16(key_len); + if (key && key_len) + memcpy(cmd.param.key, key, key_len); + + ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd); + + lbs_deb_leave(LBS_DEB_CFG80211); + return ret; +} + + +/* + * Sets the auth type (open, shared, etc) in the firmware. That + * we use CMD_802_11_AUTHENTICATE is misleading, this firmware + * command doesn't send an authentication frame at all, it just + * stores the auth_type. + */ +static int lbs_set_authtype(struct lbs_private *priv, + struct cfg80211_connect_params *sme) +{ + struct cmd_ds_802_11_authenticate cmd; + int ret; + + lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", sme->auth_type); + + /* + * cmd 11 00 + * size 19 00 + * sequence xx xx + * result 00 00 + * BSS id 00 13 19 80 da 30 + * auth type 00 + * reserved 00 00 00 00 00 00 00 00 00 00 + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + if (sme->bssid) + memcpy(cmd.bssid, sme->bssid, ETH_ALEN); + /* convert auth_type */ + ret = lbs_auth_to_authtype(sme->auth_type); + if (ret < 0) + goto done; + + cmd.authtype = ret; + ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd); + + done: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + +/* + * Create association request + */ +#define LBS_ASSOC_MAX_CMD_SIZE \ + (sizeof(struct cmd_ds_802_11_associate) \ + - 512 /* cmd_ds_802_11_associate.iebuf */ \ + + LBS_MAX_SSID_TLV_SIZE \ + + LBS_MAX_CHANNEL_TLV_SIZE \ + + LBS_MAX_CF_PARAM_TLV_SIZE \ + + LBS_MAX_AUTH_TYPE_TLV_SIZE \ + + LBS_MAX_WPA_TLV_SIZE) + +static int lbs_associate(struct lbs_private *priv, + struct cfg80211_bss *bss, + struct cfg80211_connect_params *sme) +{ + struct cmd_ds_802_11_associate_response *resp; + struct cmd_ds_802_11_associate *cmd = kzalloc(LBS_ASSOC_MAX_CMD_SIZE, + GFP_KERNEL); + const u8 *ssid_eid; + size_t len, resp_ie_len; + int status; + int ret; + u8 *pos = &(cmd->iebuf[0]); + + lbs_deb_enter(LBS_DEB_CFG80211); + + if (!cmd) { + ret = -ENOMEM; + goto done; + } + + /* + * cmd 50 00 + * length 34 00 + * sequence xx xx + * result 00 00 + * BSS id 00 13 19 80 da 30 + * capabilities 11 00 + * listen interval 0a 00 + * beacon interval 00 00 + * DTIM period 00 + * TLVs xx (up to 512 bytes) + */ + cmd->hdr.command = cpu_to_le16(CMD_802_11_ASSOCIATE); + + /* Fill in static fields */ + memcpy(cmd->bssid, bss->bssid, ETH_ALEN); + cmd->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL); + cmd->capability = cpu_to_le16(bss->capability); + + /* add SSID TLV */ + ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); + 
if (ssid_eid) + pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]); + else + lbs_deb_assoc("no SSID\n"); + + /* add DS param TLV */ + if (bss->channel) + pos += lbs_add_channel_tlv(pos, bss->channel->hw_value); + else + lbs_deb_assoc("no channel\n"); + + /* add (empty) CF param TLV */ + pos += lbs_add_cf_param_tlv(pos); + + /* add rates TLV */ + pos += lbs_add_common_rates_tlv(pos, bss); + + /* add auth type TLV */ + if (priv->fwrelease >= 0x09000000) + pos += lbs_add_auth_type_tlv(pos, sme->auth_type); + + /* add WPA/WPA2 TLV */ + if (sme->ie && sme->ie_len) + pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len); + + len = (sizeof(*cmd) - sizeof(cmd->iebuf)) + + (u16)(pos - (u8 *) &cmd->iebuf); + cmd->hdr.size = cpu_to_le16(len); + + /* store for later use */ + memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN); + + ret = lbs_cmd_with_response(priv, CMD_802_11_ASSOCIATE, cmd); + if (ret) + goto done; + + + /* generate connect message to cfg80211 */ + + resp = (void *) cmd; /* recast for easier field access */ + status = le16_to_cpu(resp->statuscode); + + /* Convert statis code of old firmware */ + if (priv->fwrelease < 0x09000000) + switch (status) { + case 0: + break; + case 1: + lbs_deb_assoc("invalid association parameters\n"); + status = WLAN_STATUS_CAPS_UNSUPPORTED; + break; + case 2: + lbs_deb_assoc("timer expired while waiting for AP\n"); + status = WLAN_STATUS_AUTH_TIMEOUT; + break; + case 3: + lbs_deb_assoc("association refused by AP\n"); + status = WLAN_STATUS_ASSOC_DENIED_UNSPEC; + break; + case 4: + lbs_deb_assoc("authentication refused by AP\n"); + status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION; + break; + default: + lbs_deb_assoc("association failure %d\n", status); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + } + + lbs_deb_assoc("status %d, capability 0x%04x\n", status, + le16_to_cpu(resp->capability)); + + resp_ie_len = le16_to_cpu(resp->hdr.size) + - sizeof(resp->hdr) + - 6; + cfg80211_connect_result(priv->dev, + priv->assoc_bss, + sme->ie, sme->ie_len, + resp->iebuf, resp_ie_len, + status, + GFP_KERNEL); + + if (status == 0) { + /* TODO: get rid of priv->connect_status */ + priv->connect_status = LBS_CONNECTED; + netif_carrier_on(priv->dev); + if (!priv->tx_pending_len) + netif_tx_wake_all_queues(priv->dev); + } + + +done: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + +static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + struct cfg80211_bss *bss = NULL; + int ret = 0; + u8 preamble = RADIO_PREAMBLE_SHORT; - lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type); + lbs_deb_enter(LBS_DEB_CFG80211); - if (channel_type != NL80211_CHAN_NO_HT) + if (sme->bssid) { + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, + sme->ssid, sme->ssid_len, + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); + } else { + /* + * Here we have an impedance mismatch. The firmware command + * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot + * connect otherwise. However, for the connect-API of + * cfg80211 the bssid is purely optional. We don't get one, + * except the user specifies one on the "iw" command line. + * + * If we don't got one, we could initiate a scan and look + * for the best matching cfg80211_bss entry. + * + * Or, better yet, net/wireless/sme.c get's rewritten into + * something more generally useful. 
+ */ + lbs_pr_err("TODO: no BSS specified\n"); + ret = -ENOTSUPP; + goto done; + } + + + if (!bss) { + lbs_pr_err("assicate: bss %pM not in scan results\n", + sme->bssid); + ret = -ENOENT; + goto done; + } + lbs_deb_assoc("trying %pM", sme->bssid); + lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n", + sme->crypto.cipher_group, + sme->key_idx, sme->key_len); + + /* As this is a new connection, clear locally stored WEP keys */ + priv->wep_tx_key = 0; + memset(priv->wep_key, 0, sizeof(priv->wep_key)); + memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len)); + + /* set/remove WEP keys */ + switch (sme->crypto.cipher_group) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* Store provided WEP keys in priv-> */ + priv->wep_tx_key = sme->key_idx; + priv->wep_key_len[sme->key_idx] = sme->key_len; + memcpy(priv->wep_key[sme->key_idx], sme->key, sme->key_len); + /* Set WEP keys and WEP mode */ + lbs_set_wep_keys(priv); + priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE; + lbs_set_mac_control(priv); + /* No RSN mode for WEP */ + lbs_enable_rsn(priv, 0); + break; + case 0: /* there's no WLAN_CIPHER_SUITE_NONE definition */ + /* + * If we don't have no WEP, no WPA and no WPA2, + * we remove all keys like in the WPA/WPA2 setup, + * we just don't set RSN. + * + * Therefore: fall-throught + */ + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + /* Remove WEP keys and WEP mode */ + lbs_remove_wep_keys(priv); + priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE; + lbs_set_mac_control(priv); + + /* clear the WPA/WPA2 keys */ + lbs_set_key_material(priv, + KEY_TYPE_ID_WEP, /* doesn't matter */ + KEY_INFO_WPA_UNICAST, + NULL, 0); + lbs_set_key_material(priv, + KEY_TYPE_ID_WEP, /* doesn't matter */ + KEY_INFO_WPA_MCAST, + NULL, 0); + /* RSN mode for WPA/WPA2 */ + lbs_enable_rsn(priv, sme->crypto.cipher_group != 0); + break; + default: + lbs_pr_err("unsupported cipher group 0x%x\n", + sme->crypto.cipher_group); + ret = -ENOTSUPP; + goto done; + } + + lbs_set_authtype(priv, sme); + lbs_set_radio(priv, preamble, 1); + + /* Do the actual association */ + lbs_associate(priv, bss, sme); + + done: + if (bss) + cfg80211_put_bss(bss); + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + +static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + struct cmd_ds_802_11_deauthenticate cmd; + + lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code); + + /* store for lbs_cfg_ret_disconnect() */ + priv->disassoc_reason = reason_code; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + /* Mildly ugly to use a locally store my own BSSID ... 
*/ + memcpy(cmd.macaddr, &priv->assoc_bss, ETH_ALEN); + cmd.reasoncode = cpu_to_le16(reason_code); + + if (lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd)) + return -EFAULT; + + cfg80211_disconnected(priv->dev, + priv->disassoc_reason, + NULL, 0, + GFP_KERNEL); + priv->connect_status = LBS_DISCONNECTED; + + return 0; +} + + +static int lbs_cfg_set_default_key(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + + lbs_deb_enter(LBS_DEB_CFG80211); + + if (key_index != priv->wep_tx_key) { + lbs_deb_assoc("set_default_key: to %d\n", key_index); + priv->wep_tx_key = key_index; + lbs_set_wep_keys(priv); + } + + return 0; +} + + +static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev, + u8 idx, const u8 *mac_addr, + struct key_params *params) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + u16 key_info; + u16 key_type; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CFG80211); + + lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n", + params->cipher, mac_addr); + lbs_deb_assoc("add_key: key index %d, key len %d\n", + idx, params->key_len); + if (params->key_len) + lbs_deb_hex(LBS_DEB_CFG80211, "KEY", + params->key, params->key_len); + + lbs_deb_assoc("add_key: seq len %d\n", params->seq_len); + if (params->seq_len) + lbs_deb_hex(LBS_DEB_CFG80211, "SEQ", + params->seq, params->seq_len); + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* actually compare if something has changed ... */ + if ((priv->wep_key_len[idx] != params->key_len) || + memcmp(priv->wep_key[idx], + params->key, params->key_len) != 0) { + priv->wep_key_len[idx] = params->key_len; + memcpy(priv->wep_key[idx], + params->key, params->key_len); + lbs_set_wep_keys(priv); + } + break; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + key_info = KEY_INFO_WPA_ENABLED | ((idx == 0) + ? KEY_INFO_WPA_UNICAST + : KEY_INFO_WPA_MCAST); + key_type = (params->cipher == WLAN_CIPHER_SUITE_TKIP) + ? KEY_TYPE_ID_TKIP + : KEY_TYPE_ID_AES; + lbs_set_key_material(priv, + key_type, + key_info, + params->key, params->key_len); + break; + default: + lbs_pr_err("unhandled cipher 0x%x\n", params->cipher); + ret = -ENOTSUPP; + break; + } + + return ret; +} + + +static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr) +{ + + lbs_deb_enter(LBS_DEB_CFG80211); + + lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n", + key_index, mac_addr); + +#ifdef TODO + struct lbs_private *priv = wiphy_priv(wiphy); + /* + * I think can keep this a NO-OP, because: + + * - we clear all keys whenever we do lbs_cfg_connect() anyway + * - neither "iw" nor "wpa_supplicant" won't call this during + * an ongoing connection + * - TODO: but I have to check if this is still true when + * I set the AP to periodic re-keying + * - we've not kzallec() something when we've added a key at + * lbs_cfg_connect() or lbs_cfg_add_key(). + * + * This causes lbs_cfg_del_key() only called at disconnect time, + * where we'd just waste time deleting a key that is not going + * to be used anyway. 
+ */ + if (key_index < 3 && priv->wep_key_len[key_index]) { + priv->wep_key_len[key_index] = 0; + lbs_set_wep_keys(priv); + } +#endif + + return 0; +} + + +/*************************************************************************** + * Get station + */ + +static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + s8 signal, noise; + int ret; + size_t i; + + lbs_deb_enter(LBS_DEB_CFG80211); + + sinfo->filled |= STATION_INFO_TX_BYTES | + STATION_INFO_TX_PACKETS | + STATION_INFO_RX_BYTES | + STATION_INFO_RX_PACKETS; + sinfo->tx_bytes = priv->dev->stats.tx_bytes; + sinfo->tx_packets = priv->dev->stats.tx_packets; + sinfo->rx_bytes = priv->dev->stats.rx_bytes; + sinfo->rx_packets = priv->dev->stats.rx_packets; + + /* Get current RSSI */ + ret = lbs_get_rssi(priv, &signal, &noise); + if (ret == 0) { + sinfo->signal = signal; + sinfo->filled |= STATION_INFO_SIGNAL; + } + + /* Convert priv->cur_rate from hw_value to NL80211 value */ + for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { + if (priv->cur_rate == lbs_rates[i].hw_value) { + sinfo->txrate.legacy = lbs_rates[i].bitrate; + sinfo->filled |= STATION_INFO_TX_BITRATE; + break; + } + } + + return 0; +} + + + + +/*************************************************************************** + * "Site survey", here just current channel and noise level + */ + +static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev, + int idx, struct survey_info *survey) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + s8 signal, noise; + int ret; + + if (idx != 0) + ret = -ENOENT; + + lbs_deb_enter(LBS_DEB_CFG80211); + + survey->channel = ieee80211_get_channel(wiphy, + ieee80211_channel_to_frequency(priv->channel)); + + ret = lbs_get_rssi(priv, &signal, &noise); + if (ret == 0) { + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = noise; + } + + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + + +/*************************************************************************** + * Change interface + */ + +static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + int ret = 0; + + lbs_deb_enter(LBS_DEB_CFG80211); + + switch (type) { + case NL80211_IFTYPE_MONITOR: + ret = lbs_set_monitor_mode(priv, 1); + break; + case NL80211_IFTYPE_STATION: + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) + ret = lbs_set_monitor_mode(priv, 0); + if (!ret) + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1); + break; + case NL80211_IFTYPE_ADHOC: + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) + ret = lbs_set_monitor_mode(priv, 0); + if (!ret) + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2); + break; + default: + ret = -ENOTSUPP; + } + + if (!ret) + priv->wdev->iftype = type; + + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + +/*************************************************************************** + * IBSS (Ad-Hoc) + */ + +/* The firmware needs the following bits masked out of the beacon-derived + * capability field when associating/joining to a BSS: + * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused) + */ +#define CAPINFO_MASK (~(0xda00)) + + +static void lbs_join_post(struct lbs_private *priv, + struct cfg80211_ibss_params *params, + u8 *bssid, u16 capability) +{ + u8 fake_ie[2 + IEEE80211_MAX_SSID_LEN + /* ssid */ + 2 + 4 + /* basic 
rates */ + 2 + 1 + /* DS parameter */ + 2 + 2 + /* atim */ + 2 + 8]; /* extended rates */ + u8 *fake = fake_ie; + + lbs_deb_enter(LBS_DEB_CFG80211); + + /* + * For cfg80211_inform_bss, we'll need a fake IE, as we can't get + * the real IE from the firmware. So we fabricate a fake IE based on + * what the firmware actually sends (sniffed with wireshark). + */ + /* Fake SSID IE */ + *fake++ = WLAN_EID_SSID; + *fake++ = params->ssid_len; + memcpy(fake, params->ssid, params->ssid_len); + fake += params->ssid_len; + /* Fake supported basic rates IE */ + *fake++ = WLAN_EID_SUPP_RATES; + *fake++ = 4; + *fake++ = 0x82; + *fake++ = 0x84; + *fake++ = 0x8b; + *fake++ = 0x96; + /* Fake DS channel IE */ + *fake++ = WLAN_EID_DS_PARAMS; + *fake++ = 1; + *fake++ = params->channel->hw_value; + /* Fake IBSS params IE */ + *fake++ = WLAN_EID_IBSS_PARAMS; + *fake++ = 2; + *fake++ = 0; /* ATIM=0 */ + *fake++ = 0; + /* Fake extended rates IE, TODO: don't add this for 802.11b only, + * but I don't know how this could be checked */ + *fake++ = WLAN_EID_EXT_SUPP_RATES; + *fake++ = 8; + *fake++ = 0x0c; + *fake++ = 0x12; + *fake++ = 0x18; + *fake++ = 0x24; + *fake++ = 0x30; + *fake++ = 0x48; + *fake++ = 0x60; + *fake++ = 0x6c; + lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie); + + cfg80211_inform_bss(priv->wdev->wiphy, + params->channel, + bssid, + 0, + capability, + params->beacon_interval, + fake_ie, fake - fake_ie, + 0, GFP_KERNEL); + + memcpy(priv->wdev->ssid, params->ssid, params->ssid_len); + priv->wdev->ssid_len = params->ssid_len; + + cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL); + + /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */ + priv->connect_status = LBS_CONNECTED; + netif_carrier_on(priv->dev); + if (!priv->tx_pending_len) + netif_wake_queue(priv->dev); + + lbs_deb_leave(LBS_DEB_CFG80211); +} + +static int lbs_ibss_join_existing(struct lbs_private *priv, + struct cfg80211_ibss_params *params, + struct cfg80211_bss *bss) +{ + const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); + struct cmd_ds_802_11_ad_hoc_join cmd; + u8 preamble = RADIO_PREAMBLE_SHORT; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CFG80211); + + /* TODO: set preamble based on scan result */ + ret = lbs_set_radio(priv, preamble, 1); + if (ret) + goto out; + + /* + * Example CMD_802_11_AD_HOC_JOIN command: + * + * command 2c 00 CMD_802_11_AD_HOC_JOIN + * size 65 00 + * sequence xx xx + * result 00 00 + * bssid 02 27 27 97 2f 96 + * ssid 49 42 53 53 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * type 02 CMD_BSS_TYPE_IBSS + * beacon period 64 00 + * dtim period 00 + * timestamp 00 00 00 00 00 00 00 00 + * localtime 00 00 00 00 00 00 00 00 + * IE DS 03 + * IE DS len 01 + * IE DS channel 01 + * reserveed 00 00 00 00 + * IE IBSS 06 + * IE IBSS len 02 + * IE IBSS atim 00 00 + * reserved 00 00 00 00 + * capability 02 00 + * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c 00 + * fail timeout ff 00 + * probe delay 00 00 + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + memcpy(cmd.bss.bssid, bss->bssid, ETH_ALEN); + memcpy(cmd.bss.ssid, params->ssid, params->ssid_len); + cmd.bss.type = CMD_BSS_TYPE_IBSS; + cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval); + cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS; + cmd.bss.ds.header.len = 1; + cmd.bss.ds.channel = params->channel->hw_value; + cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS; + cmd.bss.ibss.header.len = 2; + cmd.bss.ibss.atimwindow = 0; + cmd.bss.capability 
= cpu_to_le16(bss->capability & CAPINFO_MASK); + + /* set rates to the intersection of our rates and the rates in the + bss */ + if (!rates_eid) { + lbs_add_rates(cmd.bss.rates); + } else { + int hw, i; + u8 rates_max = rates_eid[1]; + u8 *rates = cmd.bss.rates; + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { + u8 hw_rate = lbs_rates[hw].bitrate / 5; + for (i = 0; i < rates_max; i++) { + if (hw_rate == (rates_eid[i+2] & 0x7f)) { + u8 rate = rates_eid[i+2]; + if (rate == 0x02 || rate == 0x04 || + rate == 0x0b || rate == 0x16) + rate |= 0x80; + *rates++ = rate; + } + } + } + } + + /* Only v8 and below support setting this */ + if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) { + cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); + cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); + } + ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); + if (ret) + goto out; + + /* + * This is a sample response to CMD_802_11_AD_HOC_JOIN: + * + * response 2c 80 + * size 09 00 + * sequence xx xx + * result 00 00 + * reserved 00 + */ + lbs_join_post(priv, params, bss->bssid, bss->capability); + + out: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + +static int lbs_ibss_start_new(struct lbs_private *priv, + struct cfg80211_ibss_params *params) +{ + struct cmd_ds_802_11_ad_hoc_start cmd; + struct cmd_ds_802_11_ad_hoc_result *resp = + (struct cmd_ds_802_11_ad_hoc_result *) &cmd; + u8 preamble = RADIO_PREAMBLE_SHORT; + int ret = 0; + u16 capability; + + lbs_deb_enter(LBS_DEB_CFG80211); + + ret = lbs_set_radio(priv, preamble, 1); + if (ret) + goto out; + + /* + * Example CMD_802_11_AD_HOC_START command: + * + * command 2b 00 CMD_802_11_AD_HOC_START + * size b1 00 + * sequence xx xx + * result 00 00 + * ssid 54 45 53 54 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 + * bss type 02 + * beacon period 64 00 + * dtim period 00 + * IE IBSS 06 + * IE IBSS len 02 + * IE IBSS atim 00 00 + * reserved 00 00 00 00 + * IE DS 03 + * IE DS len 01 + * IE DS channel 01 + * reserved 00 00 00 00 + * probe delay 00 00 + * capability 02 00 + * rates 82 84 8b 96 (basic rates with have bit 7 set) + * 0c 12 18 24 30 48 60 6c + * padding 100 bytes + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + memcpy(cmd.ssid, params->ssid, params->ssid_len); + cmd.bsstype = CMD_BSS_TYPE_IBSS; + cmd.beaconperiod = cpu_to_le16(params->beacon_interval); + cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS; + cmd.ibss.header.len = 2; + cmd.ibss.atimwindow = 0; + cmd.ds.header.id = WLAN_EID_DS_PARAMS; + cmd.ds.header.len = 1; + cmd.ds.channel = params->channel->hw_value; + /* Only v8 and below support setting probe delay */ + if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) + cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); + /* TODO: mix in WLAN_CAPABILITY_PRIVACY */ + capability = WLAN_CAPABILITY_IBSS; + cmd.capability = cpu_to_le16(capability); + lbs_add_rates(cmd.rates); + + + ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd); + if (ret) goto out; - ret = lbs_set_channel(priv, chan->hw_value); + /* + * This is a sample response to CMD_802_11_AD_HOC_JOIN: + * + * response 2b 80 + * size 14 00 + * sequence xx xx + * result 00 00 + * reserved 00 + * bssid 02 2b 7b 0f 86 0e + */ + lbs_join_post(priv, params, resp->bssid, capability); + + out: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + +static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct 
cfg80211_ibss_params *params) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + int ret = 0; + struct cfg80211_bss *bss; + DECLARE_SSID_BUF(ssid_buf); + + lbs_deb_enter(LBS_DEB_CFG80211); + + if (!params->channel) { + ret = -ENOTSUPP; + goto out; + } + + ret = lbs_set_channel(priv, params->channel->hw_value); + if (ret) + goto out; + + /* Search if someone is beaconing. This assumes that the + * bss list is populated already */ + bss = cfg80211_get_bss(wiphy, params->channel, params->bssid, + params->ssid, params->ssid_len, + WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); + + if (bss) { + ret = lbs_ibss_join_existing(priv, params, bss); + cfg80211_put_bss(bss); + } else + ret = lbs_ibss_start_new(priv, params); + out: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); @@ -99,10 +1822,45 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy, } +static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + struct cmd_ds_802_11_ad_hoc_stop cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CFG80211); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd); + + /* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */ + lbs_mac_event_disconnected(priv); + + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + +/*************************************************************************** + * Initialization + */ + static struct cfg80211_ops lbs_cfg80211_ops = { .set_channel = lbs_cfg_set_channel, + .scan = lbs_cfg_scan, + .connect = lbs_cfg_connect, + .disconnect = lbs_cfg_disconnect, + .add_key = lbs_cfg_add_key, + .del_key = lbs_cfg_del_key, + .set_default_key = lbs_cfg_set_default_key, + .get_station = lbs_cfg_get_station, + .dump_survey = lbs_get_survey, + .change_virtual_intf = lbs_change_intf, + .join_ibss = lbs_join_ibss, + .leave_ibss = lbs_leave_ibss, }; @@ -142,6 +1900,36 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev) } +static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv) +{ + struct region_code_mapping { + const char *cn; + int code; + }; + + /* Section 5.17.2 */ + static struct region_code_mapping regmap[] = { + {"US ", 0x10}, /* US FCC */ + {"CA ", 0x20}, /* Canada */ + {"EU ", 0x30}, /* ETSI */ + {"ES ", 0x31}, /* Spain */ + {"FR ", 0x32}, /* France */ + {"JP ", 0x40}, /* Japan */ + }; + size_t i; + + lbs_deb_enter(LBS_DEB_CFG80211); + + for (i = 0; i < ARRAY_SIZE(regmap); i++) + if (regmap[i].code == priv->regioncode) { + regulatory_hint(priv->wdev->wiphy, regmap[i].cn); + break; + } + + lbs_deb_leave(LBS_DEB_CFG80211); +} + + /* * This function get's called after lbs_setup_firmware() determined the * firmware capabities. 
So we can setup the wiphy according to our @@ -157,10 +1945,12 @@ int lbs_cfg_register(struct lbs_private *priv) wdev->wiphy->max_scan_ssids = 1; wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - /* TODO: BIT(NL80211_IFTYPE_ADHOC); */ - wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wdev->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + if (lbs_rtap_supported(priv)) + wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); - /* TODO: honor priv->regioncode */ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz; /* @@ -169,6 +1959,7 @@ int lbs_cfg_register(struct lbs_private *priv) */ wdev->wiphy->cipher_suites = cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + wdev->wiphy->reg_notifier = lbs_reg_notifier; ret = wiphy_register(wdev->wiphy); if (ret < 0) @@ -180,10 +1971,36 @@ int lbs_cfg_register(struct lbs_private *priv) if (ret) lbs_pr_err("cannot register network device\n"); + INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); + + lbs_cfg_set_regulatory_hint(priv); + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } +int lbs_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + int ret; + + lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain " + "callback for domain %c%c\n", request->alpha2[0], + request->alpha2[1]); + + ret = lbs_set_11d_domain_info(priv, request, wiphy->bands); + + lbs_deb_leave(LBS_DEB_CFG80211); + return ret; +} + +void lbs_scan_deinit(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_CFG80211); + cancel_delayed_work_sync(&priv->scan_work); +} + void lbs_cfg_free(struct lbs_private *priv) { diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h index e09a193a34d6ace048f3da6b8144f8cd9046a4a3..4f46bb744bee8f4f8cb6432e09cf4a020180350d 100644 --- a/drivers/net/wireless/libertas/cfg.h +++ b/drivers/net/wireless/libertas/cfg.h @@ -1,16 +1,21 @@ #ifndef __LBS_CFG80211_H__ #define __LBS_CFG80211_H__ -#include "dev.h" +struct device; +struct lbs_private; +struct regulatory_request; +struct wiphy; struct wireless_dev *lbs_cfg_alloc(struct device *dev); int lbs_cfg_register(struct lbs_private *priv); void lbs_cfg_free(struct lbs_private *priv); -int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, - u8 ssid_len); -int lbs_scan_networks(struct lbs_private *priv, int full_scan); -void lbs_cfg_scan_worker(struct work_struct *work); +int lbs_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request); +void lbs_send_disconnect_notification(struct lbs_private *priv); +void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); + +void lbs_scan_deinit(struct lbs_private *priv); #endif diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index cdb9b9650d73aaf1de0114966e398defa4cae8d1..70745928f3f86ed5f26eea3975133177d0b36e52 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -6,18 +6,14 @@ #include #include #include +#include -#include "host.h" #include "decl.h" -#include "defs.h" -#include "dev.h" -#include "assoc.h" -#include "wext.h" -#include "scan.h" +#include "cfg.h" #include "cmd.h" - -static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv); +#define CAL_NF(nf) ((s32)(-(s32)(nf))) +#define CAL_RSSI(snr, nf) ((s32)((s32)(snr) + CAL_NF(nf))) /** * @brief Simple callback that copies response back into command @@ -70,36 
+66,14 @@ static u8 is_command_allowed_in_ps(u16 cmd) switch (cmd) { case CMD_802_11_RSSI: return 1; + case CMD_802_11_HOST_SLEEP_CFG: + return 1; default: break; } return 0; } -/** - * @brief This function checks if the command is allowed. - * - * @param priv A pointer to lbs_private structure - * @return allowed or not allowed. - */ - -static int lbs_is_cmd_allowed(struct lbs_private *priv) -{ - int ret = 1; - - lbs_deb_enter(LBS_DEB_CMD); - - if (!priv->is_auto_deep_sleep_enabled) { - if (priv->is_deep_sleep) { - lbs_deb_cmd("command not allowed in deep sleep\n"); - ret = 0; - } - } - - lbs_deb_leave(LBS_DEB_CMD); - return ret; -} - /** * @brief Updates the hardware details like MAC address and regulatory region * @@ -175,16 +149,28 @@ int lbs_update_hw_spec(struct lbs_private *priv) if (priv->mesh_dev) memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN); - if (lbs_set_regiontable(priv, priv->regioncode, 0)) { - ret = -1; - goto out; - } - out: lbs_deb_leave(LBS_DEB_CMD); return ret; } +static int lbs_ret_host_sleep_cfg(struct lbs_private *priv, unsigned long dummy, + struct cmd_header *resp) +{ + lbs_deb_enter(LBS_DEB_CMD); + if (priv->is_host_sleep_activated) { + priv->is_host_sleep_configured = 0; + if (priv->psstate == PS_STATE_FULL_POWER) { + priv->is_host_sleep_activated = 0; + wake_up_interruptible(&priv->host_sleep_q); + } + } else { + priv->is_host_sleep_configured = 1; + } + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, struct wol_config *p_wol_config) { @@ -202,12 +188,11 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, else cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE; - ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config); + ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config.hdr, + le16_to_cpu(cmd_config.hdr.size), + lbs_ret_host_sleep_cfg, 0); if (!ret) { - if (criteria) { - lbs_deb_cmd("Set WOL criteria to %x\n", criteria); - priv->wol_criteria = criteria; - } else + if (p_wol_config) memcpy((uint8_t *) p_wol_config, (uint8_t *)&cmd_config.wol_conf, sizeof(struct wol_config)); @@ -219,42 +204,49 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, } EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg); -static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd, - u16 cmd_action) +/** + * @brief Sets the Power Save mode + * + * @param priv A pointer to struct lbs_private structure + * @param cmd_action The Power Save operation (PS_MODE_ACTION_ENTER_PS or + * PS_MODE_ACTION_EXIT_PS) + * @param block Whether to block on a response or not + * + * @return 0 on success, error on failure + */ +int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block) { - struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode; + struct cmd_ds_802_11_ps_mode cmd; + int ret = 0; lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); - cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + - sizeof(struct cmd_header)); - psm->action = cpu_to_le16(cmd_action); - psm->multipledtim = 0; - switch (cmd_action) { - case CMD_SUBCMD_ENTER_PS: - lbs_deb_cmd("PS command:" "SubCode- Enter PS\n"); - - psm->locallisteninterval = 0; - psm->nullpktinterval = 0; - psm->multipledtim = - cpu_to_le16(MRVDRV_DEFAULT_MULTIPLE_DTIM); - break; - - case CMD_SUBCMD_EXIT_PS: - lbs_deb_cmd("PS command:" "SubCode- Exit PS\n"); - break; - - case CMD_SUBCMD_SLEEP_CONFIRMED: - lbs_deb_cmd("PS command: SubCode- sleep confirm\n"); - break; + 
memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(cmd_action); - default: - break; + if (cmd_action == PS_MODE_ACTION_ENTER_PS) { + lbs_deb_cmd("PS_MODE: action ENTER_PS\n"); + cmd.multipledtim = cpu_to_le16(1); /* Default DTIM multiple */ + } else if (cmd_action == PS_MODE_ACTION_EXIT_PS) { + lbs_deb_cmd("PS_MODE: action EXIT_PS\n"); + } else { + /* We don't handle CONFIRM_SLEEP here because it needs to + * be fastpathed to the firmware. + */ + lbs_deb_cmd("PS_MODE: unknown action 0x%X\n", cmd_action); + ret = -EOPNOTSUPP; + goto out; } - lbs_deb_leave(LBS_DEB_CMD); - return 0; + if (block) + ret = lbs_cmd_with_response(priv, CMD_802_11_PS_MODE, &cmd); + else + lbs_cmd_async(priv, CMD_802_11_PS_MODE, &cmd.hdr, sizeof (cmd)); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; } int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, @@ -353,6 +345,65 @@ int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep) return ret; } +static int lbs_ret_host_sleep_activate(struct lbs_private *priv, + unsigned long dummy, + struct cmd_header *cmd) +{ + lbs_deb_enter(LBS_DEB_FW); + priv->is_host_sleep_activated = 1; + wake_up_interruptible(&priv->host_sleep_q); + lbs_deb_leave(LBS_DEB_FW); + return 0; +} + +int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep) +{ + struct cmd_header cmd; + int ret = 0; + uint32_t criteria = EHS_REMOVE_WAKEUP; + + lbs_deb_enter(LBS_DEB_CMD); + + if (host_sleep) { + if (priv->is_host_sleep_activated != 1) { + memset(&cmd, 0, sizeof(cmd)); + ret = lbs_host_sleep_cfg(priv, priv->wol_criteria, + (struct wol_config *)NULL); + if (ret) { + lbs_pr_info("Host sleep configuration failed: " + "%d\n", ret); + return ret; + } + if (priv->psstate == PS_STATE_FULL_POWER) { + ret = __lbs_cmd(priv, + CMD_802_11_HOST_SLEEP_ACTIVATE, + &cmd, + sizeof(cmd), + lbs_ret_host_sleep_activate, 0); + if (ret) + lbs_pr_info("HOST_SLEEP_ACTIVATE " + "failed: %d\n", ret); + } + + if (!wait_event_interruptible_timeout( + priv->host_sleep_q, + priv->is_host_sleep_activated, + (10 * HZ))) { + lbs_pr_err("host_sleep_q: timer expired\n"); + ret = -1; + } + } else { + lbs_pr_err("host sleep: already enabled\n"); + } + } else { + if (priv->is_host_sleep_activated) + ret = lbs_host_sleep_cfg(priv, criteria, + (struct wol_config *)NULL); + } + + return ret; +} + /** * @brief Set an SNMP MIB value * @@ -509,23 +560,35 @@ int lbs_set_tx_power(struct lbs_private *priv, s16 dbm) return ret; } -static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd, - u16 cmd_action, void *pdata_buf) +/** + * @brief Enable or disable monitor mode (only implemented on OLPC usb8388 FW) + * + * @param priv A pointer to struct lbs_private structure + * @param enable 1 to enable monitor mode, 0 to disable + * + * @return 0 on success, error on failure + */ +int lbs_set_monitor_mode(struct lbs_private *priv, int enable) { - struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor; + struct cmd_ds_802_11_monitor_mode cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + if (enable) + cmd.mode = cpu_to_le16(0x1); - cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); - cmd->size = - cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + - sizeof(struct cmd_header)); + lbs_deb_cmd("SET_MONITOR_MODE: %d\n", enable); - monitor->action = cpu_to_le16(cmd_action); - if (cmd_action == CMD_ACT_SET) { - monitor->mode = - 
cpu_to_le16((u16) (*(u32 *) pdata_buf)); + ret = lbs_cmd_with_response(priv, CMD_802_11_MONITOR_MODE, &cmd); + if (ret == 0) { + priv->dev->type = enable ? ARPHRD_IEEE80211_RADIOTAP : + ARPHRD_ETHER; } - return 0; + lbs_deb_leave(LBS_DEB_CMD); + return ret; } /** @@ -610,78 +673,242 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel) return ret; } -static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr, - u8 cmd_action, void *pdata_buf) +/** + * @brief Get current RSSI and noise floor + * + * @param priv A pointer to struct lbs_private structure + * @param rssi On successful return, signal level in mBm + * + * @return The channel on success, error on failure + */ +int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf) { - struct lbs_offset_value *offval; + struct cmd_ds_802_11_rssi cmd; + int ret = 0; lbs_deb_enter(LBS_DEB_CMD); - offval = (struct lbs_offset_value *)pdata_buf; + BUG_ON(rssi == NULL); + BUG_ON(nf == NULL); - switch (le16_to_cpu(cmdptr->command)) { - case CMD_MAC_REG_ACCESS: - { - struct cmd_ds_mac_reg_access *macreg; + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + /* Average SNR over last 8 beacons */ + cmd.n_or_snr = cpu_to_le16(8); - cmdptr->size = - cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access) - + sizeof(struct cmd_header)); - macreg = - (struct cmd_ds_mac_reg_access *)&cmdptr->params. - macreg; + ret = lbs_cmd_with_response(priv, CMD_802_11_RSSI, &cmd); + if (ret == 0) { + *nf = CAL_NF(le16_to_cpu(cmd.nf)); + *rssi = CAL_RSSI(le16_to_cpu(cmd.n_or_snr), le16_to_cpu(cmd.nf)); + } - macreg->action = cpu_to_le16(cmd_action); - macreg->offset = cpu_to_le16((u16) offval->offset); - macreg->value = cpu_to_le32(offval->value); + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} - break; - } +/** + * @brief Send regulatory and 802.11d domain information to the firmware + * + * @param priv pointer to struct lbs_private + * @param request cfg80211 regulatory request structure + * @param bands the device's supported bands and channels + * + * @return 0 on success, error code on failure +*/ +int lbs_set_11d_domain_info(struct lbs_private *priv, + struct regulatory_request *request, + struct ieee80211_supported_band **bands) +{ + struct cmd_ds_802_11d_domain_info cmd; + struct mrvl_ie_domain_param_set *domain = &cmd.domain; + struct ieee80211_country_ie_triplet *t; + enum ieee80211_band band; + struct ieee80211_channel *ch; + u8 num_triplet = 0; + u8 num_parsed_chan = 0; + u8 first_channel = 0, next_chan = 0, max_pwr = 0; + u8 i, flag = 0; + size_t triplet_size; + int ret; - case CMD_BBP_REG_ACCESS: - { - struct cmd_ds_bbp_reg_access *bbpreg; + lbs_deb_enter(LBS_DEB_11D); - cmdptr->size = - cpu_to_le16(sizeof - (struct cmd_ds_bbp_reg_access) - + sizeof(struct cmd_header)); - bbpreg = - (struct cmd_ds_bbp_reg_access *)&cmdptr->params. 
- bbpreg; + memset(&cmd, 0, sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); - bbpreg->action = cpu_to_le16(cmd_action); - bbpreg->offset = cpu_to_le16((u16) offval->offset); - bbpreg->value = (u8) offval->value; + lbs_deb_11d("Setting country code '%c%c'\n", + request->alpha2[0], request->alpha2[1]); - break; - } + domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN); - case CMD_RF_REG_ACCESS: - { - struct cmd_ds_rf_reg_access *rfreg; + /* Set country code */ + domain->country_code[0] = request->alpha2[0]; + domain->country_code[1] = request->alpha2[1]; + domain->country_code[2] = ' '; - cmdptr->size = - cpu_to_le16(sizeof - (struct cmd_ds_rf_reg_access) + - sizeof(struct cmd_header)); - rfreg = - (struct cmd_ds_rf_reg_access *)&cmdptr->params. - rfreg; + /* Now set up the channel triplets; firmware is somewhat picky here + * and doesn't validate channel numbers and spans; hence it would + * interpret a triplet of (36, 4, 20) as channels 36, 37, 38, 39. Since + * the last 3 aren't valid channels, the driver is responsible for + * splitting that up into 4 triplet pairs of (36, 1, 20) + (40, 1, 20) + * etc. + */ + for (band = 0; + (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS); + band++) { + + if (!bands[band]) + continue; + + for (i = 0; + (i < bands[band]->n_channels) && (num_triplet < MAX_11D_TRIPLETS); + i++) { + ch = &bands[band]->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; + + if (!flag) { + flag = 1; + next_chan = first_channel = (u32) ch->hw_value; + max_pwr = ch->max_power; + num_parsed_chan = 1; + continue; + } - rfreg->action = cpu_to_le16(cmd_action); - rfreg->offset = cpu_to_le16((u16) offval->offset); - rfreg->value = (u8) offval->value; + if ((ch->hw_value == next_chan + 1) && + (ch->max_power == max_pwr)) { + /* Consolidate adjacent channels */ + next_chan++; + num_parsed_chan++; + } else { + /* Add this triplet */ + lbs_deb_11d("11D triplet (%d, %d, %d)\n", + first_channel, num_parsed_chan, + max_pwr); + t = &domain->triplet[num_triplet]; + t->chans.first_channel = first_channel; + t->chans.num_channels = num_parsed_chan; + t->chans.max_power = max_pwr; + num_triplet++; + flag = 0; + } + } - break; + if (flag) { + /* Add last triplet */ + lbs_deb_11d("11D triplet (%d, %d, %d)\n", first_channel, + num_parsed_chan, max_pwr); + t = &domain->triplet[num_triplet]; + t->chans.first_channel = first_channel; + t->chans.num_channels = num_parsed_chan; + t->chans.max_power = max_pwr; + num_triplet++; } + } - default: - break; + lbs_deb_11d("# triplets %d\n", num_triplet); + + /* Set command header sizes */ + triplet_size = num_triplet * sizeof(struct ieee80211_country_ie_triplet); + domain->header.len = cpu_to_le16(sizeof(domain->country_code) + + triplet_size); + + lbs_deb_hex(LBS_DEB_11D, "802.11D domain param set", + (u8 *) &cmd.domain.country_code, + le16_to_cpu(domain->header.len)); + + cmd.hdr.size = cpu_to_le16(sizeof(cmd.hdr) + + sizeof(cmd.action) + + sizeof(cmd.domain.header) + + sizeof(cmd.domain.country_code) + + triplet_size); + + ret = lbs_cmd_with_response(priv, CMD_802_11D_DOMAIN_INFO, &cmd); + + lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret); + return ret; +} + +/** + * @brief Read a MAC, Baseband, or RF register + * + * @param priv pointer to struct lbs_private + * @param cmd register command, one of CMD_MAC_REG_ACCESS, + * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS + * @param offset byte offset of the register to get + * @param value on success, the value of the register at 'offset' + * + * @return 0 on success, error 
code on failure +*/ +int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value) +{ + struct cmd_ds_reg_access cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + BUG_ON(value == NULL); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_GET); + + if (reg != CMD_MAC_REG_ACCESS && + reg != CMD_BBP_REG_ACCESS && + reg != CMD_RF_REG_ACCESS) { + ret = -EINVAL; + goto out; } - lbs_deb_leave(LBS_DEB_CMD); - return 0; + ret = lbs_cmd_with_response(priv, reg, &cmd); + if (ret) { + if (reg == CMD_BBP_REG_ACCESS || reg == CMD_RF_REG_ACCESS) + *value = cmd.value.bbp_rf; + else if (reg == CMD_MAC_REG_ACCESS) + *value = le32_to_cpu(cmd.value.mac); + } + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Write a MAC, Baseband, or RF register + * + * @param priv pointer to struct lbs_private + * @param cmd register command, one of CMD_MAC_REG_ACCESS, + * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS + * @param offset byte offset of the register to set + * @param value the value to write to the register at 'offset' + * + * @return 0 on success, error code on failure +*/ +int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value) +{ + struct cmd_ds_reg_access cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + + if (reg == CMD_BBP_REG_ACCESS || reg == CMD_RF_REG_ACCESS) + cmd.value.bbp_rf = (u8) (value & 0xFF); + else if (reg == CMD_MAC_REG_ACCESS) + cmd.value.mac = cpu_to_le32(value); + else { + ret = -EINVAL; + goto out; + } + + ret = lbs_cmd_with_response(priv, reg, &cmd); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; } static void lbs_queue_cmd(struct lbs_private *priv, @@ -704,14 +931,17 @@ static void lbs_queue_cmd(struct lbs_private *priv, /* Exit_PS command needs to be queued in the header always. */ if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) { - struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf[1]; + struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf; - if (psm->action == cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { + if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { if (priv->psstate != PS_STATE_FULL_POWER) addtail = 0; } } + if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_WAKEUP_CONFIRM) + addtail = 0; + spin_lock_irqsave(&priv->driver_lock, flags); if (addtail) @@ -744,7 +974,6 @@ static void lbs_submit_command(struct lbs_private *priv, spin_lock_irqsave(&priv->driver_lock, flags); priv->cur_cmd = cmdnode; - priv->cur_cmd_retcode = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); cmdsize = le16_to_cpu(cmd->size); @@ -817,9 +1046,6 @@ static void lbs_cleanup_and_insert_cmd(struct lbs_private *priv, void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd, int result) { - if (cmd == priv->cur_cmd) - priv->cur_cmd_retcode = result; - cmd->result = result; cmd->cmdwaitqwoken = 1; wake_up_interruptible(&cmd->cmdwait_q); @@ -886,175 +1112,6 @@ void lbs_set_mac_control(struct lbs_private *priv) lbs_deb_leave(LBS_DEB_CMD); } -/** - * @brief This function prepare the command before send to firmware. 
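
The lbs_get_reg()/lbs_set_reg() helpers above replace the old lbs_prepare_and_send_command() register paths with direct commands; a minimal usage sketch from any context that has a struct lbs_private *priv (the 0x0040 offset is an arbitrary illustration, not a documented register):

	u32 val;

	/* read a MAC register, set bit 0, write the result back */
	if (lbs_get_reg(priv, CMD_MAC_REG_ACCESS, 0x0040, &val) == 0)
		lbs_set_reg(priv, CMD_MAC_REG_ACCESS, 0x0040, val | 0x0001);
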
- * - * @param priv A pointer to struct lbs_private structure - * @param cmd_no command number - * @param cmd_action command action: GET or SET - * @param wait_option wait option: wait response or not - * @param cmd_oid cmd oid: treated as sub command - * @param pdata_buf A pointer to informaion buffer - * @return 0 or -1 - */ -int lbs_prepare_and_send_command(struct lbs_private *priv, - u16 cmd_no, - u16 cmd_action, - u16 wait_option, u32 cmd_oid, void *pdata_buf) -{ - int ret = 0; - struct cmd_ctrl_node *cmdnode; - struct cmd_ds_command *cmdptr; - unsigned long flags; - - lbs_deb_enter(LBS_DEB_HOST); - - if (!priv) { - lbs_deb_host("PREP_CMD: priv is NULL\n"); - ret = -1; - goto done; - } - - if (priv->surpriseremoved) { - lbs_deb_host("PREP_CMD: card removed\n"); - ret = -1; - goto done; - } - - if (!lbs_is_cmd_allowed(priv)) { - ret = -EBUSY; - goto done; - } - - cmdnode = lbs_get_cmd_ctrl_node(priv); - - if (cmdnode == NULL) { - lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); - - /* Wake up main thread to execute next command */ - wake_up_interruptible(&priv->waitq); - ret = -1; - goto done; - } - - cmdnode->callback = NULL; - cmdnode->callback_arg = (unsigned long)pdata_buf; - - cmdptr = (struct cmd_ds_command *)cmdnode->cmdbuf; - - lbs_deb_host("PREP_CMD: command 0x%04x\n", cmd_no); - - /* Set sequence number, command and INT option */ - priv->seqnum++; - cmdptr->seqnum = cpu_to_le16(priv->seqnum); - - cmdptr->command = cpu_to_le16(cmd_no); - cmdptr->result = 0; - - switch (cmd_no) { - case CMD_802_11_PS_MODE: - ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action); - break; - - case CMD_MAC_REG_ACCESS: - case CMD_BBP_REG_ACCESS: - case CMD_RF_REG_ACCESS: - ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf); - break; - - case CMD_802_11_MONITOR_MODE: - ret = lbs_cmd_802_11_monitor_mode(cmdptr, - cmd_action, pdata_buf); - break; - - case CMD_802_11_RSSI: - ret = lbs_cmd_802_11_rssi(priv, cmdptr); - break; - - case CMD_802_11_SET_AFC: - case CMD_802_11_GET_AFC: - - cmdptr->command = cpu_to_le16(cmd_no); - cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + - sizeof(struct cmd_header)); - - memmove(&cmdptr->params.afc, - pdata_buf, sizeof(struct cmd_ds_802_11_afc)); - - ret = 0; - goto done; - - case CMD_802_11_TPC_CFG: - cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); - cmdptr->size = - cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + - sizeof(struct cmd_header)); - - memmove(&cmdptr->params.tpccfg, - pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg)); - - ret = 0; - break; - -#ifdef CONFIG_LIBERTAS_MESH - - case CMD_BT_ACCESS: - ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); - break; - - case CMD_FWT_ACCESS: - ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); - break; - -#endif - - case CMD_802_11_BEACON_CTRL: - ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); - break; - case CMD_802_11_DEEP_SLEEP: - cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP); - cmdptr->size = cpu_to_le16(sizeof(struct cmd_header)); - break; - default: - lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no); - ret = -1; - break; - } - - /* return error, since the command preparation failed */ - if (ret != 0) { - lbs_deb_host("PREP_CMD: command preparation failed\n"); - lbs_cleanup_and_insert_cmd(priv, cmdnode); - ret = -1; - goto done; - } - - cmdnode->cmdwaitqwoken = 0; - - lbs_queue_cmd(priv, cmdnode); - wake_up_interruptible(&priv->waitq); - - if (wait_option & CMD_OPTION_WAITFORRSP) { - lbs_deb_host("PREP_CMD: wait for response\n"); - might_sleep(); - 
wait_event_interruptible(cmdnode->cmdwait_q, - cmdnode->cmdwaitqwoken); - } - - spin_lock_irqsave(&priv->driver_lock, flags); - if (priv->cur_cmd_retcode) { - lbs_deb_host("PREP_CMD: command failed with return code %d\n", - priv->cur_cmd_retcode); - priv->cur_cmd_retcode = 0; - ret = -1; - } - spin_unlock_irqrestore(&priv->driver_lock, flags); - -done: - lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); - return ret; -} - /** * @brief This function allocates the command buffer and link * it to command free queue. @@ -1148,7 +1205,7 @@ int lbs_free_cmd_buffer(struct lbs_private *priv) * @param priv A pointer to struct lbs_private structure * @return cmd_ctrl_node A pointer to cmd_ctrl_node structure or NULL */ -static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv) +static struct cmd_ctrl_node *lbs_get_free_cmd_node(struct lbs_private *priv) { struct cmd_ctrl_node *tempnode; unsigned long flags; @@ -1231,10 +1288,10 @@ int lbs_execute_next_command(struct lbs_private *priv) /* * 1. Non-PS command: * Queue it. set needtowakeup to TRUE if current state - * is SLEEP, otherwise call lbs_ps_wakeup to send Exit_PS. - * 2. PS command but not Exit_PS: + * is SLEEP, otherwise call send EXIT_PS. + * 2. PS command but not EXIT_PS: * Ignore it. - * 3. PS command Exit_PS: + * 3. PS command EXIT_PS: * Set needtowakeup to TRUE if current state is SLEEP, * otherwise send this command down to firmware * immediately. @@ -1248,8 +1305,11 @@ int lbs_execute_next_command(struct lbs_private *priv) /* w/ new scheme, it will not reach here. since it is blocked in main_thread. */ priv->needtowakeup = 1; - } else - lbs_ps_wakeup(priv, 0); + } else { + lbs_set_ps_mode(priv, + PS_MODE_ACTION_EXIT_PS, + false); + } ret = 0; goto done; @@ -1264,7 +1324,7 @@ int lbs_execute_next_command(struct lbs_private *priv) "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", psm->action); if (psm->action != - cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { + cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); list_del(&cmdnode->list); @@ -1303,6 +1363,15 @@ int lbs_execute_next_command(struct lbs_private *priv) * check if in power save mode, if yes, put the device back * to PS mode */ +#ifdef TODO + /* + * This was the old code for libertas+wext. Someone that + * understands this beast should re-code it in a sane way. + * + * I actually don't understand why this is related to WPA + * and to connection status, shouldn't powering should be + * independ of such things? + */ if ((priv->psmode != LBS802_11POWERMODECAM) && (priv->psstate == PS_STATE_FULL_POWER) && ((priv->connect_status == LBS_CONNECTED) || @@ -1315,15 +1384,19 @@ int lbs_execute_next_command(struct lbs_private *priv) lbs_deb_host( "EXEC_NEXT_CMD: WPA enabled and GTK_SET" " go back to PS_SLEEP"); - lbs_ps_sleep(priv, 0); + lbs_set_ps_mode(priv, + PS_MODE_ACTION_ENTER_PS, + false); } } else { lbs_deb_host( "EXEC_NEXT_CMD: cmdpendingq empty, " "go back to PS_SLEEP"); - lbs_ps_sleep(priv, 0); + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, + false); } } +#endif } ret = 0; @@ -1353,6 +1426,11 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) /* We don't get a response on the sleep-confirmation */ priv->dnld_sent = DNLD_RES_RECEIVED; + if (priv->is_host_sleep_configured) { + priv->is_host_sleep_activated = 1; + wake_up_interruptible(&priv->host_sleep_q); + } + /* If nothing to do, go back to sleep (?) 
*/ if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) priv->psstate = PS_STATE_SLEEP; @@ -1363,43 +1441,6 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) lbs_deb_leave(LBS_DEB_HOST); } -void lbs_ps_sleep(struct lbs_private *priv, int wait_option) -{ - lbs_deb_enter(LBS_DEB_HOST); - - /* - * PS is currently supported only in Infrastructure mode - * Remove this check if it is to be supported in IBSS mode also - */ - - lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE, - CMD_SUBCMD_ENTER_PS, wait_option, 0, NULL); - - lbs_deb_leave(LBS_DEB_HOST); -} - -/** - * @brief This function sends Exit_PS command to firmware. - * - * @param priv A pointer to struct lbs_private structure - * @param wait_option wait response or not - * @return n/a - */ -void lbs_ps_wakeup(struct lbs_private *priv, int wait_option) -{ - __le32 Localpsmode; - - lbs_deb_enter(LBS_DEB_HOST); - - Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM); - - lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE, - CMD_SUBCMD_EXIT_PS, - wait_option, 0, &Localpsmode); - - lbs_deb_leave(LBS_DEB_HOST); -} - /** * @brief This function checks condition and prepares to * send sleep confirm command to firmware if ok. @@ -1524,12 +1565,18 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, goto done; } - if (!lbs_is_cmd_allowed(priv)) { - cmdnode = ERR_PTR(-EBUSY); - goto done; + /* No commands are allowed in Deep Sleep until we toggle the GPIO + * to wake up the card and it has signaled that it's ready. + */ + if (!priv->is_auto_deep_sleep_enabled) { + if (priv->is_deep_sleep) { + lbs_deb_cmd("command not allowed in deep sleep\n"); + cmdnode = ERR_PTR(-EBUSY); + goto done; + } } - cmdnode = lbs_get_cmd_ctrl_node(priv); + cmdnode = lbs_get_free_cmd_node(priv); if (cmdnode == NULL) { lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h index cb4138a55fdf9057fe13a0b8e1ced435cf0f0572..7109d6b717eace7a3bdae2914978c4ff2bdeb477 100644 --- a/drivers/net/wireless/libertas/cmd.h +++ b/drivers/net/wireless/libertas/cmd.h @@ -3,6 +3,8 @@ #ifndef _LBS_CMD_H_ #define _LBS_CMD_H_ +#include + #include "host.h" #include "dev.h" @@ -37,11 +39,6 @@ struct cmd_ctrl_node { #define lbs_cmd_with_response(priv, cmdnr, cmd) \ lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) -int lbs_prepare_and_send_command(struct lbs_private *priv, - u16 cmd_no, - u16 cmd_action, - u16 wait_option, u32 cmd_oid, void *pdata_buf); - void lbs_cmd_async(struct lbs_private *priv, uint16_t command, struct cmd_header *in_cmd, int in_cmd_size); @@ -92,10 +89,6 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, struct sleep_params *sp); -void lbs_ps_sleep(struct lbs_private *priv, int wait_option); - -void lbs_ps_wakeup(struct lbs_private *priv, int wait_option); - void lbs_ps_confirm_sleep(struct lbs_private *priv); int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); @@ -127,4 +120,20 @@ int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep); +int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep); + +int lbs_set_monitor_mode(struct lbs_private *priv, int enable); + +int lbs_get_rssi(struct lbs_private *priv, s8 *snr, s8 *nf); + +int lbs_set_11d_domain_info(struct lbs_private *priv, + struct regulatory_request *request, + struct ieee80211_supported_band 
**bands); + +int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value); + +int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value); + +int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block); + #endif /* _LBS_CMD_H */ diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index 88f7131d66e9cba7e62f6a46076a2878a3c05716..5e95da9dcc2e2c99064d26d07264e06c1a272cfd 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -5,18 +5,11 @@ #include #include #include -#include -#include #include -#include +#include -#include "host.h" -#include "decl.h" +#include "cfg.h" #include "cmd.h" -#include "defs.h" -#include "dev.h" -#include "assoc.h" -#include "wext.h" /** * @brief This function handles disconnect event. it @@ -38,7 +31,9 @@ void lbs_mac_event_disconnected(struct lbs_private *priv) * It causes problem in the Supplicant */ msleep_interruptible(1000); - lbs_send_disconnect_notification(priv); + + if (priv->wdev->iftype == NL80211_IFTYPE_STATION) + lbs_send_disconnect_notification(priv); /* report disconnect to upper layer */ netif_stop_queue(priv->dev); @@ -49,141 +44,16 @@ void lbs_mac_event_disconnected(struct lbs_private *priv) priv->currenttxskb = NULL; priv->tx_pending_len = 0; - /* reset SNR/NF/RSSI values */ - memset(priv->SNR, 0x00, sizeof(priv->SNR)); - memset(priv->NF, 0x00, sizeof(priv->NF)); - memset(priv->RSSI, 0x00, sizeof(priv->RSSI)); - memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR)); - memset(priv->rawNF, 0x00, sizeof(priv->rawNF)); - priv->nextSNRNF = 0; - priv->numSNRNF = 0; priv->connect_status = LBS_DISCONNECTED; - /* Clear out associated SSID and BSSID since connection is - * no longer valid. - */ - memset(&priv->curbssparams.bssid, 0, ETH_ALEN); - memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN); - priv->curbssparams.ssid_len = 0; - if (priv->psstate != PS_STATE_FULL_POWER) { /* make firmware to exit PS mode */ lbs_deb_cmd("disconnected, so exit PS mode\n"); - lbs_ps_wakeup(priv, 0); + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false); } lbs_deb_leave(LBS_DEB_ASSOC); } -static int lbs_ret_reg_access(struct lbs_private *priv, - u16 type, struct cmd_ds_command *resp) -{ - int ret = 0; - - lbs_deb_enter(LBS_DEB_CMD); - - switch (type) { - case CMD_RET(CMD_MAC_REG_ACCESS): - { - struct cmd_ds_mac_reg_access *reg = &resp->params.macreg; - - priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset); - priv->offsetvalue.value = le32_to_cpu(reg->value); - break; - } - - case CMD_RET(CMD_BBP_REG_ACCESS): - { - struct cmd_ds_bbp_reg_access *reg = &resp->params.bbpreg; - - priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset); - priv->offsetvalue.value = reg->value; - break; - } - - case CMD_RET(CMD_RF_REG_ACCESS): - { - struct cmd_ds_rf_reg_access *reg = &resp->params.rfreg; - - priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset); - priv->offsetvalue.value = reg->value; - break; - } - - default: - ret = -1; - } - - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); - return ret; -} - -static inline int handle_cmd_response(struct lbs_private *priv, - struct cmd_header *cmd_response) -{ - struct cmd_ds_command *resp = (struct cmd_ds_command *) cmd_response; - int ret = 0; - unsigned long flags; - uint16_t respcmd = le16_to_cpu(resp->command); - - lbs_deb_enter(LBS_DEB_HOST); - - switch (respcmd) { - case CMD_RET(CMD_MAC_REG_ACCESS): - case CMD_RET(CMD_BBP_REG_ACCESS): - case CMD_RET(CMD_RF_REG_ACCESS): - ret = 
lbs_ret_reg_access(priv, respcmd, resp); - break; - - case CMD_RET(CMD_802_11_SET_AFC): - case CMD_RET(CMD_802_11_GET_AFC): - spin_lock_irqsave(&priv->driver_lock, flags); - memmove((void *)priv->cur_cmd->callback_arg, &resp->params.afc, - sizeof(struct cmd_ds_802_11_afc)); - spin_unlock_irqrestore(&priv->driver_lock, flags); - - break; - - case CMD_RET(CMD_802_11_BEACON_STOP): - break; - - case CMD_RET(CMD_802_11_RSSI): - ret = lbs_ret_802_11_rssi(priv, resp); - break; - - case CMD_RET(CMD_802_11_TPC_CFG): - spin_lock_irqsave(&priv->driver_lock, flags); - memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg, - sizeof(struct cmd_ds_802_11_tpc_cfg)); - spin_unlock_irqrestore(&priv->driver_lock, flags); - break; - - case CMD_RET(CMD_BT_ACCESS): - spin_lock_irqsave(&priv->driver_lock, flags); - if (priv->cur_cmd->callback_arg) - memcpy((void *)priv->cur_cmd->callback_arg, - &resp->params.bt.addr1, 2 * ETH_ALEN); - spin_unlock_irqrestore(&priv->driver_lock, flags); - break; - case CMD_RET(CMD_FWT_ACCESS): - spin_lock_irqsave(&priv->driver_lock, flags); - if (priv->cur_cmd->callback_arg) - memcpy((void *)priv->cur_cmd->callback_arg, &resp->params.fwt, - sizeof(resp->params.fwt)); - spin_unlock_irqrestore(&priv->driver_lock, flags); - break; - case CMD_RET(CMD_802_11_BEACON_CTRL): - ret = lbs_ret_802_11_bcn_ctrl(priv, resp); - break; - - default: - lbs_pr_err("CMD_RESP: unknown cmd response 0x%04x\n", - le16_to_cpu(resp->command)); - break; - } - lbs_deb_leave(LBS_DEB_HOST); - return ret; -} - int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) { uint16_t respcmd, curcmd; @@ -242,9 +112,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) del_timer(&priv->command_timer); priv->cmd_timed_out = 0; - /* Store the response code to cur_cmd_retcode. */ - priv->cur_cmd_retcode = result; - if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; u16 action = le16_to_cpu(psmode->action); @@ -261,10 +128,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) * ad-hoc mode. It takes place in * lbs_execute_next_command(). 
*/ - if (priv->mode == IW_MODE_ADHOC && - action == CMD_SUBCMD_ENTER_PS) + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR && + action == PS_MODE_ACTION_ENTER_PS) priv->psmode = LBS802_11POWERMODECAM; - } else if (action == CMD_SUBCMD_ENTER_PS) { + } else if (action == PS_MODE_ACTION_ENTER_PS) { priv->needtowakeup = 0; priv->psstate = PS_STATE_AWAKE; @@ -279,11 +146,12 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) spin_unlock_irqrestore(&priv->driver_lock, flags); mutex_unlock(&priv->lock); - lbs_ps_wakeup(priv, 0); + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, + false); mutex_lock(&priv->lock); spin_lock_irqsave(&priv->driver_lock, flags); } - } else if (action == CMD_SUBCMD_EXIT_PS) { + } else if (action == PS_MODE_ACTION_EXIT_PS) { priv->needtowakeup = 0; priv->psstate = PS_STATE_FULL_POWER; lbs_deb_host("CMD_RESP: EXIT_PS command response\n"); @@ -324,8 +192,7 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) if (priv->cur_cmd && priv->cur_cmd->callback) { ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg, resp); - } else - ret = handle_cmd_response(priv, resp); + } spin_lock_irqsave(&priv->driver_lock, flags); @@ -341,32 +208,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) return ret; } -static int lbs_send_confirmwake(struct lbs_private *priv) -{ - struct cmd_header cmd; - int ret = 0; - - lbs_deb_enter(LBS_DEB_HOST); - - cmd.command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM); - cmd.size = cpu_to_le16(sizeof(cmd)); - cmd.seqnum = cpu_to_le16(++priv->seqnum); - cmd.result = 0; - - lbs_deb_hex(LBS_DEB_HOST, "wake confirm", (u8 *) &cmd, - sizeof(cmd)); - - ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &cmd, sizeof(cmd)); - if (ret) - lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n"); - - lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); - return ret; -} - int lbs_process_event(struct lbs_private *priv, u32 event) { int ret = 0; + struct cmd_header cmd; lbs_deb_enter(LBS_DEB_CMD); @@ -410,7 +255,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event) if (priv->reset_deep_sleep_wakeup) priv->reset_deep_sleep_wakeup(priv); priv->is_deep_sleep = 0; - lbs_send_confirmwake(priv); + lbs_cmd_async(priv, CMD_802_11_WAKEUP_CONFIRM, &cmd, + sizeof(cmd)); + priv->is_host_sleep_activated = 0; + wake_up_interruptible(&priv->host_sleep_q); break; case MACREG_INT_CODE_DEEP_SLEEP_AWAKE: @@ -441,7 +289,7 @@ int lbs_process_event(struct lbs_private *priv, u32 event) * in lbs_ps_wakeup() */ lbs_deb_cmd("waking up ...\n"); - lbs_ps_wakeup(priv, 0); + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false); } break; diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index de2caac11dd61a37cc9d0ee591db84616f825f33..651a79c8de8a0d5cb59372a37720fddbc5dfd70c 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -1,18 +1,13 @@ -#include #include #include #include #include #include #include -#include -#include -#include "dev.h" #include "decl.h" -#include "host.h" -#include "debugfs.h" #include "cmd.h" +#include "debugfs.h" static struct dentry *lbs_dir; static char *szStates[] = { @@ -60,51 +55,6 @@ static ssize_t lbs_dev_info(struct file *file, char __user *userbuf, return res; } - -static ssize_t lbs_getscantable(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct lbs_private *priv = file->private_data; - size_t pos = 0; - int numscansdone = 0, 
res; - unsigned long addr = get_zeroed_page(GFP_KERNEL); - char *buf = (char *)addr; - DECLARE_SSID_BUF(ssid); - struct bss_descriptor * iter_bss; - if (!buf) - return -ENOMEM; - - pos += snprintf(buf+pos, len-pos, - "# | ch | rssi | bssid | cap | Qual | SSID\n"); - - mutex_lock(&priv->lock); - list_for_each_entry (iter_bss, &priv->network_list, list) { - u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS); - u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY); - u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT); - - pos += snprintf(buf+pos, len-pos, "%02u| %03d | %04d | %pM |", - numscansdone, iter_bss->channel, iter_bss->rssi, - iter_bss->bssid); - pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability); - pos += snprintf(buf+pos, len-pos, "%c%c%c |", - ibss ? 'A' : 'I', privacy ? 'P' : ' ', - spectrum_mgmt ? 'S' : ' '); - pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi)); - pos += snprintf(buf+pos, len-pos, " %s\n", - print_ssid(ssid, iter_bss->ssid, - iter_bss->ssid_len)); - - numscansdone++; - } - mutex_unlock(&priv->lock); - - res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); - - free_page(addr); - return res; -} - static ssize_t lbs_sleepparams_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -174,6 +124,70 @@ static ssize_t lbs_sleepparams_read(struct file *file, char __user *userbuf, return ret; } +static ssize_t lbs_host_sleep_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t buf_size, ret; + int host_sleep; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, user_buf, buf_size)) { + ret = -EFAULT; + goto out_unlock; + } + ret = sscanf(buf, "%d", &host_sleep); + if (ret != 1) { + ret = -EINVAL; + goto out_unlock; + } + + if (host_sleep == 0) + ret = lbs_set_host_sleep(priv, 0); + else if (host_sleep == 1) { + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { + lbs_pr_info("wake parameters not configured"); + ret = -EINVAL; + goto out_unlock; + } + ret = lbs_set_host_sleep(priv, 1); + } else { + lbs_pr_err("invalid option\n"); + ret = -EINVAL; + } + + if (!ret) + ret = count; + +out_unlock: + free_page(addr); + return ret; +} + +static ssize_t lbs_host_sleep_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t ret; + size_t pos = 0; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + pos += snprintf(buf, len, "%d\n", priv->is_host_sleep_activated); + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + + free_page(addr); + return ret; +} + /* * When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, me might * get a bunch of vendor-specific TLVs (a.k.a. 
IEs) back from the @@ -432,30 +446,24 @@ static ssize_t lbs_bcnmiss_write(struct file *file, const char __user *userbuf, } - static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - struct lbs_offset_value offval; ssize_t pos = 0; int ret; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; + u32 val = 0; + if (!buf) return -ENOMEM; - offval.offset = priv->mac_offset; - offval.value = 0; - - ret = lbs_prepare_and_send_command(priv, - CMD_MAC_REG_ACCESS, 0, - CMD_OPTION_WAITFORRSP, 0, &offval); + ret = lbs_get_reg(priv, CMD_MAC_REG_ACCESS, priv->mac_offset, &val); mdelay(10); if (!ret) { - pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", - priv->mac_offset, priv->offsetvalue.value); - + pos = snprintf(buf, len, "MAC[0x%x] = 0x%08x\n", + priv->mac_offset, val); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); } free_page(addr); @@ -493,7 +501,6 @@ static ssize_t lbs_wrmac_write(struct file *file, struct lbs_private *priv = file->private_data; ssize_t res, buf_size; u32 offset, value; - struct lbs_offset_value offval; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; if (!buf) @@ -510,11 +517,7 @@ static ssize_t lbs_wrmac_write(struct file *file, goto out_unlock; } - offval.offset = offset; - offval.value = value; - res = lbs_prepare_and_send_command(priv, - CMD_MAC_REG_ACCESS, 1, - CMD_OPTION_WAITFORRSP, 0, &offval); + res = lbs_set_reg(priv, CMD_MAC_REG_ACCESS, offset, value); mdelay(10); if (!res) @@ -528,25 +531,20 @@ static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - struct lbs_offset_value offval; ssize_t pos = 0; int ret; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; + u32 val; + if (!buf) return -ENOMEM; - offval.offset = priv->bbp_offset; - offval.value = 0; - - ret = lbs_prepare_and_send_command(priv, - CMD_BBP_REG_ACCESS, 0, - CMD_OPTION_WAITFORRSP, 0, &offval); + ret = lbs_get_reg(priv, CMD_BBP_REG_ACCESS, priv->bbp_offset, &val); mdelay(10); if (!ret) { - pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", - priv->bbp_offset, priv->offsetvalue.value); - + pos = snprintf(buf, len, "BBP[0x%x] = 0x%08x\n", + priv->bbp_offset, val); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); } free_page(addr); @@ -585,7 +583,6 @@ static ssize_t lbs_wrbbp_write(struct file *file, struct lbs_private *priv = file->private_data; ssize_t res, buf_size; u32 offset, value; - struct lbs_offset_value offval; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; if (!buf) @@ -602,11 +599,7 @@ static ssize_t lbs_wrbbp_write(struct file *file, goto out_unlock; } - offval.offset = offset; - offval.value = value; - res = lbs_prepare_and_send_command(priv, - CMD_BBP_REG_ACCESS, 1, - CMD_OPTION_WAITFORRSP, 0, &offval); + res = lbs_set_reg(priv, CMD_BBP_REG_ACCESS, offset, value); mdelay(10); if (!res) @@ -620,25 +613,20 @@ static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct lbs_private *priv = file->private_data; - struct lbs_offset_value offval; ssize_t pos = 0; int ret; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; + u32 val; + if (!buf) return -ENOMEM; - offval.offset = priv->rf_offset; - offval.value = 0; - - ret = lbs_prepare_and_send_command(priv, - CMD_RF_REG_ACCESS, 0, - 
CMD_OPTION_WAITFORRSP, 0, &offval); + ret = lbs_get_reg(priv, CMD_RF_REG_ACCESS, priv->rf_offset, &val); mdelay(10); if (!ret) { - pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", - priv->rf_offset, priv->offsetvalue.value); - + pos = snprintf(buf, len, "RF[0x%x] = 0x%08x\n", + priv->rf_offset, val); ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); } free_page(addr); @@ -677,7 +665,6 @@ static ssize_t lbs_wrrf_write(struct file *file, struct lbs_private *priv = file->private_data; ssize_t res, buf_size; u32 offset, value; - struct lbs_offset_value offval; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; if (!buf) @@ -694,11 +681,7 @@ static ssize_t lbs_wrrf_write(struct file *file, goto out_unlock; } - offval.offset = offset; - offval.value = value; - res = lbs_prepare_and_send_command(priv, - CMD_RF_REG_ACCESS, 1, - CMD_OPTION_WAITFORRSP, 0, &offval); + res = lbs_set_reg(priv, CMD_RF_REG_ACCESS, offset, value); mdelay(10); if (!res) @@ -723,10 +706,10 @@ struct lbs_debugfs_files { static const struct lbs_debugfs_files debugfs_files[] = { { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, - { "getscantable", 0444, FOPS(lbs_getscantable, - write_file_dummy), }, { "sleepparams", 0644, FOPS(lbs_sleepparams_read, lbs_sleepparams_write), }, + { "hostsleep", 0644, FOPS(lbs_host_sleep_read, + lbs_host_sleep_write), }, }; static const struct lbs_debugfs_files debugfs_events_files[] = { @@ -891,7 +874,7 @@ static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf, p = buf; - d = (struct debug_data *)file->private_data; + d = file->private_data; for (i = 0; i < num_of_items; i++) { if (d[i].size == 1) @@ -930,7 +913,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, char *p0; char *p1; char *p2; - struct debug_data *d = (struct debug_data *)f->private_data; + struct debug_data *d = f->private_data; pdata = kmalloc(cnt, GFP_KERNEL); if (pdata == NULL) diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h index 709ffcad22ad68953104b0f5fbfd8b4f6c5e5f4b..1d141fefd7673bf89c9a5554aa6f6bcbb49aac9e 100644 --- a/drivers/net/wireless/libertas/decl.h +++ b/drivers/net/wireless/libertas/decl.h @@ -1,3 +1,4 @@ + /** * This file contains declaration referring to * functions defined in other source files @@ -12,6 +13,7 @@ struct lbs_private; struct sk_buff; struct net_device; +struct cmd_ds_command; /* ethtool.c */ @@ -34,11 +36,13 @@ int lbs_start_card(struct lbs_private *priv); void lbs_stop_card(struct lbs_private *priv); void lbs_host_to_card_done(struct lbs_private *priv); +int lbs_rtap_supported(struct lbs_private *priv); + int lbs_set_mac_address(struct net_device *dev, void *addr); void lbs_set_multicast_list(struct net_device *dev); int lbs_suspend(struct lbs_private *priv); -void lbs_resume(struct lbs_private *priv); +int lbs_resume(struct lbs_private *priv); void lbs_queue_event(struct lbs_private *priv, u32 event); void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx); @@ -49,5 +53,4 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv); u32 lbs_fw_index_to_data_rate(u8 index); u8 lbs_data_rate_to_fw_index(u32 rate); - #endif diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h index ea3f10ef4e0032c845a15cf973b1c9ffe925b504..d00c728cec47cdd731c68ff64190c7054d374e5e 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/libertas/defs.h @@ -172,11 +172,6 @@ static inline void lbs_deb_hex(unsigned 
int grp, const char *prompt, u8 *buf, in #define MRVDRV_MAX_BSS_DESCRIPTS 16 #define MRVDRV_MAX_REGION_CODE 6 -#define MRVDRV_IGNORE_MULTIPLE_DTIM 0xfffe -#define MRVDRV_MIN_MULTIPLE_DTIM 1 -#define MRVDRV_MAX_MULTIPLE_DTIM 5 -#define MRVDRV_DEFAULT_MULTIPLE_DTIM 1 - #define MRVDRV_DEFAULT_LISTEN_INTERVAL 10 #define MRVDRV_CHANNELS_PER_SCAN 4 @@ -301,19 +296,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in #define BAND_G (0x02) #define ALL_802_11_BANDS (BAND_B | BAND_G) -/** MACRO DEFINITIONS */ -#define CAL_NF(NF) ((s32)(-(s32)(NF))) -#define CAL_RSSI(SNR, NF) ((s32)((s32)(SNR) + CAL_NF(NF))) -#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) - -#define DEFAULT_BCN_AVG_FACTOR 8 -#define DEFAULT_DATA_AVG_FACTOR 8 -#define AVG_SCALE 100 -#define CAL_AVG_SNR_NF(AVG, SNRNF, N) \ - (((AVG) == 0) ? ((u16)(SNRNF) * AVG_SCALE) : \ - ((((int)(AVG) * (N -1)) + ((u16)(SNRNF) * \ - AVG_SCALE)) / N)) - #define MAX_RATES 14 #define MAX_LEDS 8 diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index a54880e4ad2b99e00eeb0c3114bd796adafbd53b..3c7e255e18c7617e34af21c23af288f68047ab34 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -7,8 +7,8 @@ #define _LBS_DEV_H_ #include "mesh.h" -#include "scan.h" -#include "assoc.h" +#include "defs.h" +#include "host.h" #include @@ -29,7 +29,6 @@ struct lbs_private { /* Basic networking */ struct net_device *dev; u32 connect_status; - int infra_open; struct work_struct mcast_work; u32 nr_of_multicastmacaddr; u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN]; @@ -37,6 +36,9 @@ struct lbs_private { /* CFG80211 */ struct wireless_dev *wdev; bool wiphy_registered; + struct cfg80211_scan_request *scan_req; + u8 assoc_bss[ETH_ALEN]; + u8 disassoc_reason; /* Mesh */ struct net_device *mesh_dev; /* Virtual device */ @@ -49,10 +51,6 @@ struct lbs_private { u8 mesh_ssid_len; #endif - /* Monitor mode */ - struct net_device *rtap_net_dev; - u32 monitormode; - /* Debugfs */ struct dentry *debugfs_dir; struct dentry *debugfs_debug; @@ -66,7 +64,6 @@ struct lbs_private { u32 mac_offset; u32 bbp_offset; u32 rf_offset; - struct lbs_offset_value offsetvalue; /* Power management */ u16 psmode; @@ -75,6 +72,7 @@ struct lbs_private { /* Deep sleep */ int is_deep_sleep; + int deep_sleep_required; int is_auto_deep_sleep_enabled; int wakeup_dev_required; int is_activity_detected; @@ -82,6 +80,11 @@ struct lbs_private { wait_queue_head_t ds_awake_q; struct timer_list auto_deepsleep_timer; + /* Host sleep*/ + int is_host_sleep_configured; + int is_host_sleep_activated; + wait_queue_head_t host_sleep_q; + /* Hardware access */ void *card; u8 fw_ready; @@ -108,12 +111,10 @@ struct lbs_private { struct cmd_ctrl_node *cur_cmd; struct list_head cmdfreeq; /* free command buffers */ struct list_head cmdpendingq; /* pending command buffers */ - wait_queue_head_t cmd_pending; struct timer_list command_timer; int cmd_timed_out; /* Command responses sent from the hardware to the driver */ - int cur_cmd_retcode; u8 resp_idx; u8 resp_buf[2][LBS_UPLD_SIZE]; u32 resp_len[2]; @@ -127,14 +128,10 @@ struct lbs_private { struct workqueue_struct *work_thread; /** Encryption stuff */ - struct lbs_802_11_security secinfo; - struct enc_key wpa_mcast_key; - struct enc_key wpa_unicast_key; - u8 wpa_ie[MAX_WPA_IE_LEN]; - u8 wpa_ie_len; - u16 wep_tx_keyidx; - struct enc_key wep_keys[4]; u8 authtype_auto; + u8 wep_tx_key; + u8 wep_key[4][WLAN_KEY_LEN_WEP104]; + u8 wep_key_len[4]; /* Wake On LAN */ 
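
With this series the ethtool WOL settings are only cached in wol_criteria (see the ethtool.c hunk below); they reach the firmware when host sleep is armed. A minimal sketch of the resulting sequence, assuming the debugfs "hostsleep" file added earlier in this patch is the trigger:

	/* cached by lbs_ethtool_set_wol(), e.g. via `ethtool -s <dev> wol u' */
	priv->wol_criteria = EHS_WAKE_ON_UNICAST_DATA;

	/* armed later, e.g. from the debugfs hostsleep write handler */
	lbs_set_host_sleep(priv, 1);
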
uint32_t wol_criteria; @@ -155,6 +152,7 @@ struct lbs_private { /* NIC/link operation characteristics */ u16 mac_control; u8 radio_on; + u8 cur_rate; u8 channel; s16 txpower_cur; s16 txpower_min; @@ -163,42 +161,6 @@ struct lbs_private { /** Scanning */ struct delayed_work scan_work; int scan_channel; - /* remember which channel was scanned last, != 0 if currently scanning */ - u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1]; - u8 scan_ssid_len; - - /* Associating */ - struct delayed_work assoc_work; - struct current_bss_params curbssparams; - u8 mode; - struct list_head network_list; - struct list_head network_free_list; - struct bss_descriptor *networks; - struct assoc_request * pending_assoc_req; - struct assoc_request * in_progress_assoc_req; - uint16_t enablehwauto; - - /* ADHOC */ - u16 beacon_period; - u8 beacon_enable; - u8 adhoccreate; - - /* WEXT */ - char name[DEV_NAME_LEN]; - u8 nodename[16]; - struct iw_statistics wstats; - u8 cur_rate; -#define MAX_REGION_CHANNEL_NUM 2 - struct region_channel region_channel[MAX_REGION_CHANNEL_NUM]; - - /** Requested Signal Strength*/ - u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; - u16 NF[MAX_TYPE_B][MAX_TYPE_AVG]; - u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG]; - u8 rawSNR[DEFAULT_DATA_AVG_FACTOR]; - u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; - u16 nextSNRNF; - u16 numSNRNF; }; extern struct cmd_confirm_sleep confirm_sleep; diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index 3804a58d7f4e8903cb8788457c8b99f8bd469419..50193aac679e81b5c0a157e933db006958fa4463 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c @@ -2,13 +2,8 @@ #include #include -#include "host.h" #include "decl.h" -#include "defs.h" -#include "dev.h" -#include "wext.h" #include "cmd.h" -#include "mesh.h" static void lbs_ethtool_get_drvinfo(struct net_device *dev, @@ -69,14 +64,11 @@ static void lbs_ethtool_get_wol(struct net_device *dev, { struct lbs_private *priv = dev->ml_priv; - if (priv->wol_criteria == 0xffffffff) { - /* Interface driver didn't configure wake */ - wol->supported = wol->wolopts = 0; - return; - } - wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY; + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) + return; + if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA) wol->wolopts |= WAKE_UCAST; if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA) @@ -91,23 +83,22 @@ static int lbs_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct lbs_private *priv = dev->ml_priv; - uint32_t criteria = 0; if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY)) return -EOPNOTSUPP; + priv->wol_criteria = 0; if (wol->wolopts & WAKE_UCAST) - criteria |= EHS_WAKE_ON_UNICAST_DATA; + priv->wol_criteria |= EHS_WAKE_ON_UNICAST_DATA; if (wol->wolopts & WAKE_MCAST) - criteria |= EHS_WAKE_ON_MULTICAST_DATA; + priv->wol_criteria |= EHS_WAKE_ON_MULTICAST_DATA; if (wol->wolopts & WAKE_BCAST) - criteria |= EHS_WAKE_ON_BROADCAST_DATA; + priv->wol_criteria |= EHS_WAKE_ON_BROADCAST_DATA; if (wol->wolopts & WAKE_PHY) - criteria |= EHS_WAKE_ON_MAC_EVENT; + priv->wol_criteria |= EHS_WAKE_ON_MAC_EVENT; if (wol->wolopts == 0) - criteria |= EHS_REMOVE_WAKEUP; - - return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL); + priv->wol_criteria |= EHS_REMOVE_WAKEUP; + return 0; } const struct ethtool_ops lbs_ethtool_ops = { diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h index 3809c0b4946455786ebacd4736506f83fd7882d6..5eac1351a02133e692074895bdb625aa28e28212 
100644 --- a/drivers/net/wireless/libertas/host.h +++ b/drivers/net/wireless/libertas/host.h @@ -94,11 +94,9 @@ #define CMD_802_11_BEACON_CTRL 0x00b0 /* For the IEEE Power Save */ -#define CMD_SUBCMD_ENTER_PS 0x0030 -#define CMD_SUBCMD_EXIT_PS 0x0031 -#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 -#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 -#define CMD_SUBCMD_FULL_POWERUP 0x0036 +#define PS_MODE_ACTION_ENTER_PS 0x0030 +#define PS_MODE_ACTION_EXIT_PS 0x0031 +#define PS_MODE_ACTION_SLEEP_CONFIRMED 0x0034 #define CMD_ENABLE_RSN 0x0001 #define CMD_DISABLE_RSN 0x0000 @@ -163,11 +161,6 @@ #define CMD_ACT_SET_TX_FIX_RATE 0x0001 #define CMD_ACT_GET_TX_RATE 0x0002 -/* Define action or option for CMD_802_11_PS_MODE */ -#define CMD_TYPE_CAM 0x0000 -#define CMD_TYPE_MAX_PSP 0x0001 -#define CMD_TYPE_FAST_PSP 0x0002 - /* Options for CMD_802_11_FW_WAKE_METHOD */ #define CMD_WAKE_METHOD_UNCHANGED 0x0000 #define CMD_WAKE_METHOD_COMMAND_INT 0x0001 @@ -326,7 +319,7 @@ struct txpd { u8 pktdelay_2ms; /* reserved */ u8 reserved1; -} __attribute__ ((packed)); +} __packed; /* RxPD Descriptor */ struct rxpd { @@ -339,8 +332,8 @@ struct rxpd { u8 bss_type; /* BSS number */ u8 bss_num; - } __attribute__ ((packed)) bss; - } __attribute__ ((packed)) u; + } __packed bss; + } __packed u; /* SNR */ u8 snr; @@ -366,14 +359,14 @@ struct rxpd { /* Pkt Priority */ u8 priority; u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; struct cmd_header { __le16 command; __le16 size; __le16 seqnum; __le16 result; -} __attribute__ ((packed)); +} __packed; /* Generic structure to hold all key types. */ struct enc_key { @@ -387,7 +380,23 @@ struct enc_key { struct lbs_offset_value { u32 offset; u32 value; -} __attribute__ ((packed)); +} __packed; + +#define MAX_11D_TRIPLETS 83 + +struct mrvl_ie_domain_param_set { + struct mrvl_ie_header header; + + u8 country_code[3]; + struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS]; +} __packed; + +struct cmd_ds_802_11d_domain_info { + struct cmd_header hdr; + + __le16 action; + struct mrvl_ie_domain_param_set domain; +} __packed; /* * Define data structure for CMD_GET_HW_SPEC @@ -426,7 +435,7 @@ struct cmd_ds_get_hw_spec { /*FW/HW capability */ __le32 fwcapinfo; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_subscribe_event { struct cmd_header hdr; @@ -440,7 +449,7 @@ struct cmd_ds_802_11_subscribe_event { * bump this up a bit. 
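All of these command structures describe the exact byte layout the firmware expects, which is why every one of them carries the packed attribute (now spelled with the kernel's __packed shorthand). A small illustrative struct, not taken from the driver, showing what the attribute prevents:

	/* Illustrative only: without __packed the compiler may pad 'flag'
	 * to align 'value', so sizeof() would no longer match the byte
	 * stream exchanged with the firmware. */
	struct example_fw_record {
		__le16 type;
		u8     flag;
		__le32 value;	/* would normally start at offset 4, not 3 */
	} __packed;		/* sizeof() is 7 packed, 8 unpacked */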
*/ uint8_t tlv[128]; -} __attribute__ ((packed)); +} __packed; /* * This scan handle Country Information IE(802.11d compliant) @@ -452,7 +461,7 @@ struct cmd_ds_802_11_scan { uint8_t bsstype; uint8_t bssid[ETH_ALEN]; uint8_t tlvbuffer[0]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_scan_rsp { struct cmd_header hdr; @@ -460,7 +469,7 @@ struct cmd_ds_802_11_scan_rsp { __le16 bssdescriptsize; uint8_t nr_sets; uint8_t bssdesc_and_tlvbuffer[0]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_get_log { struct cmd_header hdr; @@ -478,20 +487,20 @@ struct cmd_ds_802_11_get_log { __le32 fcserror; __le32 txframe; __le32 wepundecryptable; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_mac_control { struct cmd_header hdr; __le16 action; u16 reserved; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_mac_multicast_adr { struct cmd_header hdr; __le16 action; __le16 nr_of_adrs; u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_authenticate { struct cmd_header hdr; @@ -499,14 +508,14 @@ struct cmd_ds_802_11_authenticate { u8 bssid[ETH_ALEN]; u8 authtype; u8 reserved[10]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_deauthenticate { struct cmd_header hdr; u8 macaddr[ETH_ALEN]; __le16 reasoncode; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_associate { struct cmd_header hdr; @@ -517,7 +526,7 @@ struct cmd_ds_802_11_associate { __le16 bcnperiod; u8 dtimperiod; u8 iebuf[512]; /* Enough for required and most optional IEs */ -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_associate_response { struct cmd_header hdr; @@ -526,7 +535,7 @@ struct cmd_ds_802_11_associate_response { __le16 statuscode; __le16 aid; u8 iebuf[512]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_set_wep { struct cmd_header hdr; @@ -540,7 +549,7 @@ struct cmd_ds_802_11_set_wep { /* 40, 128bit or TXWEP */ uint8_t keytype[4]; uint8_t keymaterial[4][16]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_snmp_mib { struct cmd_header hdr; @@ -549,40 +558,33 @@ struct cmd_ds_802_11_snmp_mib { __le16 oid; __le16 bufsize; u8 value[128]; -} __attribute__ ((packed)); - -struct cmd_ds_mac_reg_access { - __le16 action; - __le16 offset; - __le32 value; -} __attribute__ ((packed)); +} __packed; -struct cmd_ds_bbp_reg_access { - __le16 action; - __le16 offset; - u8 value; - u8 reserved[3]; -} __attribute__ ((packed)); +struct cmd_ds_reg_access { + struct cmd_header hdr; -struct cmd_ds_rf_reg_access { __le16 action; __le16 offset; - u8 value; - u8 reserved[3]; -} __attribute__ ((packed)); + union { + u8 bbp_rf; /* for BBP and RF registers */ + __le32 mac; /* for MAC registers */ + } value; +} __packed; struct cmd_ds_802_11_radio_control { struct cmd_header hdr; __le16 action; __le16 control; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_beacon_control { + struct cmd_header hdr; + __le16 action; __le16 beacon_enable; __le16 beacon_period; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_sleep_params { struct cmd_header hdr; @@ -607,7 +609,7 @@ struct cmd_ds_802_11_sleep_params { /* reserved field, should be set to zero */ __le16 reserved; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_rf_channel { struct cmd_header hdr; @@ -617,30 +619,30 @@ struct cmd_ds_802_11_rf_channel { __le16 rftype; /* unused */ __le16 reserved; /* unused */ u8 channellist[32]; /* unused */ -} __attribute__ ((packed)); +} __packed; struct 
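The three per-bank register structures are folded into a single cmd_ds_reg_access whose value union is interpreted according to the command ID. A hedged usage sketch (illustrative, not taken from the driver) of reading a MAC register through the consolidated layout:

	/* Illustrative sketch only: the register bank is selected by the
	 * command ID (CMD_MAC_REG_ACCESS, CMD_BBP_REG_ACCESS or
	 * CMD_RF_REG_ACCESS); the offset here is arbitrary. */
	static int example_read_mac_reg(struct lbs_private *priv, u16 offset,
					u32 *val)
	{
		struct cmd_ds_reg_access reg;
		int ret;

		memset(&reg, 0, sizeof(reg));
		reg.hdr.size = cpu_to_le16(sizeof(reg));
		reg.action = cpu_to_le16(CMD_ACT_GET);
		reg.offset = cpu_to_le16(offset);

		ret = lbs_cmd_with_response(priv, CMD_MAC_REG_ACCESS, &reg);
		if (!ret)
			*val = le32_to_cpu(reg.value.mac); /* BBP/RF reads use value.bbp_rf */
		return ret;
	}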
cmd_ds_802_11_rssi { - /* weighting factor */ - __le16 N; + struct cmd_header hdr; - __le16 reserved_0; - __le16 reserved_1; - __le16 reserved_2; -} __attribute__ ((packed)); + /* request: number of beacons (N) to average the SNR and NF over + * response: SNR of most recent beacon + */ + __le16 n_or_snr; -struct cmd_ds_802_11_rssi_rsp { - __le16 SNR; - __le16 noisefloor; - __le16 avgSNR; - __le16 avgnoisefloor; -} __attribute__ ((packed)); + /* The following fields are only set in the response. + * In the request these are reserved and should be set to 0. + */ + __le16 nf; /* most recent beacon noise floor */ + __le16 avg_snr; /* average SNR weighted by N from request */ + __le16 avg_nf; /* average noise floor weighted by N from request */ +} __packed; struct cmd_ds_802_11_mac_address { struct cmd_header hdr; __le16 action; u8 macadd[ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_rf_tx_power { struct cmd_header hdr; @@ -649,34 +651,61 @@ struct cmd_ds_802_11_rf_tx_power { __le16 curlevel; s8 maxlevel; s8 minlevel; -} __attribute__ ((packed)); +} __packed; +/* MONITOR_MODE only exists in OLPC v5 firmware */ struct cmd_ds_802_11_monitor_mode { + struct cmd_header hdr; + __le16 action; __le16 mode; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_set_boot2_ver { struct cmd_header hdr; __le16 action; __le16 version; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_fw_wake_method { struct cmd_header hdr; __le16 action; __le16 method; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_ps_mode { + struct cmd_header hdr; + __le16 action; + + /* Interval for keepalive in PS mode: + * 0x0000 = don't change + * 0x001E = firmware default + * 0xFFFF = disable + */ __le16 nullpktinterval; + + /* Number of DTIM intervals to wake up for: + * 0 = don't change + * 1 = firmware default + * 5 = max + */ __le16 multipledtim; + __le16 reserved; __le16 locallisteninterval; -} __attribute__ ((packed)); + + /* AdHoc awake period (FW v9+ only): + * 0 = don't change + * 1 = always awake (IEEE standard behavior) + * 2 - 31 = sleep for (n - 1) periods and awake for 1 period + * 32 - 254 = invalid + * 255 = sleep at each ATIM + */ + __le16 adhoc_awake_period; +} __packed; struct cmd_confirm_sleep { struct cmd_header hdr; @@ -686,7 +715,7 @@ struct cmd_confirm_sleep { __le16 multipledtim; __le16 reserved; __le16 locallisteninterval; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_data_rate { struct cmd_header hdr; @@ -694,14 +723,14 @@ struct cmd_ds_802_11_data_rate { __le16 action; __le16 reserved; u8 rates[MAX_RATES]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_rate_adapt_rateset { struct cmd_header hdr; __le16 action; __le16 enablehwauto; __le16 bitmap; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_ad_hoc_start { struct cmd_header hdr; @@ -718,14 +747,14 @@ struct cmd_ds_802_11_ad_hoc_start { __le16 capability; u8 rates[MAX_RATES]; u8 tlv_memory_size_pad[100]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_ad_hoc_result { struct cmd_header hdr; u8 pad[3]; u8 bssid[ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; struct adhoc_bssdesc { u8 bssid[ETH_ALEN]; @@ -746,7 +775,7 @@ struct adhoc_bssdesc { * Adhoc join command and will cause a binary layout mismatch with * the firmware */ -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_ad_hoc_join { struct cmd_header hdr; @@ -754,18 +783,18 @@ struct cmd_ds_802_11_ad_hoc_join { struct adhoc_bssdesc bss; __le16 failtimeout; /* 
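The old request/response pair for CMD_802_11_RSSI is merged into a single structure; the first field carries the averaging factor N on the way out and the latest beacon SNR on the way back. An illustrative caller (not part of this patch), assuming the usual CAL_RSSI() helper from defs.h:

	/* Illustrative sketch: ask the firmware to average SNR/NF over 8
	 * beacons and derive a signal level from the response. */
	static int example_get_rssi(struct lbs_private *priv, s32 *rssi)
	{
		struct cmd_ds_802_11_rssi cmd;
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.size = cpu_to_le16(sizeof(cmd));
		cmd.n_or_snr = cpu_to_le16(8);	/* request: average over N = 8 */

		ret = lbs_cmd_with_response(priv, CMD_802_11_RSSI, &cmd);
		if (!ret)
			/* response: n_or_snr now holds the latest beacon SNR */
			*rssi = CAL_RSSI(le16_to_cpu(cmd.n_or_snr),
					 le16_to_cpu(cmd.nf));
		return ret;
	}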
Reserved on v9 and later */ __le16 probedelay; /* Reserved on v9 and later */ -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_ad_hoc_stop { struct cmd_header hdr; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_enable_rsn { struct cmd_header hdr; __le16 action; __le16 enable; -} __attribute__ ((packed)); +} __packed; struct MrvlIEtype_keyParamSet { /* type ID */ @@ -785,7 +814,7 @@ struct MrvlIEtype_keyParamSet { /* key material of size keylen */ u8 key[32]; -} __attribute__ ((packed)); +} __packed; #define MAX_WOL_RULES 16 @@ -797,7 +826,7 @@ struct host_wol_rule { __le16 reserve; __be32 sig_mask; __be32 signature; -} __attribute__ ((packed)); +} __packed; struct wol_config { uint8_t action; @@ -805,7 +834,7 @@ struct wol_config { uint8_t no_rules_in_cmd; uint8_t result; struct host_wol_rule rule[MAX_WOL_RULES]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_host_sleep { struct cmd_header hdr; @@ -813,7 +842,7 @@ struct cmd_ds_host_sleep { uint8_t gpio; uint16_t gap; struct wol_config wol_conf; -} __attribute__ ((packed)); +} __packed; @@ -822,7 +851,7 @@ struct cmd_ds_802_11_key_material { __le16 action; struct MrvlIEtype_keyParamSet keyParamSet[2]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_eeprom_access { struct cmd_header hdr; @@ -832,7 +861,7 @@ struct cmd_ds_802_11_eeprom_access { /* firmware says it returns a maximum of 20 bytes */ #define LBS_EEPROM_READ_LEN 20 u8 value[LBS_EEPROM_READ_LEN]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_tpc_cfg { struct cmd_header hdr; @@ -843,7 +872,7 @@ struct cmd_ds_802_11_tpc_cfg { int8_t P1; int8_t P2; uint8_t usesnr; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_pa_cfg { @@ -854,16 +883,21 @@ struct cmd_ds_802_11_pa_cfg { int8_t P0; int8_t P1; int8_t P2; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_802_11_led_ctrl { + struct cmd_header hdr; + __le16 action; __le16 numled; u8 data[256]; -} __attribute__ ((packed)); +} __packed; +/* Automatic Frequency Control */ struct cmd_ds_802_11_afc { + struct cmd_header hdr; + __le16 afc_auto; union { struct { @@ -875,24 +909,28 @@ struct cmd_ds_802_11_afc { __le16 carrier_offset; /* signed */ }; }; -} __attribute__ ((packed)); +} __packed; struct cmd_tx_rate_query { __le16 txrate; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_get_tsf { __le64 tsfvalue; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_bt_access { + struct cmd_header hdr; + __le16 action; __le32 id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_fwt_access { + struct cmd_header hdr; + __le16 action; __le32 id; u8 valid; @@ -910,7 +948,7 @@ struct cmd_ds_fwt_access { __le32 snr; __le32 references; u8 prec[ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; struct cmd_ds_mesh_config { struct cmd_header hdr; @@ -920,43 +958,15 @@ struct cmd_ds_mesh_config { __le16 type; __le16 length; u8 data[128]; /* last position reserved */ -} __attribute__ ((packed)); +} __packed; struct cmd_ds_mesh_access { struct cmd_header hdr; __le16 action; __le32 data[32]; /* last position reserved */ -} __attribute__ ((packed)); +} __packed; /* Number of stats counters returned by the firmware */ #define MESH_STATS_NUM 8 - -struct cmd_ds_command { - /* command header */ - __le16 command; - __le16 size; - __le16 seqnum; - __le16 result; - - /* command Body */ - union { - struct cmd_ds_802_11_ps_mode psmode; - struct cmd_ds_802_11_monitor_mode monitor; - struct cmd_ds_802_11_rssi rssi; - struct 
cmd_ds_802_11_rssi_rsp rssirsp; - struct cmd_ds_mac_reg_access macreg; - struct cmd_ds_bbp_reg_access bbpreg; - struct cmd_ds_rf_reg_access rfreg; - - struct cmd_ds_802_11_tpc_cfg tpccfg; - struct cmd_ds_802_11_afc afc; - struct cmd_ds_802_11_led_ctrl ledgpio; - - struct cmd_ds_bt_access bt; - struct cmd_ds_fwt_access fwt; - struct cmd_ds_802_11_beacon_control bcn_ctrl; - } params; -} __attribute__ ((packed)); - #endif diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 64dd345d30f526f2c10ff0b4c0b03998798e0fc0..6e71346a75505a4398964a2889eef4803822f5a4 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1182,11 +1182,69 @@ static void if_sdio_remove(struct sdio_func *func) lbs_deb_leave(LBS_DEB_SDIO); } +static int if_sdio_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + int ret; + struct if_sdio_card *card = sdio_get_drvdata(func); + + mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); + + lbs_pr_info("%s: suspend: PM flags = 0x%x\n", + sdio_func_id(func), flags); + + /* If we aren't being asked to wake on anything, we should bail out + * and let the SD stack power down the card. + */ + if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) { + lbs_pr_info("Suspend without wake params -- " + "powering down card."); + return -ENOSYS; + } + + if (!(flags & MMC_PM_KEEP_POWER)) { + lbs_pr_err("%s: cannot remain alive while host is suspended\n", + sdio_func_id(func)); + return -ENOSYS; + } + + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) + return ret; + + ret = lbs_suspend(card->priv); + if (ret) + return ret; + + return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); +} + +static int if_sdio_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct if_sdio_card *card = sdio_get_drvdata(func); + int ret; + + lbs_pr_info("%s: resume: we're back\n", sdio_func_id(func)); + + ret = lbs_resume(card->priv); + + return ret; +} + +static const struct dev_pm_ops if_sdio_pm_ops = { + .suspend = if_sdio_suspend, + .resume = if_sdio_resume, +}; + static struct sdio_driver if_sdio_driver = { .name = "libertas_sdio", .id_table = if_sdio_ids, .probe = if_sdio_probe, .remove = if_sdio_remove, + .drv = { + .pm = &if_sdio_pm_ops, + }, }; /*******************************************************************/ diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index f41594c7ac16a09e712cc75a3cf85323870b5c17..07ece9d26c63c373eb26e71ddb7ac7b832c17c1b 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -433,7 +433,7 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp) static int if_usb_reset_device(struct if_usb_card *cardp) { - struct cmd_ds_command *cmd = cardp->ep_out_buf + 4; + struct cmd_header *cmd = cardp->ep_out_buf + 4; int ret; lbs_deb_enter(LBS_DEB_USB); @@ -441,7 +441,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp) *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); cmd->command = cpu_to_le16(CMD_802_11_RESET); - cmd->size = cpu_to_le16(sizeof(struct cmd_header)); + cmd->size = cpu_to_le16(sizeof(cmd)); cmd->result = cpu_to_le16(0); cmd->seqnum = cpu_to_le16(0x5a5a); usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header)); @@ -613,16 +613,14 @@ static void if_usb_receive_fwload(struct urb *urb) return; } - syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); + syncfwheader = 
kmemdup(skb->data + IPFIELD_ALIGN_OFFSET, + sizeof(struct fwsyncheader), GFP_ATOMIC); if (!syncfwheader) { lbs_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n"); kfree_skb(skb); return; } - memcpy(syncfwheader, skb->data + IPFIELD_ALIGN_OFFSET, - sizeof(struct fwsyncheader)); - if (!syncfwheader->cmd) { lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", @@ -1043,6 +1041,12 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message) if (priv->psstate != PS_STATE_FULL_POWER) return -1; + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { + lbs_pr_info("Suspend attempt without " + "configuring wake params!\n"); + return -ENOSYS; + } + ret = lbs_suspend(priv); if (ret) goto out; diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index d9b8ee130c450eb4006cafa411baa7a5b5038f60..258967144b962684a50c5a50c48591b2de14bbe9 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -11,20 +11,14 @@ #include #include #include -#include -#include #include -#include #include #include "host.h" #include "decl.h" #include "dev.h" -#include "wext.h" #include "cfg.h" #include "debugfs.h" -#include "scan.h" -#include "assoc.h" #include "cmd.h" #define DRIVER_RELEASE_VERSION "323.p0" @@ -96,72 +90,6 @@ u8 lbs_data_rate_to_fw_index(u32 rate) } -static int lbs_add_rtap(struct lbs_private *priv); -static void lbs_remove_rtap(struct lbs_private *priv); - - -/** - * Get function for sysfs attribute rtap - */ -static ssize_t lbs_rtap_get(struct device *dev, - struct device_attribute *attr, char * buf) -{ - struct lbs_private *priv = to_net_dev(dev)->ml_priv; - return snprintf(buf, 5, "0x%X\n", priv->monitormode); -} - -/** - * Set function for sysfs attribute rtap - */ -static ssize_t lbs_rtap_set(struct device *dev, - struct device_attribute *attr, const char * buf, size_t count) -{ - int monitor_mode; - struct lbs_private *priv = to_net_dev(dev)->ml_priv; - - sscanf(buf, "%x", &monitor_mode); - if (monitor_mode) { - if (priv->monitormode == monitor_mode) - return strlen(buf); - if (!priv->monitormode) { - if (priv->infra_open || lbs_mesh_open(priv)) - return -EBUSY; - if (priv->mode == IW_MODE_INFRA) - lbs_cmd_80211_deauthenticate(priv, - priv->curbssparams.bssid, - WLAN_REASON_DEAUTH_LEAVING); - else if (priv->mode == IW_MODE_ADHOC) - lbs_adhoc_stop(priv); - lbs_add_rtap(priv); - } - priv->monitormode = monitor_mode; - } else { - if (!priv->monitormode) - return strlen(buf); - priv->monitormode = 0; - lbs_remove_rtap(priv); - - if (priv->currenttxskb) { - dev_kfree_skb_any(priv->currenttxskb); - priv->currenttxskb = NULL; - } - - /* Wake queues, command thread, etc. 
*/ - lbs_host_to_card_done(priv); - } - - lbs_prepare_and_send_command(priv, - CMD_802_11_MONITOR_MODE, CMD_ACT_SET, - CMD_OPTION_WAITFORRSP, 0, &priv->monitormode); - return strlen(buf); -} - -/** - * lbs_rtap attribute to be exported per ethX interface - * through sysfs (/sys/class/net/ethX/lbs_rtap) - */ -static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set ); - /** * @brief This function opens the ethX interface * @@ -177,13 +105,6 @@ static int lbs_dev_open(struct net_device *dev) spin_lock_irq(&priv->driver_lock); - if (priv->monitormode) { - ret = -EBUSY; - goto out; - } - - priv->infra_open = 1; - if (priv->connect_status == LBS_CONNECTED) netif_carrier_on(dev); else @@ -191,7 +112,6 @@ static int lbs_dev_open(struct net_device *dev) if (!priv->tx_pending_len) netif_wake_queue(dev); - out: spin_unlock_irq(&priv->driver_lock); lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); @@ -211,7 +131,6 @@ static int lbs_eth_stop(struct net_device *dev) lbs_deb_enter(LBS_DEB_NET); spin_lock_irq(&priv->driver_lock); - priv->infra_open = 0; netif_stop_queue(dev); spin_unlock_irq(&priv->driver_lock); @@ -238,12 +157,7 @@ static void lbs_tx_timeout(struct net_device *dev) to kick it somehow? */ lbs_host_to_card_done(priv); - /* More often than not, this actually happens because the - firmware has crapped itself -- rather than just a very - busy medium. So send a harmless command, and if/when - _that_ times out, we'll kick it in the head. */ - lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, - 0, 0, NULL); + /* FIXME: reset the card */ lbs_deb_leave(LBS_DEB_TX); } @@ -588,12 +502,6 @@ static int lbs_thread(void *data) if (!priv->dnld_sent && !priv->cur_cmd) lbs_execute_next_command(priv); - /* Wake-up command waiters which can't sleep in - * lbs_prepare_and_send_command - */ - if (!list_empty(&priv->cmdpendingq)) - wake_up_all(&priv->cmd_pending); - spin_lock_irq(&priv->driver_lock); if (!priv->dnld_sent && priv->tx_pending_len > 0) { int ret = priv->hw_host_to_card(priv, MVMS_DAT, @@ -619,66 +527,58 @@ static int lbs_thread(void *data) del_timer(&priv->command_timer); del_timer(&priv->auto_deepsleep_timer); - wake_up_all(&priv->cmd_pending); lbs_deb_leave(LBS_DEB_THREAD); return 0; } -static int lbs_suspend_callback(struct lbs_private *priv, unsigned long dummy, - struct cmd_header *cmd) -{ - lbs_deb_enter(LBS_DEB_FW); - - netif_device_detach(priv->dev); - if (priv->mesh_dev) - netif_device_detach(priv->mesh_dev); - - priv->fw_ready = 0; - lbs_deb_leave(LBS_DEB_FW); - return 0; -} - int lbs_suspend(struct lbs_private *priv) { - struct cmd_header cmd; int ret; lbs_deb_enter(LBS_DEB_FW); - if (priv->wol_criteria == 0xffffffff) { - lbs_pr_info("Suspend attempt without configuring wake params!\n"); - return -EINVAL; + if (priv->is_deep_sleep) { + ret = lbs_set_deep_sleep(priv, 0); + if (ret) { + lbs_pr_err("deep sleep cancellation failed: %d\n", ret); + return ret; + } + priv->deep_sleep_required = 1; } - memset(&cmd, 0, sizeof(cmd)); + ret = lbs_set_host_sleep(priv, 1); - ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd, - sizeof(cmd), lbs_suspend_callback, 0); - if (ret) - lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret); + netif_device_detach(priv->dev); + if (priv->mesh_dev) + netif_device_detach(priv->mesh_dev); lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); return ret; } EXPORT_SYMBOL_GPL(lbs_suspend); -void lbs_resume(struct lbs_private *priv) +int lbs_resume(struct lbs_private *priv) { - lbs_deb_enter(LBS_DEB_FW); + int ret; - priv->fw_ready = 1; + 
lbs_deb_enter(LBS_DEB_FW); - /* Firmware doesn't seem to give us RX packets any more - until we send it some command. Might as well update */ - lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, - 0, 0, NULL); + ret = lbs_set_host_sleep(priv, 0); netif_device_attach(priv->dev); if (priv->mesh_dev) netif_device_attach(priv->mesh_dev); - lbs_deb_leave(LBS_DEB_FW); + if (priv->deep_sleep_required) { + priv->deep_sleep_required = 0; + ret = lbs_set_deep_sleep(priv, 1); + if (ret) + lbs_pr_err("deep sleep activation failed: %d\n", ret); + } + + lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); + return ret; } EXPORT_SYMBOL_GPL(lbs_resume); @@ -710,6 +610,9 @@ static int lbs_setup_firmware(struct lbs_private *priv) priv->txpower_max = maxlevel; } + /* Send cmd to FW to enable 11D function */ + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1); + lbs_set_mac_control(priv); done: lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); @@ -748,7 +651,6 @@ static void lbs_cmd_timeout_handler(unsigned long data) static void auto_deepsleep_timer_fn(unsigned long data) { struct lbs_private *priv = (struct lbs_private *)data; - int ret; lbs_deb_enter(LBS_DEB_CMD); @@ -756,14 +658,15 @@ static void auto_deepsleep_timer_fn(unsigned long data) priv->is_activity_detected = 0; } else { if (priv->is_auto_deep_sleep_enabled && - (!priv->wakeup_dev_required) && - (priv->connect_status != LBS_CONNECTED)) { + (!priv->wakeup_dev_required) && + (priv->connect_status != LBS_CONNECTED)) { + struct cmd_header cmd; + lbs_deb_main("Entering auto deep sleep mode...\n"); - ret = lbs_prepare_and_send_command(priv, - CMD_802_11_DEEP_SLEEP, 0, - 0, 0, NULL); - if (ret) - lbs_pr_err("Enter Deep Sleep command failed\n"); + memset(&cmd, 0, sizeof(cmd)); + cmd.size = cpu_to_le16(sizeof(cmd)); + lbs_cmd_async(priv, CMD_802_11_DEEP_SLEEP, &cmd, + sizeof(cmd)); } } mod_timer(&priv->auto_deepsleep_timer , jiffies + @@ -799,45 +702,27 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv) static int lbs_init_adapter(struct lbs_private *priv) { - size_t bufsize; - int i, ret = 0; + int ret; lbs_deb_enter(LBS_DEB_MAIN); - /* Allocate buffer to store the BSSID list */ - bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor); - priv->networks = kzalloc(bufsize, GFP_KERNEL); - if (!priv->networks) { - lbs_pr_err("Out of memory allocating beacons\n"); - ret = -1; - goto out; - } - - /* Initialize scan result lists */ - INIT_LIST_HEAD(&priv->network_free_list); - INIT_LIST_HEAD(&priv->network_list); - for (i = 0; i < MAX_NETWORK_COUNT; i++) { - list_add_tail(&priv->networks[i].list, - &priv->network_free_list); - } - memset(priv->current_addr, 0xff, ETH_ALEN); priv->connect_status = LBS_DISCONNECTED; - priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - priv->mode = IW_MODE_INFRA; priv->channel = DEFAULT_AD_HOC_CHANNEL; priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; priv->radio_on = 1; - priv->enablehwauto = 1; priv->psmode = LBS802_11POWERMODECAM; priv->psstate = PS_STATE_FULL_POWER; priv->is_deep_sleep = 0; priv->is_auto_deep_sleep_enabled = 0; + priv->deep_sleep_required = 0; priv->wakeup_dev_required = 0; init_waitqueue_head(&priv->ds_awake_q); priv->authtype_auto = 1; - + priv->is_host_sleep_configured = 0; + priv->is_host_sleep_activated = 0; + init_waitqueue_head(&priv->host_sleep_q); mutex_init(&priv->lock); setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, @@ -849,7 +734,6 @@ static int lbs_init_adapter(struct lbs_private *priv) INIT_LIST_HEAD(&priv->cmdpendingq); spin_lock_init(&priv->driver_lock); - 
init_waitqueue_head(&priv->cmd_pending); /* Allocate the command buffers */ if (lbs_allocate_cmd_buffer(priv)) { @@ -881,8 +765,6 @@ static void lbs_free_adapter(struct lbs_private *priv) kfifo_free(&priv->event_fifo); del_timer(&priv->command_timer); del_timer(&priv->auto_deepsleep_timer); - kfree(priv->networks); - priv->networks = NULL; lbs_deb_leave(LBS_DEB_MAIN); } @@ -919,7 +801,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) lbs_pr_err("cfg80211 init failed\n"); goto done; } - /* TODO? */ + wdev->iftype = NL80211_IFTYPE_STATION; priv = wdev_priv(wdev); priv->wdev = wdev; @@ -929,7 +811,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) goto err_wdev; } - //TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); dev = alloc_netdev(0, "wlan%d", ether_setup); if (!dev) { dev_err(dmdev, "no memory for network device instance\n"); @@ -945,20 +826,10 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) dev->netdev_ops = &lbs_netdev_ops; dev->watchdog_timeo = 5 * HZ; dev->ethtool_ops = &lbs_ethtool_ops; -#ifdef WIRELESS_EXT - dev->wireless_handlers = &lbs_handler_def; -#endif dev->flags |= IFF_BROADCAST | IFF_MULTICAST; - - // TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ?? - - priv->card = card; - priv->infra_open = 0; - - priv->rtap_net_dev = NULL; strcpy(dev->name, "wlan%d"); lbs_deb_thread("Starting main thread...\n"); @@ -970,12 +841,11 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) } priv->work_thread = create_singlethread_workqueue("lbs_worker"); - INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); - INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); priv->wol_criteria = 0xffffffff; priv->wol_gpio = 0xff; + priv->wol_gap = 20; goto done; @@ -1004,12 +874,10 @@ void lbs_remove_card(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MAIN); lbs_remove_mesh(priv); - lbs_remove_rtap(priv); + lbs_scan_deinit(priv); dev = priv->dev; - cancel_delayed_work_sync(&priv->scan_work); - cancel_delayed_work_sync(&priv->assoc_work); cancel_work_sync(&priv->mcast_work); /* worker thread destruction blocks on the in-flight command which @@ -1021,16 +889,18 @@ void lbs_remove_card(struct lbs_private *priv) if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { priv->psmode = LBS802_11POWERMODECAM; - lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); } - lbs_send_disconnect_notification(priv); - if (priv->is_deep_sleep) { priv->is_deep_sleep = 0; wake_up_interruptible(&priv->ds_awake_q); } + priv->is_host_sleep_configured = 0; + priv->is_host_sleep_activated = 0; + wake_up_interruptible(&priv->host_sleep_q); + /* Stop the thread servicing the interrupts */ priv->surpriseremoved = 1; kthread_stop(priv->main_thread); @@ -1046,7 +916,7 @@ void lbs_remove_card(struct lbs_private *priv) EXPORT_SYMBOL_GPL(lbs_remove_card); -static int lbs_rtap_supported(struct lbs_private *priv) +int lbs_rtap_supported(struct lbs_private *priv) { if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) return 1; @@ -1078,16 +948,6 @@ int lbs_start_card(struct lbs_private *priv) lbs_init_mesh(priv); - /* - * While rtap isn't related to mesh, only mesh-enabled - * firmware implements the rtap functionality via - * CMD_802_11_MONITOR_MODE. 
- */ - if (lbs_rtap_supported(priv)) { - if (device_create_file(&dev->dev, &dev_attr_lbs_rtap)) - lbs_pr_err("cannot register lbs_rtap attribute\n"); - } - lbs_debugfs_init_one(priv, dev); lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); @@ -1119,9 +979,6 @@ void lbs_stop_card(struct lbs_private *priv) lbs_debugfs_remove_one(priv); lbs_deinit_mesh(priv); - if (lbs_rtap_supported(priv)) - device_remove_file(&dev->dev, &dev_attr_lbs_rtap); - /* Delete the timeout of the currently processing command */ del_timer_sync(&priv->command_timer); del_timer_sync(&priv->auto_deepsleep_timer); @@ -1195,7 +1052,7 @@ static int __init lbs_init_module(void) memset(&confirm_sleep, 0, sizeof(confirm_sleep)); confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE); confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep)); - confirm_sleep.action = cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED); + confirm_sleep.action = cpu_to_le16(PS_MODE_ACTION_SLEEP_CONFIRMED); lbs_debugfs_init(); lbs_deb_leave(LBS_DEB_MAIN); return 0; @@ -1208,87 +1065,6 @@ static void __exit lbs_exit_module(void) lbs_deb_leave(LBS_DEB_MAIN); } -/* - * rtap interface support fuctions - */ - -static int lbs_rtap_open(struct net_device *dev) -{ - /* Yes, _stop_ the queue. Because we don't support injection */ - lbs_deb_enter(LBS_DEB_MAIN); - netif_carrier_off(dev); - netif_stop_queue(dev); - lbs_deb_leave(LBS_DEB_LEAVE); - return 0; -} - -static int lbs_rtap_stop(struct net_device *dev) -{ - lbs_deb_enter(LBS_DEB_MAIN); - lbs_deb_leave(LBS_DEB_MAIN); - return 0; -} - -static netdev_tx_t lbs_rtap_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - netif_stop_queue(dev); - return NETDEV_TX_BUSY; -} - -static void lbs_remove_rtap(struct lbs_private *priv) -{ - lbs_deb_enter(LBS_DEB_MAIN); - if (priv->rtap_net_dev == NULL) - goto out; - unregister_netdev(priv->rtap_net_dev); - free_netdev(priv->rtap_net_dev); - priv->rtap_net_dev = NULL; -out: - lbs_deb_leave(LBS_DEB_MAIN); -} - -static const struct net_device_ops rtap_netdev_ops = { - .ndo_open = lbs_rtap_open, - .ndo_stop = lbs_rtap_stop, - .ndo_start_xmit = lbs_rtap_hard_start_xmit, -}; - -static int lbs_add_rtap(struct lbs_private *priv) -{ - int ret = 0; - struct net_device *rtap_dev; - - lbs_deb_enter(LBS_DEB_MAIN); - if (priv->rtap_net_dev) { - ret = -EPERM; - goto out; - } - - rtap_dev = alloc_netdev(0, "rtap%d", ether_setup); - if (rtap_dev == NULL) { - ret = -ENOMEM; - goto out; - } - - memcpy(rtap_dev->dev_addr, priv->current_addr, ETH_ALEN); - rtap_dev->type = ARPHRD_IEEE80211_RADIOTAP; - rtap_dev->netdev_ops = &rtap_netdev_ops; - rtap_dev->ml_priv = priv; - SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); - - ret = register_netdev(rtap_dev); - if (ret) { - free_netdev(rtap_dev); - goto out; - } - priv->rtap_net_dev = rtap_dev; - -out: - lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); - return ret; -} - module_init(lbs_init_module); module_exit(lbs_exit_module); diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c index e385af1f458351faa38601318c7880c508f181f6..194762ab014250feb61b710f66b6fda737b08f2b 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/libertas/mesh.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "mesh.h" #include "decl.h" @@ -314,7 +315,7 @@ static int lbs_mesh_dev_open(struct net_device *dev) spin_lock_irq(&priv->driver_lock); - if (priv->monitormode) { + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { ret = -EBUSY; goto out; } @@ -369,9 +370,6 @@ int 
lbs_add_mesh(struct lbs_private *priv) SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); -#ifdef WIRELESS_EXT - mesh_dev->wireless_handlers = &mesh_handler_def; -#endif mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; /* Register virtual mesh interface */ ret = register_netdev(mesh_dev); @@ -457,65 +455,189 @@ void lbs_mesh_set_txpd(struct lbs_private *priv, * Mesh command handling */ -int lbs_cmd_bt_access(struct cmd_ds_command *cmd, - u16 cmd_action, void *pdata_buf) +/** + * @brief Add or delete Mesh Blinding Table entries + * + * @param priv A pointer to struct lbs_private structure + * @param add TRUE to add the entry, FALSE to delete it + * @param addr1 Destination address to blind or unblind + * + * @return 0 on success, error on failure + */ +int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1) { - struct cmd_ds_bt_access *bt_access = &cmd->params.bt; - lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); + struct cmd_ds_bt_access cmd; + int ret = 0; - cmd->command = cpu_to_le16(CMD_BT_ACCESS); - cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + - sizeof(struct cmd_header)); - cmd->result = 0; - bt_access->action = cpu_to_le16(cmd_action); + lbs_deb_enter(LBS_DEB_CMD); - switch (cmd_action) { - case CMD_ACT_BT_ACCESS_ADD: - memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN); + BUG_ON(addr1 == NULL); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + memcpy(cmd.addr1, addr1, ETH_ALEN); + if (add) { + cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_ADD); lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr", - bt_access->addr1, 6); - break; - case CMD_ACT_BT_ACCESS_DEL: - memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN); + addr1, ETH_ALEN); + } else { + cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_DEL); lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr", - bt_access->addr1, 6); - break; - case CMD_ACT_BT_ACCESS_LIST: - bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); - break; - case CMD_ACT_BT_ACCESS_RESET: - break; - case CMD_ACT_BT_ACCESS_SET_INVERT: - bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); - break; - case CMD_ACT_BT_ACCESS_GET_INVERT: - break; - default: - break; + addr1, ETH_ALEN); } - lbs_deb_leave(LBS_DEB_CMD); - return 0; + + ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; } -int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, - u16 cmd_action, void *pdata_buf) +/** + * @brief Reset/clear the mesh blinding table + * + * @param priv A pointer to struct lbs_private structure + * + * @return 0 on success, error on failure + */ +int lbs_mesh_bt_reset(struct lbs_private *priv) { - struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt; - lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); + struct cmd_ds_bt_access cmd; + int ret = 0; - cmd->command = cpu_to_le16(CMD_FWT_ACCESS); - cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + - sizeof(struct cmd_header)); - cmd->result = 0; + lbs_deb_enter(LBS_DEB_CMD); - if (pdata_buf) - memcpy(fwt_access, pdata_buf, sizeof(*fwt_access)); - else - memset(fwt_access, 0, sizeof(*fwt_access)); + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_RESET); - fwt_access->action = cpu_to_le16(cmd_action); + ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Gets the inverted status of the mesh blinding table + * 
+ * Normally the firmware "blinds" or ignores traffic from mesh nodes in the + * table, but an inverted table allows *only* traffic from nodes listed in + * the table. + * + * @param priv A pointer to struct lbs_private structure + * @param invert On success, TRUE if the blinding table is inverted, + * FALSE if it is not inverted + * + * @return 0 on success, error on failure + */ +int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted) +{ + struct cmd_ds_bt_access cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + BUG_ON(inverted == NULL); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_GET_INVERT); + + ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); + if (ret == 0) + *inverted = !!cmd.id; + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Sets the inverted status of the mesh blinding table + * + * Normally the firmware "blinds" or ignores traffic from mesh nodes in the + * table, but an inverted table allows *only* traffic from nodes listed in + * the table. + * + * @param priv A pointer to struct lbs_private structure + * @param invert TRUE to invert the blinding table (only traffic from + * listed nodes allowed), FALSE to return it + * to normal state (listed nodes ignored) + * + * @return 0 on success, error on failure + */ +int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted) +{ + struct cmd_ds_bt_access cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT); + cmd.id = !!inverted; + + ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief List an entry in the mesh blinding table + * + * @param priv A pointer to struct lbs_private structure + * @param id The ID of the entry to list + * @param addr1 MAC address associated with the table entry + * + * @return 0 on success, error on failure + */ +int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1) +{ + struct cmd_ds_bt_access cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + BUG_ON(addr1 == NULL); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT); + cmd.id = cpu_to_le32(id); + + ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); + if (ret == 0) + memcpy(addr1, cmd.addr1, sizeof(cmd.addr1)); + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Access the mesh forwarding table + * + * @param priv A pointer to struct lbs_private structure + * @param cmd_action The forwarding table action to perform + * @param cmd The pre-filled FWT_ACCESS command + * + * @return 0 on success and 'cmd' will be filled with the + * firmware's response + */ +int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action, + struct cmd_ds_fwt_access *cmd) +{ + int ret; + + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); + + cmd->hdr.command = cpu_to_le16(CMD_FWT_ACCESS); + cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access)); + cmd->hdr.result = 0; + cmd->action = cpu_to_le16(cmd_action); + + ret = lbs_cmd_with_response(priv, CMD_FWT_ACCESS, cmd); + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return 0; } diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h index 
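The blinding-table handling is now split into dedicated helpers, each issuing a complete CMD_BT_ACCESS exchange. A short illustrative caller (not from the patch) that blinds one peer and then checks whether the table is inverted:

	/* Illustrative sketch: blind one peer address and query the current
	 * inversion state of the table.  'peer' is a caller-supplied MAC. */
	static int example_blind_peer(struct lbs_private *priv, u8 *peer)
	{
		bool inverted;
		int ret;

		ret = lbs_mesh_bt_add_del(priv, true, peer);
		if (ret)
			return ret;

		ret = lbs_mesh_bt_get_inverted(priv, &inverted);
		/* on success, 'inverted' tells whether only listed peers
		 * are allowed through */
		return ret;
	}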
e2573303a328f63ac2e28f23f64739c0759d1017..afb2e8dead3f25c38fd4dedc8f12531bcccb2774 100644 --- a/drivers/net/wireless/libertas/mesh.h +++ b/drivers/net/wireless/libertas/mesh.h @@ -8,6 +8,7 @@ #include #include +#include "host.h" #ifdef CONFIG_LIBERTAS_MESH @@ -51,10 +52,15 @@ struct cmd_ds_command; struct cmd_ds_mesh_access; struct cmd_ds_mesh_config; -int lbs_cmd_bt_access(struct cmd_ds_command *cmd, - u16 cmd_action, void *pdata_buf); -int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, - u16 cmd_action, void *pdata_buf); +int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1); +int lbs_mesh_bt_reset(struct lbs_private *priv); +int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted); +int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted); +int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1); + +int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action, + struct cmd_ds_fwt_access *cmd); + int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, struct cmd_ds_mesh_access *cmd); int lbs_mesh_config_send(struct lbs_private *priv, @@ -70,11 +76,6 @@ void lbs_persist_config_init(struct net_device *net); void lbs_persist_config_remove(struct net_device *net); -/* WEXT handler */ - -extern struct iw_handler_def mesh_handler_def; - - /* Ethtool statistics */ struct ethtool_stats; diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h index d16b26416e82097f8ba4cd4464eb9f4a7fd81145..b3c8ea6d610e4e7983507ce19d3ca7f12cdeb944 100644 --- a/drivers/net/wireless/libertas/radiotap.h +++ b/drivers/net/wireless/libertas/radiotap.h @@ -6,7 +6,7 @@ struct tx_radiotap_hdr { u8 txpower; u8 rts_retries; u8 data_retries; -} __attribute__ ((packed)); +} __packed; #define TX_RADIOTAP_PRESENT ( \ (1 << IEEE80211_RADIOTAP_RATE) | \ @@ -34,7 +34,7 @@ struct rx_radiotap_hdr { u8 flags; u8 rate; u8 antsignal; -} __attribute__ ((packed)); +} __packed; #define RX_RADIOTAP_PRESENT ( \ (1 << IEEE80211_RADIOTAP_FLAGS) | \ diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 7a377f5b76627540b6309f563bf6c974be503799..a4d0bca9ef2c8aeac99f629b955d8056eaaa4c66 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -4,18 +4,19 @@ #include #include #include +#include +#include "defs.h" #include "host.h" #include "radiotap.h" #include "decl.h" #include "dev.h" -#include "wext.h" struct eth803hdr { u8 dest_addr[6]; u8 src_addr[6]; u16 h803_len; -} __attribute__ ((packed)); +} __packed; struct rfc1042hdr { u8 llc_dsap; @@ -23,113 +24,21 @@ struct rfc1042hdr { u8 llc_ctrl; u8 snap_oui[3]; u16 snap_type; -} __attribute__ ((packed)); +} __packed; struct rxpackethdr { struct eth803hdr eth803_hdr; struct rfc1042hdr rfc1042_hdr; -} __attribute__ ((packed)); +} __packed; struct rx80211packethdr { struct rxpd rx_pd; void *eth80211_hdr; -} __attribute__ ((packed)); +} __packed; static int process_rxed_802_11_packet(struct lbs_private *priv, struct sk_buff *skb); -/** - * @brief This function computes the avgSNR . 
- * - * @param priv A pointer to struct lbs_private structure - * @return avgSNR - */ -static u8 lbs_getavgsnr(struct lbs_private *priv) -{ - u8 i; - u16 temp = 0; - if (priv->numSNRNF == 0) - return 0; - for (i = 0; i < priv->numSNRNF; i++) - temp += priv->rawSNR[i]; - return (u8) (temp / priv->numSNRNF); - -} - -/** - * @brief This function computes the AvgNF - * - * @param priv A pointer to struct lbs_private structure - * @return AvgNF - */ -static u8 lbs_getavgnf(struct lbs_private *priv) -{ - u8 i; - u16 temp = 0; - if (priv->numSNRNF == 0) - return 0; - for (i = 0; i < priv->numSNRNF; i++) - temp += priv->rawNF[i]; - return (u8) (temp / priv->numSNRNF); - -} - -/** - * @brief This function save the raw SNR/NF to our internel buffer - * - * @param priv A pointer to struct lbs_private structure - * @param prxpd A pointer to rxpd structure of received packet - * @return n/a - */ -static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd) -{ - if (priv->numSNRNF < DEFAULT_DATA_AVG_FACTOR) - priv->numSNRNF++; - priv->rawSNR[priv->nextSNRNF] = p_rx_pd->snr; - priv->rawNF[priv->nextSNRNF] = p_rx_pd->nf; - priv->nextSNRNF++; - if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR) - priv->nextSNRNF = 0; -} - -/** - * @brief This function computes the RSSI in received packet. - * - * @param priv A pointer to struct lbs_private structure - * @param prxpd A pointer to rxpd structure of received packet - * @return n/a - */ -static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd) -{ - - lbs_deb_enter(LBS_DEB_RX); - - lbs_deb_rx("rxpd: SNR %d, NF %d\n", p_rx_pd->snr, p_rx_pd->nf); - lbs_deb_rx("before computing SNR: SNR-avg = %d, NF-avg = %d\n", - priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE, - priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE); - - priv->SNR[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->snr; - priv->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf; - lbs_save_rawSNRNF(priv, p_rx_pd); - - priv->SNR[TYPE_RXPD][TYPE_AVG] = lbs_getavgsnr(priv) * AVG_SCALE; - priv->NF[TYPE_RXPD][TYPE_AVG] = lbs_getavgnf(priv) * AVG_SCALE; - lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n", - priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE, - priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE); - - priv->RSSI[TYPE_RXPD][TYPE_NOAVG] = - CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_NOAVG], - priv->NF[TYPE_RXPD][TYPE_NOAVG]); - - priv->RSSI[TYPE_RXPD][TYPE_AVG] = - CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE, - priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE); - - lbs_deb_leave(LBS_DEB_RX); -} - /** * @brief This function processes received packet and forwards it * to kernel/upper layer @@ -154,7 +63,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; - if (priv->monitormode) + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) return process_rxed_802_11_packet(priv, skb); p_rx_pd = (struct rxpd *) skb->data; @@ -225,13 +134,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) */ skb_pull(skb, hdrchop); - /* Take the data rate from the rxpd structure - * only if the rate is auto - */ - if (priv->enablehwauto) - priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate); - - lbs_compute_rssi(priv, p_rx_pd); + priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate); lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); dev->stats.rx_bytes += skb->len; @@ -352,20 +255,18 @@ static int process_rxed_802_11_packet(struct lbs_private *priv, pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr)); 
memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr)); - /* Take the data rate from the rxpd structure - * only if the rate is auto - */ - if (priv->enablehwauto) - priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate); - - lbs_compute_rssi(priv, prxpd); + priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate); lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; - skb->protocol = eth_type_trans(skb, priv->rtap_net_dev); - netif_rx(skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); ret = 0; diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c deleted file mode 100644 index 24cd54b3a8060955ec7138caf30ce4c95aa5416c..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/scan.c +++ /dev/null @@ -1,1354 +0,0 @@ -/** - * Functions implementing wlan scan IOCTL and firmware command APIs - * - * IOCTL handlers as well as command preperation and response routines - * for sending scan commands to the firmware. - */ -#include -#include -#include -#include -#include -#include -#include - -#include "host.h" -#include "dev.h" -#include "scan.h" -#include "assoc.h" -#include "wext.h" -#include "cmd.h" - -//! Approximate amount of data needed to pass a scan result back to iwlist -#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ - + IEEE80211_MAX_SSID_LEN \ - + IW_EV_UINT_LEN \ - + IW_EV_FREQ_LEN \ - + IW_EV_QUAL_LEN \ - + IEEE80211_MAX_SSID_LEN \ - + IW_EV_PARAM_LEN \ - + 40) /* 40 for WPAIE */ - -//! Memory needed to store a max sized channel List TLV for a firmware scan -#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvl_ie_header) \ - + (MRVDRV_MAX_CHANNELS_PER_SCAN \ - * sizeof(struct chanscanparamset))) - -//! Memory needed to store a max number/size SSID TLV for a firmware scan -#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvl_ie_ssid_param_set)) - -//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max -#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \ - + CHAN_TLV_MAX_SIZE + SSID_TLV_MAX_SIZE) - -//! The maximum number of channels the firmware can scan per command -#define MRVDRV_MAX_CHANNELS_PER_SCAN 14 - -/** - * @brief Number of channels to scan per firmware scan command issuance. - * - * Number restricted to prevent hitting the limit on the amount of scan data - * returned in a single firmware scan command. - */ -#define MRVDRV_CHANNELS_PER_SCAN_CMD 4 - -//! Scan time specified in the channel TLV for each channel for passive scans -#define MRVDRV_PASSIVE_SCAN_CHAN_TIME 100 - -//! Scan time specified in the channel TLV for each channel for active scans -#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100 - -#define DEFAULT_MAX_SCAN_AGE (15 * HZ) - -static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy, - struct cmd_header *resp); - -/*********************************************************************/ -/* */ -/* Misc helper functions */ -/* */ -/*********************************************************************/ - -/** - * @brief Unsets the MSB on basic rates - * - * Scan through an array and unset the MSB for basic data rates. 
- * - * @param rates buffer of data rates - * @param len size of buffer - */ -static void lbs_unset_basic_rate_flags(u8 *rates, size_t len) -{ - int i; - - for (i = 0; i < len; i++) - rates[i] &= 0x7f; -} - - -static inline void clear_bss_descriptor(struct bss_descriptor *bss) -{ - /* Don't blow away ->list, just BSS data */ - memset(bss, 0, offsetof(struct bss_descriptor, list)); -} - -/** - * @brief Compare two SSIDs - * - * @param ssid1 A pointer to ssid to compare - * @param ssid2 A pointer to ssid to compare - * - * @return 0: ssid is same, otherwise is different - */ -int lbs_ssid_cmp(uint8_t *ssid1, uint8_t ssid1_len, uint8_t *ssid2, - uint8_t ssid2_len) -{ - if (ssid1_len != ssid2_len) - return -1; - - return memcmp(ssid1, ssid2, ssid1_len); -} - -static inline int is_same_network(struct bss_descriptor *src, - struct bss_descriptor *dst) -{ - /* A network is only a duplicate if the channel, BSSID, and ESSID - * all match. We treat all with the same BSSID and channel - * as one network */ - return ((src->ssid_len == dst->ssid_len) && - (src->channel == dst->channel) && - !compare_ether_addr(src->bssid, dst->bssid) && - !memcmp(src->ssid, dst->ssid, src->ssid_len)); -} - - - -/*********************************************************************/ -/* */ -/* Region channel support */ -/* */ -/*********************************************************************/ - -#define LBS_TX_PWR_DEFAULT 20 /*100mW */ -#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */ -#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */ -#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */ -#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */ - -/* Format { channel, frequency (MHz), maxtxpower } */ -/* band: 'B/G', region: USA FCC/Canada IC */ -static struct chan_freq_power channel_freq_power_US_BG[] = { - {1, 2412, LBS_TX_PWR_US_DEFAULT}, - {2, 2417, LBS_TX_PWR_US_DEFAULT}, - {3, 2422, LBS_TX_PWR_US_DEFAULT}, - {4, 2427, LBS_TX_PWR_US_DEFAULT}, - {5, 2432, LBS_TX_PWR_US_DEFAULT}, - {6, 2437, LBS_TX_PWR_US_DEFAULT}, - {7, 2442, LBS_TX_PWR_US_DEFAULT}, - {8, 2447, LBS_TX_PWR_US_DEFAULT}, - {9, 2452, LBS_TX_PWR_US_DEFAULT}, - {10, 2457, LBS_TX_PWR_US_DEFAULT}, - {11, 2462, LBS_TX_PWR_US_DEFAULT} -}; - -/* band: 'B/G', region: Europe ETSI */ -static struct chan_freq_power channel_freq_power_EU_BG[] = { - {1, 2412, LBS_TX_PWR_EMEA_DEFAULT}, - {2, 2417, LBS_TX_PWR_EMEA_DEFAULT}, - {3, 2422, LBS_TX_PWR_EMEA_DEFAULT}, - {4, 2427, LBS_TX_PWR_EMEA_DEFAULT}, - {5, 2432, LBS_TX_PWR_EMEA_DEFAULT}, - {6, 2437, LBS_TX_PWR_EMEA_DEFAULT}, - {7, 2442, LBS_TX_PWR_EMEA_DEFAULT}, - {8, 2447, LBS_TX_PWR_EMEA_DEFAULT}, - {9, 2452, LBS_TX_PWR_EMEA_DEFAULT}, - {10, 2457, LBS_TX_PWR_EMEA_DEFAULT}, - {11, 2462, LBS_TX_PWR_EMEA_DEFAULT}, - {12, 2467, LBS_TX_PWR_EMEA_DEFAULT}, - {13, 2472, LBS_TX_PWR_EMEA_DEFAULT} -}; - -/* band: 'B/G', region: Spain */ -static struct chan_freq_power channel_freq_power_SPN_BG[] = { - {10, 2457, LBS_TX_PWR_DEFAULT}, - {11, 2462, LBS_TX_PWR_DEFAULT} -}; - -/* band: 'B/G', region: France */ -static struct chan_freq_power channel_freq_power_FR_BG[] = { - {10, 2457, LBS_TX_PWR_FR_DEFAULT}, - {11, 2462, LBS_TX_PWR_FR_DEFAULT}, - {12, 2467, LBS_TX_PWR_FR_DEFAULT}, - {13, 2472, LBS_TX_PWR_FR_DEFAULT} -}; - -/* band: 'B/G', region: Japan */ -static struct chan_freq_power channel_freq_power_JPN_BG[] = { - {1, 2412, LBS_TX_PWR_JP_DEFAULT}, - {2, 2417, LBS_TX_PWR_JP_DEFAULT}, - {3, 2422, LBS_TX_PWR_JP_DEFAULT}, - {4, 2427, LBS_TX_PWR_JP_DEFAULT}, - {5, 2432, LBS_TX_PWR_JP_DEFAULT}, - {6, 2437, LBS_TX_PWR_JP_DEFAULT}, - {7, 2442, 
LBS_TX_PWR_JP_DEFAULT}, - {8, 2447, LBS_TX_PWR_JP_DEFAULT}, - {9, 2452, LBS_TX_PWR_JP_DEFAULT}, - {10, 2457, LBS_TX_PWR_JP_DEFAULT}, - {11, 2462, LBS_TX_PWR_JP_DEFAULT}, - {12, 2467, LBS_TX_PWR_JP_DEFAULT}, - {13, 2472, LBS_TX_PWR_JP_DEFAULT}, - {14, 2484, LBS_TX_PWR_JP_DEFAULT} -}; - -/** - * the structure for channel, frequency and power - */ -struct region_cfp_table { - u8 region; - struct chan_freq_power *cfp_BG; - int cfp_no_BG; -}; - -/** - * the structure for the mapping between region and CFP - */ -static struct region_cfp_table region_cfp_table[] = { - {0x10, /*US FCC */ - channel_freq_power_US_BG, - ARRAY_SIZE(channel_freq_power_US_BG), - } - , - {0x20, /*CANADA IC */ - channel_freq_power_US_BG, - ARRAY_SIZE(channel_freq_power_US_BG), - } - , - {0x30, /*EU*/ channel_freq_power_EU_BG, - ARRAY_SIZE(channel_freq_power_EU_BG), - } - , - {0x31, /*SPAIN*/ channel_freq_power_SPN_BG, - ARRAY_SIZE(channel_freq_power_SPN_BG), - } - , - {0x32, /*FRANCE*/ channel_freq_power_FR_BG, - ARRAY_SIZE(channel_freq_power_FR_BG), - } - , - {0x40, /*JAPAN*/ channel_freq_power_JPN_BG, - ARRAY_SIZE(channel_freq_power_JPN_BG), - } - , -/*Add new region here */ -}; - -/** - * @brief This function finds the CFP in - * region_cfp_table based on region and band parameter. - * - * @param region The region code - * @param band The band - * @param cfp_no A pointer to CFP number - * @return A pointer to CFP - */ -static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no) -{ - int i, end; - - lbs_deb_enter(LBS_DEB_MAIN); - - end = ARRAY_SIZE(region_cfp_table); - - for (i = 0; i < end ; i++) { - lbs_deb_main("region_cfp_table[i].region=%d\n", - region_cfp_table[i].region); - if (region_cfp_table[i].region == region) { - *cfp_no = region_cfp_table[i].cfp_no_BG; - lbs_deb_leave(LBS_DEB_MAIN); - return region_cfp_table[i].cfp_BG; - } - } - - lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL"); - return NULL; -} - -int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band) -{ - int ret = 0; - int i = 0; - - struct chan_freq_power *cfp; - int cfp_no; - - lbs_deb_enter(LBS_DEB_MAIN); - - memset(priv->region_channel, 0, sizeof(priv->region_channel)); - - cfp = lbs_get_region_cfp_table(region, &cfp_no); - if (cfp != NULL) { - priv->region_channel[i].nrcfp = cfp_no; - priv->region_channel[i].CFP = cfp; - } else { - lbs_deb_main("wrong region code %#x in band B/G\n", - region); - ret = -1; - goto out; - } - priv->region_channel[i].valid = 1; - priv->region_channel[i].region = region; - priv->region_channel[i].band = band; - i++; -out: - lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); - return ret; -} - - - - -/*********************************************************************/ -/* */ -/* Main scanning support */ -/* */ -/*********************************************************************/ - -/** - * @brief Create a channel list for the driver to scan based on region info - * - * Only used from lbs_scan_setup_scan_config() - * - * Use the driver region/band information to construct a comprehensive list - * of channels to scan. This routine is used for any scan that is not - * provided a specific channel list to scan. 
- * - * @param priv A pointer to struct lbs_private structure - * @param scanchanlist Output parameter: resulting channel list to scan - * - * @return void - */ -static int lbs_scan_create_channel_list(struct lbs_private *priv, - struct chanscanparamset *scanchanlist) -{ - struct region_channel *scanregion; - struct chan_freq_power *cfp; - int rgnidx; - int chanidx; - int nextchan; - uint8_t scantype; - - chanidx = 0; - - /* Set the default scan type to the user specified type, will later - * be changed to passive on a per channel basis if restricted by - * regulatory requirements (11d or 11h) - */ - scantype = CMD_SCAN_TYPE_ACTIVE; - - for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) { - if (!priv->region_channel[rgnidx].valid) - continue; - scanregion = &priv->region_channel[rgnidx]; - - for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) { - struct chanscanparamset *chan = &scanchanlist[chanidx]; - - cfp = scanregion->CFP + nextchan; - - if (scanregion->band == BAND_B || scanregion->band == BAND_G) - chan->radiotype = CMD_SCAN_RADIO_TYPE_BG; - - if (scantype == CMD_SCAN_TYPE_PASSIVE) { - chan->maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME); - chan->chanscanmode.passivescan = 1; - } else { - chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME); - chan->chanscanmode.passivescan = 0; - } - - chan->channumber = cfp->channel; - } - } - return chanidx; -} - -/* - * Add SSID TLV of the form: - * - * TLV-ID SSID 00 00 - * length 06 00 - * ssid 4d 4e 54 45 53 54 - */ -static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv) -{ - struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv; - - ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID); - ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len); - memcpy(ssid_tlv->ssid, priv->scan_ssid, priv->scan_ssid_len); - return sizeof(ssid_tlv->header) + priv->scan_ssid_len; -} - -/* - * Add CHANLIST TLV of the form - * - * TLV-ID CHANLIST 01 01 - * length 5b 00 - * channel 1 00 01 00 00 00 64 00 - * radio type 00 - * channel 01 - * scan type 00 - * min scan time 00 00 - * max scan time 64 00 - * channel 2 00 02 00 00 00 64 00 - * channel 3 00 03 00 00 00 64 00 - * channel 4 00 04 00 00 00 64 00 - * channel 5 00 05 00 00 00 64 00 - * channel 6 00 06 00 00 00 64 00 - * channel 7 00 07 00 00 00 64 00 - * channel 8 00 08 00 00 00 64 00 - * channel 9 00 09 00 00 00 64 00 - * channel 10 00 0a 00 00 00 64 00 - * channel 11 00 0b 00 00 00 64 00 - * channel 12 00 0c 00 00 00 64 00 - * channel 13 00 0d 00 00 00 64 00 - * - */ -static int lbs_scan_add_chanlist_tlv(uint8_t *tlv, - struct chanscanparamset *chan_list, - int chan_count) -{ - size_t size = sizeof(struct chanscanparamset) *chan_count; - struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv; - - chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); - memcpy(chan_tlv->chanscanparam, chan_list, size); - chan_tlv->header.len = cpu_to_le16(size); - return sizeof(chan_tlv->header) + size; -} - -/* - * Add RATES TLV of the form - * - * TLV-ID RATES 01 00 - * length 0e 00 - * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c - * - * The rates are in lbs_bg_rates[], but for the 802.11b - * rates the high bit isn't set. 
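/*
 * Illustrative sketch only, not part of the patch: building the RATES
 * TLV byte-for-byte as laid out in the comment above -- a little-endian
 * u16 type, a little-endian u16 length, then one byte per rate in units
 * of 500 kbit/s, with the high bit set on the 802.11b basic rates
 * (1, 2, 5.5 and 11 Mbit/s).  The helper names are invented for the
 * example.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
        return 2;
}

static size_t build_rates_tlv(uint8_t *buf, const uint8_t *rates, size_t n)
{
        size_t off = 0, i;

        off += put_le16(buf + off, 0x0001);       /* TLV-ID RATES        */
        off += put_le16(buf + off, (uint16_t)n);  /* value length        */
        for (i = 0; i < n; i++) {
                uint8_t r = rates[i];

                /* 0x02/0x04/0x0b/0x16 are 1/2/5.5/11 Mbit/s times two  */
                if (r == 0x02 || r == 0x04 || r == 0x0b || r == 0x16)
                        r |= 0x80;                /* mark as basic rate  */
                buf[off++] = r;
        }
        return off;                               /* header + value size */
}

int main(void)
{
        /* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54 Mbit/s, times two */
        static const uint8_t bg_rates[] = {
                0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12,
                0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
        };
        uint8_t tlv[32];
        size_t len = build_rates_tlv(tlv, bg_rates, sizeof(bg_rates));
        size_t i;

        for (i = 0; i < len; i++)
                printf("%02x ", tlv[i]);
        printf("\n");   /* prints: 01 00 0c 00 82 84 8b 96 0c 12 ... 6c */
        return 0;
}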
- */ -static int lbs_scan_add_rates_tlv(uint8_t *tlv) -{ - int i; - struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; - - rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); - tlv += sizeof(rate_tlv->header); - for (i = 0; i < MAX_RATES; i++) { - *tlv = lbs_bg_rates[i]; - if (*tlv == 0) - break; - /* This code makes sure that the 802.11b rates (1 MBit/s, 2 - MBit/s, 5.5 MBit/s and 11 MBit/s get's the high bit set. - Note that the values are MBit/s * 2, to mark them as - basic rates so that the firmware likes it better */ - if (*tlv == 0x02 || *tlv == 0x04 || - *tlv == 0x0b || *tlv == 0x16) - *tlv |= 0x80; - tlv++; - } - rate_tlv->header.len = cpu_to_le16(i); - return sizeof(rate_tlv->header) + i; -} - -/* - * Generate the CMD_802_11_SCAN command with the proper tlv - * for a bunch of channels. - */ -static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype, - struct chanscanparamset *chan_list, int chan_count) -{ - int ret = -ENOMEM; - struct cmd_ds_802_11_scan *scan_cmd; - uint8_t *tlv; /* pointer into our current, growing TLV storage area */ - - lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d", - bsstype, chan_list ? chan_list[0].channumber : -1, - chan_count); - - /* create the fixed part for scan command */ - scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); - if (scan_cmd == NULL) - goto out; - - tlv = scan_cmd->tlvbuffer; - /* TODO: do we need to scan for a specific BSSID? - memcpy(scan_cmd->bssid, priv->scan_bssid, ETH_ALEN); */ - scan_cmd->bsstype = bsstype; - - /* add TLVs */ - if (priv->scan_ssid_len) - tlv += lbs_scan_add_ssid_tlv(priv, tlv); - if (chan_list && chan_count) - tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count); - tlv += lbs_scan_add_rates_tlv(tlv); - - /* This is the final data we are about to send */ - scan_cmd->hdr.size = cpu_to_le16(tlv - (uint8_t *)scan_cmd); - lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd, - sizeof(*scan_cmd)); - lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer, - tlv - scan_cmd->tlvbuffer); - - ret = __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr, - le16_to_cpu(scan_cmd->hdr.size), - lbs_ret_80211_scan, 0); - -out: - kfree(scan_cmd); - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); - return ret; -} - -/** - * @brief Internal function used to start a scan based on an input config - * - * Use the input user scan configuration information when provided in - * order to send the appropriate scan commands to firmware to populate or - * update the internal driver scan table - * - * @param priv A pointer to struct lbs_private structure - * @param full_scan Do a full-scan (blocking) - * - * @return 0 or < 0 if error - */ -int lbs_scan_networks(struct lbs_private *priv, int full_scan) -{ - int ret = -ENOMEM; - struct chanscanparamset *chan_list; - struct chanscanparamset *curr_chans; - int chan_count; - uint8_t bsstype = CMD_BSS_TYPE_ANY; - int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD; - union iwreq_data wrqu; -#ifdef CONFIG_LIBERTAS_DEBUG - struct bss_descriptor *iter; - int i = 0; - DECLARE_SSID_BUF(ssid); -#endif - - lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan); - - /* Cancel any partial outstanding partial scans if this scan - * is a full scan. - */ - if (full_scan && delayed_work_pending(&priv->scan_work)) - cancel_delayed_work(&priv->scan_work); - - /* User-specified bsstype or channel list - TODO: this can be implemented if some user-space application - need the feature. Formerly, it was accessible from debugfs, - but then nowhere used. 
- if (user_cfg) { - if (user_cfg->bsstype) - bsstype = user_cfg->bsstype; - } */ - - lbs_deb_scan("numchannels %d, bsstype %d\n", numchannels, bsstype); - - /* Create list of channels to scan */ - chan_list = kzalloc(sizeof(struct chanscanparamset) * - LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL); - if (!chan_list) { - lbs_pr_alert("SCAN: chan_list empty\n"); - goto out; - } - - /* We want to scan all channels */ - chan_count = lbs_scan_create_channel_list(priv, chan_list); - - netif_stop_queue(priv->dev); - if (priv->mesh_dev) - netif_stop_queue(priv->mesh_dev); - - /* Prepare to continue an interrupted scan */ - lbs_deb_scan("chan_count %d, scan_channel %d\n", - chan_count, priv->scan_channel); - curr_chans = chan_list; - /* advance channel list by already-scanned-channels */ - if (priv->scan_channel > 0) { - curr_chans += priv->scan_channel; - chan_count -= priv->scan_channel; - } - - /* Send scan command(s) - * numchannels contains the number of channels we should maximally scan - * chan_count is the total number of channels to scan - */ - - while (chan_count) { - int to_scan = min(numchannels, chan_count); - lbs_deb_scan("scanning %d of %d channels\n", - to_scan, chan_count); - ret = lbs_do_scan(priv, bsstype, curr_chans, - to_scan); - if (ret) { - lbs_pr_err("SCAN_CMD failed\n"); - goto out2; - } - curr_chans += to_scan; - chan_count -= to_scan; - - /* somehow schedule the next part of the scan */ - if (chan_count && !full_scan && - !priv->surpriseremoved) { - /* -1 marks just that we're currently scanning */ - if (priv->scan_channel < 0) - priv->scan_channel = to_scan; - else - priv->scan_channel += to_scan; - cancel_delayed_work(&priv->scan_work); - queue_delayed_work(priv->work_thread, &priv->scan_work, - msecs_to_jiffies(300)); - /* skip over GIWSCAN event */ - goto out; - } - - } - memset(&wrqu, 0, sizeof(union iwreq_data)); - wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL); - -#ifdef CONFIG_LIBERTAS_DEBUG - /* Dump the scan table */ - mutex_lock(&priv->lock); - lbs_deb_scan("scan table:\n"); - list_for_each_entry(iter, &priv->network_list, list) - lbs_deb_scan("%02d: BSSID %pM, RSSI %d, SSID '%s'\n", - i++, iter->bssid, iter->rssi, - print_ssid(ssid, iter->ssid, iter->ssid_len)); - mutex_unlock(&priv->lock); -#endif - -out2: - priv->scan_channel = 0; - -out: - if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) - netif_wake_queue(priv->dev); - - if (priv->mesh_dev && lbs_mesh_connected(priv) && - !priv->tx_pending_len) - netif_wake_queue(priv->mesh_dev); - - kfree(chan_list); - - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); - return ret; -} - -void lbs_scan_worker(struct work_struct *work) -{ - struct lbs_private *priv = - container_of(work, struct lbs_private, scan_work.work); - - lbs_deb_enter(LBS_DEB_SCAN); - lbs_scan_networks(priv, 0); - lbs_deb_leave(LBS_DEB_SCAN); -} - - -/*********************************************************************/ -/* */ -/* Result interpretation */ -/* */ -/*********************************************************************/ - -/** - * @brief Interpret a BSS scan response returned from the firmware - * - * Parse the various fixed fields and IEs passed back for a a BSS probe - * response or beacon from the scan command. Record information as needed - * in the scan table struct bss_descriptor for that entry. 
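/*
 * Illustrative sketch only, not part of the patch: the generic
 * id/length/value walk over the information elements that the BSS
 * parser below performs, shown on a self-contained buffer.  The sample
 * frame bytes are invented for the example; only the IE framing
 * (1 byte id, 1 byte length, `length` value bytes) matters here.
 */
#include <stdio.h>
#include <stdint.h>

static void walk_ies(const uint8_t *pos, const uint8_t *end)
{
        while (pos + 2 <= end) {
                uint8_t id  = pos[0];
                uint8_t len = pos[1];

                if (pos + 2 + len > end) {
                        printf("truncated IE %u, stopping\n", id);
                        break;
                }
                printf("IE id %u, len %u\n", id, len);
                pos += 2 + len;          /* step over id, len and value */
        }
}

int main(void)
{
        /* SSID "lab" (id 0), then supported rates (id 1). */
        static const uint8_t ies[] = {
                0x00, 0x03, 'l', 'a', 'b',
                0x01, 0x04, 0x82, 0x84, 0x8b, 0x96,
        };

        walk_ies(ies, ies + sizeof(ies));
        return 0;
}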
- * - * @param bss Output parameter: Pointer to the BSS Entry - * - * @return 0 or -1 - */ -static int lbs_process_bss(struct bss_descriptor *bss, - uint8_t **pbeaconinfo, int *bytesleft) -{ - struct ieee_ie_fh_param_set *fh; - struct ieee_ie_ds_param_set *ds; - struct ieee_ie_cf_param_set *cf; - struct ieee_ie_ibss_param_set *ibss; - DECLARE_SSID_BUF(ssid); - uint8_t *pos, *end, *p; - uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; - uint16_t beaconsize = 0; - int ret; - - lbs_deb_enter(LBS_DEB_SCAN); - - if (*bytesleft >= sizeof(beaconsize)) { - /* Extract & convert beacon size from the command buffer */ - beaconsize = get_unaligned_le16(*pbeaconinfo); - *bytesleft -= sizeof(beaconsize); - *pbeaconinfo += sizeof(beaconsize); - } - - if (beaconsize == 0 || beaconsize > *bytesleft) { - *pbeaconinfo += *bytesleft; - *bytesleft = 0; - ret = -1; - goto done; - } - - /* Initialize the current working beacon pointer for this BSS iteration */ - pos = *pbeaconinfo; - end = pos + beaconsize; - - /* Advance the return beacon pointer past the current beacon */ - *pbeaconinfo += beaconsize; - *bytesleft -= beaconsize; - - memcpy(bss->bssid, pos, ETH_ALEN); - lbs_deb_scan("process_bss: BSSID %pM\n", bss->bssid); - pos += ETH_ALEN; - - if ((end - pos) < 12) { - lbs_deb_scan("process_bss: Not enough bytes left\n"); - ret = -1; - goto done; - } - - /* - * next 4 fields are RSSI, time stamp, beacon interval, - * and capability information - */ - - /* RSSI is 1 byte long */ - bss->rssi = *pos; - lbs_deb_scan("process_bss: RSSI %d\n", *pos); - pos++; - - /* time stamp is 8 bytes long */ - pos += 8; - - /* beacon interval is 2 bytes long */ - bss->beaconperiod = get_unaligned_le16(pos); - pos += 2; - - /* capability information is 2 bytes long */ - bss->capability = get_unaligned_le16(pos); - lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability); - pos += 2; - - if (bss->capability & WLAN_CAPABILITY_PRIVACY) - lbs_deb_scan("process_bss: WEP enabled\n"); - if (bss->capability & WLAN_CAPABILITY_IBSS) - bss->mode = IW_MODE_ADHOC; - else - bss->mode = IW_MODE_INFRA; - - /* rest of the current buffer are IE's */ - lbs_deb_scan("process_bss: IE len %zd\n", end - pos); - lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos); - - /* process variable IE */ - while (pos <= end - 2) { - if (pos + pos[1] > end) { - lbs_deb_scan("process_bss: error in processing IE, " - "bytes left < IE length\n"); - break; - } - - switch (pos[0]) { - case WLAN_EID_SSID: - bss->ssid_len = min_t(int, IEEE80211_MAX_SSID_LEN, pos[1]); - memcpy(bss->ssid, pos + 2, bss->ssid_len); - lbs_deb_scan("got SSID IE: '%s', len %u\n", - print_ssid(ssid, bss->ssid, bss->ssid_len), - bss->ssid_len); - break; - - case WLAN_EID_SUPP_RATES: - n_basic_rates = min_t(uint8_t, MAX_RATES, pos[1]); - memcpy(bss->rates, pos + 2, n_basic_rates); - got_basic_rates = 1; - lbs_deb_scan("got RATES IE\n"); - break; - - case WLAN_EID_FH_PARAMS: - fh = (struct ieee_ie_fh_param_set *) pos; - memcpy(&bss->phy.fh, fh, sizeof(*fh)); - lbs_deb_scan("got FH IE\n"); - break; - - case WLAN_EID_DS_PARAMS: - ds = (struct ieee_ie_ds_param_set *) pos; - bss->channel = ds->channel; - memcpy(&bss->phy.ds, ds, sizeof(*ds)); - lbs_deb_scan("got DS IE, channel %d\n", bss->channel); - break; - - case WLAN_EID_CF_PARAMS: - cf = (struct ieee_ie_cf_param_set *) pos; - memcpy(&bss->ss.cf, cf, sizeof(*cf)); - lbs_deb_scan("got CF IE\n"); - break; - - case WLAN_EID_IBSS_PARAMS: - ibss = (struct ieee_ie_ibss_param_set *) pos; - bss->atimwindow = 
ibss->atimwindow; - memcpy(&bss->ss.ibss, ibss, sizeof(*ibss)); - lbs_deb_scan("got IBSS IE\n"); - break; - - case WLAN_EID_EXT_SUPP_RATES: - /* only process extended supported rate if data rate is - * already found. Data rate IE should come before - * extended supported rate IE - */ - lbs_deb_scan("got RATESEX IE\n"); - if (!got_basic_rates) { - lbs_deb_scan("... but ignoring it\n"); - break; - } - - n_ex_rates = pos[1]; - if (n_basic_rates + n_ex_rates > MAX_RATES) - n_ex_rates = MAX_RATES - n_basic_rates; - - p = bss->rates + n_basic_rates; - memcpy(p, pos + 2, n_ex_rates); - break; - - case WLAN_EID_GENERIC: - if (pos[1] >= 4 && - pos[2] == 0x00 && pos[3] == 0x50 && - pos[4] == 0xf2 && pos[5] == 0x01) { - bss->wpa_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN); - memcpy(bss->wpa_ie, pos, bss->wpa_ie_len); - lbs_deb_scan("got WPA IE\n"); - lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, - bss->wpa_ie_len); - } else if (pos[1] >= MARVELL_MESH_IE_LENGTH && - pos[2] == 0x00 && pos[3] == 0x50 && - pos[4] == 0x43 && pos[5] == 0x04) { - lbs_deb_scan("got mesh IE\n"); - bss->mesh = 1; - } else { - lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n", - pos[2], pos[3], - pos[4], pos[5], - pos[1]); - } - break; - - case WLAN_EID_RSN: - lbs_deb_scan("got RSN IE\n"); - bss->rsn_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN); - memcpy(bss->rsn_ie, pos, bss->rsn_ie_len); - lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", - bss->rsn_ie, bss->rsn_ie_len); - break; - - default: - lbs_deb_scan("got IE 0x%04x, len %d\n", - pos[0], pos[1]); - break; - } - - pos += pos[1] + 2; - } - - /* Timestamp */ - bss->last_scanned = jiffies; - lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates)); - - ret = 0; - -done: - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); - return ret; -} - -/** - * @brief Send a scan command for all available channels filtered on a spec - * - * Used in association code and from debugfs - * - * @param priv A pointer to struct lbs_private structure - * @param ssid A pointer to the SSID to scan for - * @param ssid_len Length of the SSID - * - * @return 0-success, otherwise fail - */ -int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid, - uint8_t ssid_len) -{ - DECLARE_SSID_BUF(ssid_buf); - int ret = 0; - - lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n", - print_ssid(ssid_buf, ssid, ssid_len)); - - if (!ssid_len) - goto out; - - memcpy(priv->scan_ssid, ssid, ssid_len); - priv->scan_ssid_len = ssid_len; - - lbs_scan_networks(priv, 1); - if (priv->surpriseremoved) { - ret = -1; - goto out; - } - -out: - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); - return ret; -} - - - - -/*********************************************************************/ -/* */ -/* Support for Wireless Extensions */ -/* */ -/*********************************************************************/ - - -#define MAX_CUSTOM_LEN 64 - -static inline char *lbs_translate_scan(struct lbs_private *priv, - struct iw_request_info *info, - char *start, char *stop, - struct bss_descriptor *bss) -{ - struct chan_freq_power *cfp; - char *current_val; /* For rates */ - struct iw_event iwe; /* Temporary buffer */ - int j; -#define PERFECT_RSSI ((uint8_t)50) -#define WORST_RSSI ((uint8_t)0) -#define RSSI_DIFF ((uint8_t)(PERFECT_RSSI - WORST_RSSI)) - uint8_t rssi; - - lbs_deb_enter(LBS_DEB_SCAN); - - cfp = lbs_find_cfp_by_band_and_channel(priv, 0, bss->channel); - if (!cfp) { - lbs_deb_scan("Invalid channel number %d\n", bss->channel); - start = NULL; - goto out; - } - - /* First entry *MUST* be the BSSID */ - 
iwe.cmd = SIOCGIWAP; - iwe.u.ap_addr.sa_family = ARPHRD_ETHER; - memcpy(iwe.u.ap_addr.sa_data, &bss->bssid, ETH_ALEN); - start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); - - /* SSID */ - iwe.cmd = SIOCGIWESSID; - iwe.u.data.flags = 1; - iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN); - start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); - - /* Mode */ - iwe.cmd = SIOCGIWMODE; - iwe.u.mode = bss->mode; - start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN); - - /* Frequency */ - iwe.cmd = SIOCGIWFREQ; - iwe.u.freq.m = (long)cfp->freq * 100000; - iwe.u.freq.e = 1; - start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); - - /* Add quality statistics */ - iwe.cmd = IWEVQUAL; - iwe.u.qual.updated = IW_QUAL_ALL_UPDATED; - iwe.u.qual.level = SCAN_RSSI(bss->rssi); - - rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE; - iwe.u.qual.qual = - (100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) * - (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) / - (RSSI_DIFF * RSSI_DIFF); - if (iwe.u.qual.qual > 100) - iwe.u.qual.qual = 100; - - if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) { - iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE; - } else { - iwe.u.qual.noise = CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]); - } - - /* Locally created ad-hoc BSSs won't have beacons if this is the - * only station in the adhoc network; so get signal strength - * from receive statistics. - */ - if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate - && !lbs_ssid_cmp(priv->curbssparams.ssid, - priv->curbssparams.ssid_len, - bss->ssid, bss->ssid_len)) { - int snr, nf; - snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE; - nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE; - iwe.u.qual.level = CAL_RSSI(snr, nf); - } - start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); - - /* Add encryption capability */ - iwe.cmd = SIOCGIWENCODE; - if (bss->capability & WLAN_CAPABILITY_PRIVACY) { - iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; - } else { - iwe.u.data.flags = IW_ENCODE_DISABLED; - } - iwe.u.data.length = 0; - start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); - - current_val = start + iwe_stream_lcp_len(info); - - iwe.cmd = SIOCGIWRATE; - iwe.u.bitrate.fixed = 0; - iwe.u.bitrate.disabled = 0; - iwe.u.bitrate.value = 0; - - for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) { - /* Bit rate given in 500 kb/s units */ - iwe.u.bitrate.value = bss->rates[j] * 500000; - current_val = iwe_stream_add_value(info, start, current_val, - stop, &iwe, IW_EV_PARAM_LEN); - } - if ((bss->mode == IW_MODE_ADHOC) && priv->adhoccreate - && !lbs_ssid_cmp(priv->curbssparams.ssid, - priv->curbssparams.ssid_len, - bss->ssid, bss->ssid_len)) { - iwe.u.bitrate.value = 22 * 500000; - current_val = iwe_stream_add_value(info, start, current_val, - stop, &iwe, IW_EV_PARAM_LEN); - } - /* Check if we added any event */ - if ((current_val - start) > iwe_stream_lcp_len(info)) - start = current_val; - - memset(&iwe, 0, sizeof(iwe)); - if (bss->wpa_ie_len) { - char buf[MAX_WPA_IE_LEN]; - memcpy(buf, bss->wpa_ie, bss->wpa_ie_len); - iwe.cmd = IWEVGENIE; - iwe.u.data.length = bss->wpa_ie_len; - start = iwe_stream_add_point(info, start, stop, &iwe, buf); - } - - memset(&iwe, 0, sizeof(iwe)); - if (bss->rsn_ie_len) { - char buf[MAX_WPA_IE_LEN]; - memcpy(buf, bss->rsn_ie, bss->rsn_ie_len); - iwe.cmd = IWEVGENIE; - iwe.u.data.length = bss->rsn_ie_len; - start = iwe_stream_add_point(info, start, stop, &iwe, 
buf); - } - - if (bss->mesh) { - char custom[MAX_CUSTOM_LEN]; - char *p = custom; - - iwe.cmd = IWEVCUSTOM; - p += snprintf(p, MAX_CUSTOM_LEN, "mesh-type: olpc"); - iwe.u.data.length = p - custom; - if (iwe.u.data.length) - start = iwe_stream_add_point(info, start, stop, - &iwe, custom); - } - -out: - lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start); - return start; -} - - -/** - * @brief Handle Scan Network ioctl - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param vwrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * - * @return 0 --success, otherwise fail - */ -int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *wrqu, char *extra) -{ - DECLARE_SSID_BUF(ssid); - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (!priv->radio_on) { - ret = -EINVAL; - goto out; - } - - if (!netif_running(dev)) { - ret = -ENETDOWN; - goto out; - } - - /* mac80211 does this: - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (sdata->type != IEEE80211_IF_TYPE_xxx) { - ret = -EOPNOTSUPP; - goto out; - } - */ - - if (wrqu->data.length == sizeof(struct iw_scan_req) && - wrqu->data.flags & IW_SCAN_THIS_ESSID) { - struct iw_scan_req *req = (struct iw_scan_req *)extra; - priv->scan_ssid_len = req->essid_len; - memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); - lbs_deb_wext("set_scan, essid '%s'\n", - print_ssid(ssid, priv->scan_ssid, priv->scan_ssid_len)); - } else { - priv->scan_ssid_len = 0; - } - - if (!delayed_work_pending(&priv->scan_work)) - queue_delayed_work(priv->work_thread, &priv->scan_work, - msecs_to_jiffies(50)); - /* set marker that currently a scan is taking place */ - priv->scan_channel = -1; - - if (priv->surpriseremoved) - ret = -EIO; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - - -/** - * @brief Handle Retrieve scan table ioctl - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param dwrq A pointer to iw_point structure - * @param extra A pointer to extra data buf - * - * @return 0 --success, otherwise fail - */ -int lbs_get_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ -#define SCAN_ITEM_SIZE 128 - struct lbs_private *priv = dev->ml_priv; - int err = 0; - char *ev = extra; - char *stop = ev + dwrq->length; - struct bss_descriptor *iter_bss; - struct bss_descriptor *safe; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* iwlist should wait until the current scan is finished */ - if (priv->scan_channel) - return -EAGAIN; - - /* Update RSSI if current BSS is a locally created ad-hoc BSS */ - if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) { - err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, - CMD_OPTION_WAITFORRSP, 0, NULL); - if (err) - goto out; - } - - mutex_lock(&priv->lock); - list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { - char *next_ev; - unsigned long stale_time; - - if (stop - ev < SCAN_ITEM_SIZE) { - err = -E2BIG; - break; - } - - /* For mesh device, list only mesh networks */ - if (dev == priv->mesh_dev && !iter_bss->mesh) - continue; - - /* Prune old an old scan result */ - stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; - if (time_after(jiffies, stale_time)) { - list_move_tail(&iter_bss->list, &priv->network_free_list); - clear_bss_descriptor(iter_bss); - continue; - } - - /* 
Translate to WE format this entry */ - next_ev = lbs_translate_scan(priv, info, ev, stop, iter_bss); - if (next_ev == NULL) - continue; - ev = next_ev; - } - mutex_unlock(&priv->lock); - - dwrq->length = (ev - extra); - dwrq->flags = 0; -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err); - return err; -} - - - - -/*********************************************************************/ -/* */ -/* Command execution */ -/* */ -/*********************************************************************/ - - -/** - * @brief This function handles the command response of scan - * - * Called from handle_cmd_response() in cmdrespc. - * - * The response buffer for the scan command has the following - * memory layout: - * - * .-----------------------------------------------------------. - * | header (4 * sizeof(u16)): Standard command response hdr | - * .-----------------------------------------------------------. - * | bufsize (u16) : sizeof the BSS Description data | - * .-----------------------------------------------------------. - * | NumOfSet (u8) : Number of BSS Descs returned | - * .-----------------------------------------------------------. - * | BSSDescription data (variable, size given in bufsize) | - * .-----------------------------------------------------------. - * | TLV data (variable, size calculated using header->size, | - * | bufsize and sizeof the fixed fields above) | - * .-----------------------------------------------------------. - * - * @param priv A pointer to struct lbs_private structure - * @param resp A pointer to cmd_ds_command - * - * @return 0 or -1 - */ -static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy, - struct cmd_header *resp) -{ - struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; - struct bss_descriptor *iter_bss; - struct bss_descriptor *safe; - uint8_t *bssinfo; - uint16_t scanrespsize; - int bytesleft; - int idx; - int tlvbufsize; - int ret; - - lbs_deb_enter(LBS_DEB_SCAN); - - /* Prune old entries from scan table */ - list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { - unsigned long stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; - if (time_before(jiffies, stale_time)) - continue; - list_move_tail (&iter_bss->list, &priv->network_free_list); - clear_bss_descriptor(iter_bss); - } - - if (scanresp->nr_sets > MAX_NETWORK_COUNT) { - lbs_deb_scan("SCAN_RESP: too many scan results (%d, max %d)\n", - scanresp->nr_sets, MAX_NETWORK_COUNT); - ret = -1; - goto done; - } - - bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize); - lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft); - - scanrespsize = le16_to_cpu(resp->size); - lbs_deb_scan("SCAN_RESP: scan results %d\n", scanresp->nr_sets); - - bssinfo = scanresp->bssdesc_and_tlvbuffer; - - /* The size of the TLV buffer is equal to the entire command response - * size (scanrespsize) minus the fixed fields (sizeof()'s), the - * BSS Descriptions (bssdescriptsize as bytesLef) and the command - * response header (sizeof(struct cmd_header)) - */ - tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize) - + sizeof(scanresp->nr_sets) - + sizeof(struct cmd_header)); - - /* - * Process each scan response returned (scanresp->nr_sets). 
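/*
 * Illustrative sketch only, not part of the patch: the size arithmetic
 * used just below to find how many TLV bytes trail the BSS descriptors.
 * The byte counts are invented for the example; the layout is the one
 * drawn in the comment above (a 4 * sizeof(u16) response header, the
 * u16 bufsize field, the u8 NumOfSet field, the BSS descriptor block,
 * then the TLV data).
 */
#include <stdio.h>

int main(void)
{
        int cmd_header_size = 8;    /* 4 * sizeof(u16) response header  */
        int bufsize_field   = 2;    /* the u16 "bufsize" field itself   */
        int nr_sets_field   = 1;    /* the u8 "NumOfSet" field itself   */
        int scanrespsize    = 361;  /* total response size (example)    */
        int bssdescriptsize = 300;  /* size of the BSS descriptor data  */

        int tlvbufsize = scanrespsize -
                (bssdescriptsize + bufsize_field + nr_sets_field +
                 cmd_header_size);

        printf("TLV data occupies %d bytes\n", tlvbufsize);   /* 50 */
        return 0;
}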
Save - * the information in the newbssentry and then insert into the - * driver scan table either as an update to an existing entry - * or as an addition at the end of the table - */ - for (idx = 0; idx < scanresp->nr_sets && bytesleft; idx++) { - struct bss_descriptor new; - struct bss_descriptor *found = NULL; - struct bss_descriptor *oldest = NULL; - - /* Process the data fields and IEs returned for this BSS */ - memset(&new, 0, sizeof (struct bss_descriptor)); - if (lbs_process_bss(&new, &bssinfo, &bytesleft) != 0) { - /* error parsing the scan response, skipped */ - lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n"); - continue; - } - - /* Try to find this bss in the scan table */ - list_for_each_entry (iter_bss, &priv->network_list, list) { - if (is_same_network(iter_bss, &new)) { - found = iter_bss; - break; - } - - if ((oldest == NULL) || - (iter_bss->last_scanned < oldest->last_scanned)) - oldest = iter_bss; - } - - if (found) { - /* found, clear it */ - clear_bss_descriptor(found); - } else if (!list_empty(&priv->network_free_list)) { - /* Pull one from the free list */ - found = list_entry(priv->network_free_list.next, - struct bss_descriptor, list); - list_move_tail(&found->list, &priv->network_list); - } else if (oldest) { - /* If there are no more slots, expire the oldest */ - found = oldest; - clear_bss_descriptor(found); - list_move_tail(&found->list, &priv->network_list); - } else { - continue; - } - - lbs_deb_scan("SCAN_RESP: BSSID %pM\n", new.bssid); - - /* Copy the locally created newbssentry to the scan table */ - memcpy(found, &new, offsetof(struct bss_descriptor, list)); - } - - ret = 0; - -done: - lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); - return ret; -} diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h deleted file mode 100644 index 8fb1706d752667ae0682fd2a730665d16a765f21..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/scan.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Interface for the wlan network scan routines - * - * Driver interface functions and type declarations for the scan module - * implemented in scan.c. - */ -#ifndef _LBS_SCAN_H -#define _LBS_SCAN_H - -#include - -struct lbs_private; - -#define MAX_NETWORK_COUNT 128 - -/** Chan-freq-TxPower mapping table*/ -struct chan_freq_power { - /** channel Number */ - u16 channel; - /** frequency of this channel */ - u32 freq; - /** Max allowed Tx power level */ - u16 maxtxpower; - /** TRUE:channel unsupported; FLASE:supported*/ - u8 unsupported; -}; - -/** region-band mapping table*/ -struct region_channel { - /** TRUE if this entry is valid */ - u8 valid; - /** region code for US, Japan ... */ - u8 region; - /** band B/G/A, used for BAND_CONFIG cmd */ - u8 band; - /** Actual No. 
of elements in the array below */ - u8 nrcfp; - /** chan-freq-txpower mapping table*/ - struct chan_freq_power *CFP; -}; - -/** - * @brief Maximum number of channels that can be sent in a setuserscan ioctl - */ -#define LBS_IOCTL_USER_SCAN_CHAN_MAX 50 - -int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); - -int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band); - -int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, - u8 ssid_len); - -int lbs_get_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra); -int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); - -int lbs_scan_networks(struct lbs_private *priv, int full_scan); - -void lbs_scan_worker(struct work_struct *work); - -#endif diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c index a9bf658659ebae51d0fe1575b211cbfbdb0ac054..8000ca6165d0b4e619f6fafb5a5ef1f9e7a73367 100644 --- a/drivers/net/wireless/libertas/tx.c +++ b/drivers/net/wireless/libertas/tx.c @@ -4,13 +4,13 @@ #include #include #include +#include #include "host.h" #include "radiotap.h" #include "decl.h" #include "defs.h" #include "dev.h" -#include "wext.h" /** * @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE @@ -111,7 +111,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) p802x_hdr = skb->data; pkt_len = skb->len; - if (dev == priv->rtap_net_dev) { + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; /* set txpd fields from the radiotap header */ @@ -147,7 +147,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - if (priv->monitormode) { + if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { /* Keep the skb to echo it back once Tx feedback is received from FW */ skb_orphan(skb); @@ -158,6 +158,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) free: dev_kfree_skb_any(skb); } + unlock: spin_unlock_irqrestore(&priv->driver_lock, flags); wake_up(&priv->waitq); @@ -179,7 +180,8 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count) { struct tx_radiotap_hdr *radiotap_hdr; - if (!priv->monitormode || priv->currenttxskb == NULL) + if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR || + priv->currenttxskb == NULL) return; radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; @@ -188,7 +190,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count) (1 + priv->txretrycount - try_count) : 0; priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, - priv->rtap_net_dev); + priv->dev); netif_rx(priv->currenttxskb); priv->currenttxskb = NULL; diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h index 3e72c86ceca85cf810cf3d719df32e08785087fa..462fbb4cb743b839f17e061739b83e9f2b499d2b 100644 --- a/drivers/net/wireless/libertas/types.h +++ b/drivers/net/wireless/libertas/types.h @@ -11,7 +11,7 @@ struct ieee_ie_header { u8 id; u8 len; -} __attribute__ ((packed)); +} __packed; struct ieee_ie_cf_param_set { struct ieee_ie_header header; @@ -20,19 +20,19 @@ struct ieee_ie_cf_param_set { u8 cfpperiod; __le16 cfpmaxduration; __le16 cfpdurationremaining; -} __attribute__ ((packed)); +} __packed; struct ieee_ie_ibss_param_set { struct ieee_ie_header header; __le16 atimwindow; -} __attribute__ ((packed)); +} 
__packed; union ieee_ss_param_set { struct ieee_ie_cf_param_set cf; struct ieee_ie_ibss_param_set ibss; -} __attribute__ ((packed)); +} __packed; struct ieee_ie_fh_param_set { struct ieee_ie_header header; @@ -41,18 +41,18 @@ struct ieee_ie_fh_param_set { u8 hopset; u8 hoppattern; u8 hopindex; -} __attribute__ ((packed)); +} __packed; struct ieee_ie_ds_param_set { struct ieee_ie_header header; u8 channel; -} __attribute__ ((packed)); +} __packed; union ieee_phy_param_set { struct ieee_ie_fh_param_set fh; struct ieee_ie_ds_param_set ds; -} __attribute__ ((packed)); +} __packed; /** TLV type ID definition */ #define PROPRIETARY_TLV_BASE_ID 0x0100 @@ -100,28 +100,28 @@ union ieee_phy_param_set { struct mrvl_ie_header { __le16 type; __le16 len; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_data { struct mrvl_ie_header header; u8 Data[1]; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_rates_param_set { struct mrvl_ie_header header; u8 rates[1]; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_ssid_param_set { struct mrvl_ie_header header; u8 ssid[1]; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_wildcard_ssid_param_set { struct mrvl_ie_header header; u8 MaxSsidlength; u8 ssid[1]; -} __attribute__ ((packed)); +} __packed; struct chanscanmode { #ifdef __BIG_ENDIAN_BITFIELD @@ -133,7 +133,7 @@ struct chanscanmode { u8 disablechanfilt:1; u8 reserved_2_7:6; #endif -} __attribute__ ((packed)); +} __packed; struct chanscanparamset { u8 radiotype; @@ -141,12 +141,12 @@ struct chanscanparamset { struct chanscanmode chanscanmode; __le16 minscantime; __le16 maxscantime; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_chanlist_param_set { struct mrvl_ie_header header; struct chanscanparamset chanscanparam[1]; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_cf_param_set { struct mrvl_ie_header header; @@ -154,86 +154,86 @@ struct mrvl_ie_cf_param_set { u8 cfpperiod; __le16 cfpmaxduration; __le16 cfpdurationremaining; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_ds_param_set { struct mrvl_ie_header header; u8 channel; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_rsn_param_set { struct mrvl_ie_header header; u8 rsnie[1]; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_tsf_timestamp { struct mrvl_ie_header header; __le64 tsftable[1]; -} __attribute__ ((packed)); +} __packed; /* v9 and later firmware only */ struct mrvl_ie_auth_type { struct mrvl_ie_header header; __le16 auth; -} __attribute__ ((packed)); +} __packed; /** Local Power capability */ struct mrvl_ie_power_capability { struct mrvl_ie_header header; s8 minpower; s8 maxpower; -} __attribute__ ((packed)); +} __packed; /* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */ struct mrvl_ie_thresholds { struct mrvl_ie_header header; u8 value; u8 freq; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_beacons_missed { struct mrvl_ie_header header; u8 beaconmissed; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_num_probes { struct mrvl_ie_header header; __le16 numprobes; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_bcast_probe { struct mrvl_ie_header header; __le16 bcastprobe; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_num_ssid_probe { struct mrvl_ie_header header; __le16 numssidprobe; -} __attribute__ ((packed)); +} __packed; struct led_pin { u8 led; u8 pin; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_ledgpio { struct mrvl_ie_header header; struct led_pin ledpin[1]; -} __attribute__ 
((packed)); +} __packed; struct led_bhv { uint8_t firmwarestate; uint8_t led; uint8_t ledstate; uint8_t ledarg; -} __attribute__ ((packed)); +} __packed; struct mrvl_ie_ledbhv { struct mrvl_ie_header header; struct led_bhv ledbhv[1]; -} __attribute__ ((packed)); +} __packed; /* Meant to be packed as the value member of a struct ieee80211_info_element. * Note that the len member of the ieee80211_info_element varies depending on @@ -248,12 +248,12 @@ struct mrvl_meshie_val { uint8_t mesh_capability; uint8_t mesh_id_len; uint8_t mesh_id[IEEE80211_MAX_SSID_LEN]; -} __attribute__ ((packed)); +} __packed; struct mrvl_meshie { u8 id, len; struct mrvl_meshie_val val; -} __attribute__ ((packed)); +} __packed; struct mrvl_mesh_defaults { __le32 bootflag; @@ -261,6 +261,6 @@ struct mrvl_mesh_defaults { uint8_t reserved; __le16 channel; struct mrvl_meshie meshie; -} __attribute__ ((packed)); +} __packed; #endif diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c deleted file mode 100644 index f96a96031a501fbd17a35a0cd63cfdba1fcd6e9d..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/wext.c +++ /dev/null @@ -1,2353 +0,0 @@ -/** - * This file contains ioctl functions - */ -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "host.h" -#include "radiotap.h" -#include "decl.h" -#include "defs.h" -#include "dev.h" -#include "wext.h" -#include "scan.h" -#include "assoc.h" -#include "cmd.h" - - -static inline void lbs_postpone_association_work(struct lbs_private *priv) -{ - if (priv->surpriseremoved) - return; - cancel_delayed_work(&priv->assoc_work); - queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2); -} - -static inline void lbs_do_association_work(struct lbs_private *priv) -{ - if (priv->surpriseremoved) - return; - cancel_delayed_work(&priv->assoc_work); - queue_delayed_work(priv->work_thread, &priv->assoc_work, 0); -} - -static inline void lbs_cancel_association_work(struct lbs_private *priv) -{ - cancel_delayed_work(&priv->assoc_work); - kfree(priv->pending_assoc_req); - priv->pending_assoc_req = NULL; -} - -void lbs_send_disconnect_notification(struct lbs_private *priv) -{ - union iwreq_data wrqu; - - memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); -} - -static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str) -{ - union iwreq_data iwrq; - u8 buf[50]; - - lbs_deb_enter(LBS_DEB_WEXT); - - memset(&iwrq, 0, sizeof(union iwreq_data)); - memset(buf, 0, sizeof(buf)); - - snprintf(buf, sizeof(buf) - 1, "%s", str); - - iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN; - - /* Send Event to upper layer */ - lbs_deb_wext("event indication string %s\n", (char *)buf); - lbs_deb_wext("event indication length %d\n", iwrq.data.length); - lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str); - - wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf); - - lbs_deb_leave(LBS_DEB_WEXT); -} - -/** - * @brief This function handles MIC failure event. 
- * - * @param priv A pointer to struct lbs_private structure - * @para event the event id - * @return n/a - */ -void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event) -{ - char buf[50]; - - lbs_deb_enter(LBS_DEB_CMD); - memset(buf, 0, sizeof(buf)); - - sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication "); - - if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) - strcat(buf, "unicast "); - else - strcat(buf, "multicast "); - - lbs_send_iwevcustom_event(priv, buf); - lbs_deb_leave(LBS_DEB_CMD); -} - -/** - * @brief Find the channel frequency power info with specific channel - * - * @param priv A pointer to struct lbs_private structure - * @param band it can be BAND_A, BAND_G or BAND_B - * @param channel the channel for looking - * @return A pointer to struct chan_freq_power structure or NULL if not find. - */ -struct chan_freq_power *lbs_find_cfp_by_band_and_channel( - struct lbs_private *priv, - u8 band, - u16 channel) -{ - struct chan_freq_power *cfp = NULL; - struct region_channel *rc; - int i, j; - - for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { - rc = &priv->region_channel[j]; - - if (!rc->valid || !rc->CFP) - continue; - if (rc->band != band) - continue; - for (i = 0; i < rc->nrcfp; i++) { - if (rc->CFP[i].channel == channel) { - cfp = &rc->CFP[i]; - break; - } - } - } - - if (!cfp && channel) - lbs_deb_wext("lbs_find_cfp_by_band_and_channel: can't find " - "cfp by band %d / channel %d\n", band, channel); - - return cfp; -} - -/** - * @brief Find the channel frequency power info with specific frequency - * - * @param priv A pointer to struct lbs_private structure - * @param band it can be BAND_A, BAND_G or BAND_B - * @param freq the frequency for looking - * @return A pointer to struct chan_freq_power structure or NULL if not find. - */ -static struct chan_freq_power *find_cfp_by_band_and_freq( - struct lbs_private *priv, - u8 band, - u32 freq) -{ - struct chan_freq_power *cfp = NULL; - struct region_channel *rc; - int i, j; - - for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { - rc = &priv->region_channel[j]; - - if (!rc->valid || !rc->CFP) - continue; - if (rc->band != band) - continue; - for (i = 0; i < rc->nrcfp; i++) { - if (rc->CFP[i].freq == freq) { - cfp = &rc->CFP[i]; - break; - } - } - } - - if (!cfp && freq) - lbs_deb_wext("find_cfp_by_band_and_freql: can't find cfp by " - "band %d / freq %d\n", band, freq); - - return cfp; -} - -/** - * @brief Copy active data rates based on adapter mode and status - * - * @param priv A pointer to struct lbs_private structure - * @param rate The buf to return the active rates - */ -static void copy_active_data_rates(struct lbs_private *priv, u8 *rates) -{ - lbs_deb_enter(LBS_DEB_WEXT); - - if ((priv->connect_status != LBS_CONNECTED) && - !lbs_mesh_connected(priv)) - memcpy(rates, lbs_bg_rates, MAX_RATES); - else - memcpy(rates, priv->curbssparams.rates, MAX_RATES); - - lbs_deb_leave(LBS_DEB_WEXT); -} - -static int lbs_get_name(struct net_device *dev, struct iw_request_info *info, - char *cwrq, char *extra) -{ - - lbs_deb_enter(LBS_DEB_WEXT); - - /* We could add support for 802.11n here as needed. 
Jean II */ - snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g"); - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - struct chan_freq_power *cfp; - - lbs_deb_enter(LBS_DEB_WEXT); - - cfp = lbs_find_cfp_by_band_and_channel(priv, 0, - priv->channel); - - if (!cfp) { - if (priv->channel) - lbs_deb_wext("invalid channel %d\n", - priv->channel); - return -EINVAL; - } - - fwrq->m = (long)cfp->freq * 100000; - fwrq->e = 1; - - lbs_deb_wext("freq %u\n", fwrq->m); - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (priv->connect_status == LBS_CONNECTED) { - memcpy(awrq->sa_data, priv->curbssparams.bssid, ETH_ALEN); - } else { - memset(awrq->sa_data, 0, ETH_ALEN); - } - awrq->sa_family = ARPHRD_ETHER; - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* - * Check the size of the string - */ - - if (dwrq->length > 16) { - return -E2BIG; - } - - mutex_lock(&priv->lock); - memset(priv->nodename, 0, sizeof(priv->nodename)); - memcpy(priv->nodename, extra, dwrq->length); - mutex_unlock(&priv->lock); - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - dwrq->length = strlen(priv->nodename); - memcpy(extra, priv->nodename, dwrq->length); - extra[dwrq->length] = '\0'; - - dwrq->flags = 1; /* active */ - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -#ifdef CONFIG_LIBERTAS_MESH -static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* Use nickname to indicate that mesh is on */ - - if (lbs_mesh_connected(priv)) { - strncpy(extra, "Mesh", 12); - extra[12] = '\0'; - dwrq->length = strlen(extra); - } - - else { - extra[0] = '\0'; - dwrq->length = 0; - } - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} -#endif - -static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - u32 val = vwrq->value; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (vwrq->disabled) - val = MRVDRV_RTS_MAX_VALUE; - - if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */ - return -EINVAL; - - ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - u16 val = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val); - if (ret) - goto out; - - vwrq->value = val; - vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */ - vwrq->fixed = 1; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int 
lbs_set_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - u32 val = vwrq->value; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (vwrq->disabled) - val = MRVDRV_FRAG_MAX_VALUE; - - if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE) - return -EINVAL; - - ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - u16 val = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val); - if (ret) - goto out; - - vwrq->value = val; - vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE) - || (val > MRVDRV_FRAG_MAX_VALUE)); - vwrq->fixed = 1; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_mode(struct net_device *dev, - struct iw_request_info *info, u32 * uwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - *uwrq = priv->mode; - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -#ifdef CONFIG_LIBERTAS_MESH -static int mesh_wlan_get_mode(struct net_device *dev, - struct iw_request_info *info, u32 * uwrq, - char *extra) -{ - lbs_deb_enter(LBS_DEB_WEXT); - - *uwrq = IW_MODE_REPEAT; - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} -#endif - -static int lbs_get_txpow(struct net_device *dev, - struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - s16 curlevel = 0; - int ret = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (!priv->radio_on) { - lbs_deb_wext("tx power off\n"); - vwrq->value = 0; - vwrq->disabled = 1; - goto out; - } - - ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL); - if (ret) - goto out; - - lbs_deb_wext("tx power level %d dbm\n", curlevel); - priv->txpower_cur = curlevel; - - vwrq->value = curlevel; - vwrq->fixed = 1; - vwrq->disabled = 0; - vwrq->flags = IW_TXPOW_DBM; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - u16 slimit = 0, llimit = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) - return -EOPNOTSUPP; - - /* The MAC has a 4-bit Total_Tx_Count register - Total_Tx_Count = 1 + Tx_Retry_Count */ -#define TX_RETRY_MIN 0 -#define TX_RETRY_MAX 14 - if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX) - return -EINVAL; - - /* Add 1 to convert retry count to try count */ - if (vwrq->flags & IW_RETRY_SHORT) - slimit = (u16) (vwrq->value + 1); - else if (vwrq->flags & IW_RETRY_LONG) - llimit = (u16) (vwrq->value + 1); - else - slimit = llimit = (u16) (vwrq->value + 1); /* set both */ - - if (llimit) { - ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, - llimit); - if (ret) - goto out; - } - - if (slimit) { - /* txretrycount follows the short retry limit */ - priv->txretrycount = slimit; - ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, - slimit); - if (ret) - goto out; - } - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info, - struct 
iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - u16 val = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - vwrq->disabled = 0; - - if (vwrq->flags & IW_RETRY_LONG) { - ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val); - if (ret) - goto out; - - /* Subtract 1 to convert try count to retry count */ - vwrq->value = val - 1; - vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; - } else { - ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val); - if (ret) - goto out; - - /* txretry count follows the short retry limit */ - priv->txretrycount = val; - /* Subtract 1 to convert try count to retry count */ - vwrq->value = val - 1; - vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; - } - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static inline void sort_channels(struct iw_freq *freq, int num) -{ - int i, j; - struct iw_freq temp; - - for (i = 0; i < num; i++) - for (j = i + 1; j < num; j++) - if (freq[i].i > freq[j].i) { - temp.i = freq[i].i; - temp.m = freq[i].m; - - freq[i].i = freq[j].i; - freq[i].m = freq[j].m; - - freq[j].i = temp.i; - freq[j].m = temp.m; - } -} - -/* data rate listing - MULTI_BANDS: - abg a b b/g - Infra G(12) A(8) B(4) G(12) - Adhoc A+B(12) A(8) B(4) B(4) - - non-MULTI_BANDS: - b b/g - Infra B(4) G(12) - Adhoc B(4) B(4) - */ -/** - * @brief Get Range Info - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param vwrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * @return 0 --success, otherwise fail - */ -static int lbs_get_range(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - int i, j; - struct lbs_private *priv = dev->ml_priv; - struct iw_range *range = (struct iw_range *)extra; - struct chan_freq_power *cfp; - u8 rates[MAX_RATES + 1]; - - lbs_deb_enter(LBS_DEB_WEXT); - - dwrq->length = sizeof(struct iw_range); - memset(range, 0, sizeof(struct iw_range)); - - range->min_nwid = 0; - range->max_nwid = 0; - - memset(rates, 0, sizeof(rates)); - copy_active_data_rates(priv, rates); - range->num_bitrates = strnlen(rates, IW_MAX_BITRATES); - for (i = 0; i < range->num_bitrates; i++) - range->bitrate[i] = rates[i] * 500000; - range->num_bitrates = i; - lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES, - range->num_bitrates); - - range->num_frequency = 0; - - range->scan_capa = IW_SCAN_CAPA_ESSID; - - for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES) - && (j < ARRAY_SIZE(priv->region_channel)); j++) { - cfp = priv->region_channel[j].CFP; - for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES) - && priv->region_channel[j].valid - && cfp - && (i < priv->region_channel[j].nrcfp); i++) { - range->freq[range->num_frequency].i = - (long)cfp->channel; - range->freq[range->num_frequency].m = - (long)cfp->freq * 100000; - range->freq[range->num_frequency].e = 1; - cfp++; - range->num_frequency++; - } - } - - lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n", - IW_MAX_FREQUENCIES, range->num_frequency); - - range->num_channels = range->num_frequency; - - sort_channels(&range->freq[0], range->num_frequency); - - /* - * Set an indication of the max TCP throughput in bit/s that we can - * expect using this interface - */ - if (i > 2) - range->throughput = 5000 * 1000; - else - range->throughput = 1500 * 1000; - - range->min_rts = MRVDRV_RTS_MIN_VALUE; - range->max_rts = MRVDRV_RTS_MAX_VALUE; - range->min_frag = 
MRVDRV_FRAG_MIN_VALUE; - range->max_frag = MRVDRV_FRAG_MAX_VALUE; - - range->encoding_size[0] = 5; - range->encoding_size[1] = 13; - range->num_encoding_sizes = 2; - range->max_encoding_tokens = 4; - - /* - * Right now we support only "iwconfig ethX power on|off" - */ - range->pm_capa = IW_POWER_ON; - - /* - * Minimum version we recommend - */ - range->we_version_source = 15; - - /* - * Version we are compiled with - */ - range->we_version_compiled = WIRELESS_EXT; - - range->retry_capa = IW_RETRY_LIMIT; - range->retry_flags = IW_RETRY_LIMIT | IW_RETRY_MAX; - - range->min_retry = TX_RETRY_MIN; - range->max_retry = TX_RETRY_MAX; - - /* - * Set the qual, level and noise range values - */ - range->max_qual.qual = 100; - range->max_qual.level = 0; - range->max_qual.noise = 0; - range->max_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; - - range->avg_qual.qual = 70; - /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ - range->avg_qual.level = 0; - range->avg_qual.noise = 0; - range->avg_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; - - range->sensitivity = 0; - - /* Setup the supported power level ranges */ - memset(range->txpower, 0, sizeof(range->txpower)); - range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE; - range->txpower[0] = priv->txpower_min; - range->txpower[1] = priv->txpower_max; - range->num_txpower = 2; - - range->event_capa[0] = (IW_EVENT_CAPA_K_0 | - IW_EVENT_CAPA_MASK(SIOCGIWAP) | - IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); - range->event_capa[1] = IW_EVENT_CAPA_K_1; - - if (priv->fwcapinfo & FW_CAPINFO_WPA) { - range->enc_capa = IW_ENC_CAPA_WPA - | IW_ENC_CAPA_WPA2 - | IW_ENC_CAPA_CIPHER_TKIP - | IW_ENC_CAPA_CIPHER_CCMP; - } - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_set_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (!(priv->fwcapinfo & FW_CAPINFO_PS)) { - if (vwrq->disabled) - return 0; - else - return -EINVAL; - } - - /* PS is currently supported only in Infrastructure mode - * Remove this check if it is to be supported in IBSS mode also - */ - - if (vwrq->disabled) { - priv->psmode = LBS802_11POWERMODECAM; - if (priv->psstate != PS_STATE_FULL_POWER) { - lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); - } - - return 0; - } - - if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { - lbs_deb_wext( - "setting power timeout is not supported\n"); - return -EINVAL; - } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { - vwrq->value = vwrq->value / 1000; - if (!priv->enter_deep_sleep) { - lbs_pr_err("deep sleep feature is not implemented " - "for this interface driver\n"); - return -EINVAL; - } - - if (priv->connect_status == LBS_CONNECTED) { - if ((priv->is_auto_deep_sleep_enabled) && - (vwrq->value == -1000)) { - lbs_exit_auto_deep_sleep(priv); - return 0; - } else { - lbs_pr_err("can't use deep sleep cmd in " - "connected state\n"); - return -EINVAL; - } - } - - if ((vwrq->value < 0) && (vwrq->value != -1000)) { - lbs_pr_err("unknown option\n"); - return -EINVAL; - } - - if (vwrq->value > 0) { - if (!priv->is_auto_deep_sleep_enabled) { - priv->is_activity_detected = 0; - priv->auto_deep_sleep_timeout = vwrq->value; - lbs_enter_auto_deep_sleep(priv); - } else { - priv->auto_deep_sleep_timeout = vwrq->value; - lbs_deb_debugfs("auto deep sleep: " - "already enabled\n"); - } - return 0; - } else { - if (priv->is_auto_deep_sleep_enabled) { - lbs_exit_auto_deep_sleep(priv); - /* Try to 
exit deep sleep if auto */ - /*deep sleep disabled */ - ret = lbs_set_deep_sleep(priv, 0); - } - if (vwrq->value == 0) - ret = lbs_set_deep_sleep(priv, 1); - else if (vwrq->value == -1000) - ret = lbs_set_deep_sleep(priv, 0); - return ret; - } - } - - if (priv->psmode != LBS802_11POWERMODECAM) { - return 0; - } - - priv->psmode = LBS802_11POWERMODEMAX_PSP; - - if (priv->connect_status == LBS_CONNECTED) { - lbs_ps_sleep(priv, CMD_OPTION_WAITFORRSP); - } - - lbs_deb_leave(LBS_DEB_WEXT); - - return 0; -} - -static int lbs_get_power(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - vwrq->value = 0; - vwrq->flags = 0; - vwrq->disabled = priv->psmode == LBS802_11POWERMODECAM - || priv->connect_status == LBS_DISCONNECTED; - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev) -{ - enum { - POOR = 30, - FAIR = 60, - GOOD = 80, - VERY_GOOD = 90, - EXCELLENT = 95, - PERFECT = 100 - }; - struct lbs_private *priv = dev->ml_priv; - u32 rssi_qual; - u32 tx_qual; - u32 quality = 0; - int ret, stats_valid = 0; - u8 rssi; - u32 tx_retries; - struct cmd_ds_802_11_get_log log; - - lbs_deb_enter(LBS_DEB_WEXT); - - priv->wstats.status = priv->mode; - - /* If we're not associated, all quality values are meaningless */ - if ((priv->connect_status != LBS_CONNECTED) && - !lbs_mesh_connected(priv)) - goto out; - - /* Quality by RSSI */ - priv->wstats.qual.level = - CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG], - priv->NF[TYPE_BEACON][TYPE_NOAVG]); - - if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) { - priv->wstats.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE; - } else { - priv->wstats.qual.noise = - CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]); - } - - lbs_deb_wext("signal level %#x\n", priv->wstats.qual.level); - lbs_deb_wext("noise %#x\n", priv->wstats.qual.noise); - - rssi = priv->wstats.qual.level - priv->wstats.qual.noise; - if (rssi < 15) - rssi_qual = rssi * POOR / 10; - else if (rssi < 20) - rssi_qual = (rssi - 15) * (FAIR - POOR) / 5 + POOR; - else if (rssi < 30) - rssi_qual = (rssi - 20) * (GOOD - FAIR) / 5 + FAIR; - else if (rssi < 40) - rssi_qual = (rssi - 30) * (VERY_GOOD - GOOD) / - 10 + GOOD; - else - rssi_qual = (rssi - 40) * (PERFECT - VERY_GOOD) / - 10 + VERY_GOOD; - quality = rssi_qual; - - /* Quality by TX errors */ - priv->wstats.discard.retries = dev->stats.tx_errors; - - memset(&log, 0, sizeof(log)); - log.hdr.size = cpu_to_le16(sizeof(log)); - ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log); - if (ret) - goto out; - - tx_retries = le32_to_cpu(log.retry); - - if (tx_retries > 75) - tx_qual = (90 - tx_retries) * POOR / 15; - else if (tx_retries > 70) - tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR; - else if (tx_retries > 65) - tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR; - else if (tx_retries > 50) - tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) / - 15 + GOOD; - else - tx_qual = (50 - tx_retries) * - (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; - quality = min(quality, tx_qual); - - priv->wstats.discard.code = le32_to_cpu(log.wepundecryptable); - priv->wstats.discard.retries = tx_retries; - priv->wstats.discard.misc = le32_to_cpu(log.ackfailure); - - /* Calculate quality */ - priv->wstats.qual.qual = min_t(u8, quality, 100); - priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; - stats_valid = 1; - - /* update stats asynchronously for future calls */ - ret = 
lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, - 0, 0, NULL); - if (ret) - lbs_pr_err("RSSI command failed\n"); -out: - if (!stats_valid) { - priv->wstats.miss.beacon = 0; - priv->wstats.discard.retries = 0; - priv->wstats.qual.qual = 0; - priv->wstats.qual.level = 0; - priv->wstats.qual.noise = 0; - priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED; - priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID | - IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; - } - - lbs_deb_leave(LBS_DEB_WEXT); - return &priv->wstats; - - -} - -static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) -{ - int ret = -EINVAL; - struct lbs_private *priv = dev->ml_priv; - struct chan_freq_power *cfp; - struct assoc_request * assoc_req; - - lbs_deb_enter(LBS_DEB_WEXT); - - mutex_lock(&priv->lock); - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - goto out; - } - - /* If setting by frequency, convert to a channel */ - if (fwrq->e == 1) { - long f = fwrq->m / 100000; - - cfp = find_cfp_by_band_and_freq(priv, 0, f); - if (!cfp) { - lbs_deb_wext("invalid freq %ld\n", f); - goto out; - } - - fwrq->e = 0; - fwrq->m = (int) cfp->channel; - } - - /* Setting by channel number */ - if (fwrq->m > 1000 || fwrq->e > 0) { - goto out; - } - - cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m); - if (!cfp) { - goto out; - } - - assoc_req->channel = fwrq->m; - ret = 0; - -out: - if (ret == 0) { - set_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags); - lbs_postpone_association_work(priv); - } else { - lbs_cancel_association_work(priv); - } - mutex_unlock(&priv->lock); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -#ifdef CONFIG_LIBERTAS_MESH -static int lbs_mesh_set_freq(struct net_device *dev, - struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - struct chan_freq_power *cfp; - int ret = -EINVAL; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* If setting by frequency, convert to a channel */ - if (fwrq->e == 1) { - long f = fwrq->m / 100000; - - cfp = find_cfp_by_band_and_freq(priv, 0, f); - if (!cfp) { - lbs_deb_wext("invalid freq %ld\n", f); - goto out; - } - - fwrq->e = 0; - fwrq->m = (int) cfp->channel; - } - - /* Setting by channel number */ - if (fwrq->m > 1000 || fwrq->e > 0) { - goto out; - } - - cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m); - if (!cfp) { - goto out; - } - - if (fwrq->m != priv->channel) { - lbs_deb_wext("mesh channel change forces eth disconnect\n"); - if (priv->mode == IW_MODE_INFRA) - lbs_cmd_80211_deauthenticate(priv, - priv->curbssparams.bssid, - WLAN_REASON_DEAUTH_LEAVING); - else if (priv->mode == IW_MODE_ADHOC) - lbs_adhoc_stop(priv); - } - lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m); - lbs_update_channel(priv); - ret = 0; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} -#endif - -static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - u8 new_rate = 0; - int ret = -EINVAL; - u8 rates[MAX_RATES + 1]; - - lbs_deb_enter(LBS_DEB_WEXT); - - lbs_deb_wext("vwrq->value %d\n", vwrq->value); - lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed); - - if (vwrq->fixed && vwrq->value == -1) - goto out; - - /* Auto rate? 
*/ - priv->enablehwauto = !vwrq->fixed; - - if (vwrq->value == -1) - priv->cur_rate = 0; - else { - if (vwrq->value % 100000) - goto out; - - new_rate = vwrq->value / 500000; - priv->cur_rate = new_rate; - /* the rest is only needed for lbs_set_data_rate() */ - memset(rates, 0, sizeof(rates)); - copy_active_data_rates(priv, rates); - if (!memchr(rates, new_rate, sizeof(rates))) { - lbs_pr_alert("fixed data rate 0x%X out of range\n", - new_rate); - goto out; - } - if (priv->fwrelease < 0x09000000) { - ret = lbs_set_power_adapt_cfg(priv, 0, - POW_ADAPT_DEFAULT_P0, - POW_ADAPT_DEFAULT_P1, - POW_ADAPT_DEFAULT_P2); - if (ret) - goto out; - } - ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1, - TPC_DEFAULT_P2, 1); - if (ret) - goto out; - } - - /* Try the newer command first (Firmware Spec 5.1 and above) */ - ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET); - - /* Fallback to older version */ - if (ret) - ret = lbs_set_data_rate(priv, new_rate); - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (priv->connect_status == LBS_CONNECTED) { - vwrq->value = priv->cur_rate * 500000; - - if (priv->enablehwauto) - vwrq->fixed = 0; - else - vwrq->fixed = 1; - - } else { - vwrq->fixed = 0; - vwrq->value = 0; - } - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_set_mode(struct net_device *dev, - struct iw_request_info *info, u32 * uwrq, char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - struct assoc_request * assoc_req; - - lbs_deb_enter(LBS_DEB_WEXT); - - if ( (*uwrq != IW_MODE_ADHOC) - && (*uwrq != IW_MODE_INFRA) - && (*uwrq != IW_MODE_AUTO)) { - lbs_deb_wext("Invalid mode: 0x%x\n", *uwrq); - ret = -EINVAL; - goto out; - } - - mutex_lock(&priv->lock); - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - lbs_cancel_association_work(priv); - } else { - assoc_req->mode = *uwrq; - set_bit(ASSOC_FLAG_MODE, &assoc_req->flags); - lbs_postpone_association_work(priv); - lbs_deb_wext("Switching to mode: 0x%x\n", *uwrq); - } - mutex_unlock(&priv->lock); - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - - -/** - * @brief Get Encryption key - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param vwrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * @return 0 --success, otherwise fail - */ -static int lbs_get_encode(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, u8 * extra) -{ - struct lbs_private *priv = dev->ml_priv; - int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; - - lbs_deb_enter(LBS_DEB_WEXT); - - lbs_deb_wext("flags 0x%x, index %d, length %d, wep_tx_keyidx %d\n", - dwrq->flags, index, dwrq->length, priv->wep_tx_keyidx); - - dwrq->flags = 0; - - /* Authentication method */ - switch (priv->secinfo.auth_mode) { - case IW_AUTH_ALG_OPEN_SYSTEM: - dwrq->flags = IW_ENCODE_OPEN; - break; - - case IW_AUTH_ALG_SHARED_KEY: - case IW_AUTH_ALG_LEAP: - dwrq->flags = IW_ENCODE_RESTRICTED; - break; - default: - dwrq->flags = IW_ENCODE_DISABLED | IW_ENCODE_OPEN; - break; - } - - memset(extra, 0, 16); - - mutex_lock(&priv->lock); - - /* Default to returning current transmit key */ - if (index < 0) - index = priv->wep_tx_keyidx; - - if 
((priv->wep_keys[index].len) && priv->secinfo.wep_enabled) { - memcpy(extra, priv->wep_keys[index].key, - priv->wep_keys[index].len); - dwrq->length = priv->wep_keys[index].len; - - dwrq->flags |= (index + 1); - /* Return WEP enabled */ - dwrq->flags &= ~IW_ENCODE_DISABLED; - } else if ((priv->secinfo.WPAenabled) - || (priv->secinfo.WPA2enabled)) { - /* return WPA enabled */ - dwrq->flags &= ~IW_ENCODE_DISABLED; - dwrq->flags |= IW_ENCODE_NOKEY; - } else { - dwrq->flags |= IW_ENCODE_DISABLED; - } - - mutex_unlock(&priv->lock); - - lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n", - extra[0], extra[1], extra[2], - extra[3], extra[4], extra[5], dwrq->length); - - lbs_deb_wext("return flags 0x%x\n", dwrq->flags); - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -/** - * @brief Set Encryption key (internal) - * - * @param priv A pointer to private card structure - * @param key_material A pointer to key material - * @param key_length length of key material - * @param index key index to set - * @param set_tx_key Force set TX key (1 = yes, 0 = no) - * @return 0 --success, otherwise fail - */ -static int lbs_set_wep_key(struct assoc_request *assoc_req, - const char *key_material, - u16 key_length, - u16 index, - int set_tx_key) -{ - int ret = 0; - struct enc_key *pkey; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* Paranoid validation of key index */ - if (index > 3) { - ret = -EINVAL; - goto out; - } - - /* validate max key length */ - if (key_length > KEY_LEN_WEP_104) { - ret = -EINVAL; - goto out; - } - - pkey = &assoc_req->wep_keys[index]; - - if (key_length > 0) { - memset(pkey, 0, sizeof(struct enc_key)); - pkey->type = KEY_TYPE_ID_WEP; - - /* Standardize the key length */ - pkey->len = (key_length > KEY_LEN_WEP_40) ? - KEY_LEN_WEP_104 : KEY_LEN_WEP_40; - memcpy(pkey->key, key_material, key_length); - } - - if (set_tx_key) { - /* Ensure the chosen key is valid */ - if (!pkey->len) { - lbs_deb_wext("key not set, so cannot enable it\n"); - ret = -EINVAL; - goto out; - } - assoc_req->wep_tx_keyidx = index; - } - - assoc_req->secinfo.wep_enabled = 1; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int validate_key_index(u16 def_index, u16 raw_index, - u16 *out_index, u16 *is_default) -{ - if (!out_index || !is_default) - return -EINVAL; - - /* Verify index if present, otherwise use default TX key index */ - if (raw_index > 0) { - if (raw_index > 4) - return -EINVAL; - *out_index = raw_index - 1; - } else { - *out_index = def_index; - *is_default = 1; - } - return 0; -} - -static void disable_wep(struct assoc_request *assoc_req) -{ - int i; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* Set Open System auth mode */ - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - - /* Clear WEP keys and mark WEP as disabled */ - assoc_req->secinfo.wep_enabled = 0; - for (i = 0; i < 4; i++) - assoc_req->wep_keys[i].len = 0; - - set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); - set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); - - lbs_deb_leave(LBS_DEB_WEXT); -} - -static void disable_wpa(struct assoc_request *assoc_req) -{ - lbs_deb_enter(LBS_DEB_WEXT); - - memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key)); - assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST; - set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); - - memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key)); - assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST; - set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); - - assoc_req->secinfo.WPAenabled = 0; - 
assoc_req->secinfo.WPA2enabled = 0; - set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); - - lbs_deb_leave(LBS_DEB_WEXT); -} - -/** - * @brief Set Encryption key - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param vwrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * @return 0 --success, otherwise fail - */ -static int lbs_set_encode(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - struct assoc_request * assoc_req; - u16 is_default = 0, index = 0, set_tx_key = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - mutex_lock(&priv->lock); - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - goto out; - } - - if (dwrq->flags & IW_ENCODE_DISABLED) { - disable_wep (assoc_req); - disable_wpa (assoc_req); - goto out; - } - - ret = validate_key_index(assoc_req->wep_tx_keyidx, - (dwrq->flags & IW_ENCODE_INDEX), - &index, &is_default); - if (ret) { - ret = -EINVAL; - goto out; - } - - /* If WEP isn't enabled, or if there is no key data but a valid - * index, set the TX key. - */ - if (!assoc_req->secinfo.wep_enabled || (dwrq->length == 0 && !is_default)) - set_tx_key = 1; - - ret = lbs_set_wep_key(assoc_req, extra, dwrq->length, index, set_tx_key); - if (ret) - goto out; - - if (dwrq->length) - set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); - if (set_tx_key) - set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); - - if (dwrq->flags & IW_ENCODE_RESTRICTED) { - priv->authtype_auto = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; - } else if (dwrq->flags & IW_ENCODE_OPEN) { - priv->authtype_auto = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - } - -out: - if (ret == 0) { - set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); - lbs_postpone_association_work(priv); - } else { - lbs_cancel_association_work(priv); - } - mutex_unlock(&priv->lock); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -/** - * @brief Get Extended Encryption key (WPA/802.1x and WEP) - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param vwrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * @return 0 on success, otherwise failure - */ -static int lbs_get_encodeext(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, - char *extra) -{ - int ret = -EINVAL; - struct lbs_private *priv = dev->ml_priv; - struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int index, max_key_len; - - lbs_deb_enter(LBS_DEB_WEXT); - - max_key_len = dwrq->length - sizeof(*ext); - if (max_key_len < 0) - goto out; - - index = dwrq->flags & IW_ENCODE_INDEX; - if (index) { - if (index < 1 || index > 4) - goto out; - index--; - } else { - index = priv->wep_tx_keyidx; - } - - if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) && - ext->alg != IW_ENCODE_ALG_WEP) { - if (index != 0 || priv->mode != IW_MODE_INFRA) - goto out; - } - - dwrq->flags = index + 1; - memset(ext, 0, sizeof(*ext)); - - if ( !priv->secinfo.wep_enabled - && !priv->secinfo.WPAenabled - && !priv->secinfo.WPA2enabled) { - ext->alg = IW_ENCODE_ALG_NONE; - ext->key_len = 0; - dwrq->flags |= IW_ENCODE_DISABLED; - } else { - u8 *key = NULL; - - if ( priv->secinfo.wep_enabled - && !priv->secinfo.WPAenabled - && !priv->secinfo.WPA2enabled) { - /* WEP */ - ext->alg = IW_ENCODE_ALG_WEP; - ext->key_len = 
priv->wep_keys[index].len; - key = &priv->wep_keys[index].key[0]; - } else if ( !priv->secinfo.wep_enabled - && (priv->secinfo.WPAenabled || - priv->secinfo.WPA2enabled)) { - /* WPA */ - struct enc_key * pkey = NULL; - - if ( priv->wpa_mcast_key.len - && (priv->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED)) - pkey = &priv->wpa_mcast_key; - else if ( priv->wpa_unicast_key.len - && (priv->wpa_unicast_key.flags & KEY_INFO_WPA_ENABLED)) - pkey = &priv->wpa_unicast_key; - - if (pkey) { - if (pkey->type == KEY_TYPE_ID_AES) { - ext->alg = IW_ENCODE_ALG_CCMP; - } else { - ext->alg = IW_ENCODE_ALG_TKIP; - } - ext->key_len = pkey->len; - key = &pkey->key[0]; - } else { - ext->alg = IW_ENCODE_ALG_TKIP; - ext->key_len = 0; - } - } else { - goto out; - } - - if (ext->key_len > max_key_len) { - ret = -E2BIG; - goto out; - } - - if (ext->key_len) - memcpy(ext->key, key, ext->key_len); - else - dwrq->flags |= IW_ENCODE_NOKEY; - dwrq->flags |= IW_ENCODE_ENABLED; - } - ret = 0; - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -/** - * @brief Set Encryption key Extended (WPA/802.1x and WEP) - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param vwrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * @return 0 --success, otherwise fail - */ -static int lbs_set_encodeext(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, - char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int alg = ext->alg; - struct assoc_request * assoc_req; - - lbs_deb_enter(LBS_DEB_WEXT); - - mutex_lock(&priv->lock); - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - goto out; - } - - if ((alg == IW_ENCODE_ALG_NONE) || (dwrq->flags & IW_ENCODE_DISABLED)) { - disable_wep (assoc_req); - disable_wpa (assoc_req); - } else if (alg == IW_ENCODE_ALG_WEP) { - u16 is_default = 0, index, set_tx_key = 0; - - ret = validate_key_index(assoc_req->wep_tx_keyidx, - (dwrq->flags & IW_ENCODE_INDEX), - &index, &is_default); - if (ret) - goto out; - - /* If WEP isn't enabled, or if there is no key data but a valid - * index, or if the set-TX-key flag was passed, set the TX key. 
- */ - if ( !assoc_req->secinfo.wep_enabled - || (dwrq->length == 0 && !is_default) - || (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) - set_tx_key = 1; - - /* Copy key to driver */ - ret = lbs_set_wep_key(assoc_req, ext->key, ext->key_len, index, - set_tx_key); - if (ret) - goto out; - - if (dwrq->flags & IW_ENCODE_RESTRICTED) { - priv->authtype_auto = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; - } else if (dwrq->flags & IW_ENCODE_OPEN) { - priv->authtype_auto = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - } - - /* Mark the various WEP bits as modified */ - set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); - if (dwrq->length) - set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); - if (set_tx_key) - set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); - } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) { - struct enc_key * pkey; - - /* validate key length */ - if (((alg == IW_ENCODE_ALG_TKIP) - && (ext->key_len != KEY_LEN_WPA_TKIP)) - || ((alg == IW_ENCODE_ALG_CCMP) - && (ext->key_len != KEY_LEN_WPA_AES))) { - lbs_deb_wext("invalid size %d for key of alg " - "type %d\n", - ext->key_len, - alg); - ret = -EINVAL; - goto out; - } - - if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { - pkey = &assoc_req->wpa_mcast_key; - set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); - } else { - pkey = &assoc_req->wpa_unicast_key; - set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); - } - - memset(pkey, 0, sizeof (struct enc_key)); - memcpy(pkey->key, ext->key, ext->key_len); - pkey->len = ext->key_len; - if (pkey->len) - pkey->flags |= KEY_INFO_WPA_ENABLED; - - /* Do this after zeroing key structure */ - if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { - pkey->flags |= KEY_INFO_WPA_MCAST; - } else { - pkey->flags |= KEY_INFO_WPA_UNICAST; - } - - if (alg == IW_ENCODE_ALG_TKIP) { - pkey->type = KEY_TYPE_ID_TKIP; - } else if (alg == IW_ENCODE_ALG_CCMP) { - pkey->type = KEY_TYPE_ID_AES; - } - - /* If WPA isn't enabled yet, do that now */ - if ( assoc_req->secinfo.WPAenabled == 0 - && assoc_req->secinfo.WPA2enabled == 0) { - assoc_req->secinfo.WPAenabled = 1; - assoc_req->secinfo.WPA2enabled = 1; - set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); - } - - /* Only disable wep if necessary: can't waste time here. */ - if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE) - disable_wep(assoc_req); - } - -out: - if (ret == 0) { - /* 802.1x and WPA rekeying must happen as quickly as possible, - * especially during the 4-way handshake; thus if in - * infrastructure mode, and either (a) 802.1x is enabled or - * (b) WPA is being used, set the key right away. 
- */ - if (assoc_req->mode == IW_MODE_INFRA && - ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) || - (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) || - assoc_req->secinfo.WPAenabled || - assoc_req->secinfo.WPA2enabled)) { - lbs_do_association_work(priv); - } else - lbs_postpone_association_work(priv); - } else { - lbs_cancel_association_work(priv); - } - mutex_unlock(&priv->lock); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - - -static int lbs_set_genie(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, - char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - struct assoc_request * assoc_req; - - lbs_deb_enter(LBS_DEB_WEXT); - - mutex_lock(&priv->lock); - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - goto out; - } - - if (dwrq->length > MAX_WPA_IE_LEN || - (dwrq->length && extra == NULL)) { - ret = -EINVAL; - goto out; - } - - if (dwrq->length) { - memcpy(&assoc_req->wpa_ie[0], extra, dwrq->length); - assoc_req->wpa_ie_len = dwrq->length; - } else { - memset(&assoc_req->wpa_ie[0], 0, sizeof(priv->wpa_ie)); - assoc_req->wpa_ie_len = 0; - } - -out: - if (ret == 0) { - set_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags); - lbs_postpone_association_work(priv); - } else { - lbs_cancel_association_work(priv); - } - mutex_unlock(&priv->lock); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_genie(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, - char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (priv->wpa_ie_len == 0) { - dwrq->length = 0; - goto out; - } - - if (dwrq->length < priv->wpa_ie_len) { - ret = -E2BIG; - goto out; - } - - dwrq->length = priv->wpa_ie_len; - memcpy(extra, &priv->wpa_ie[0], priv->wpa_ie_len); - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - - -static int lbs_set_auth(struct net_device *dev, - struct iw_request_info *info, - struct iw_param *dwrq, - char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - struct assoc_request * assoc_req; - int ret = 0; - int updated = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - mutex_lock(&priv->lock); - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - goto out; - } - - switch (dwrq->flags & IW_AUTH_INDEX) { - case IW_AUTH_PRIVACY_INVOKED: - case IW_AUTH_RX_UNENCRYPTED_EAPOL: - case IW_AUTH_TKIP_COUNTERMEASURES: - case IW_AUTH_CIPHER_PAIRWISE: - case IW_AUTH_CIPHER_GROUP: - case IW_AUTH_DROP_UNENCRYPTED: - /* - * libertas does not use these parameters - */ - break; - - case IW_AUTH_KEY_MGMT: - assoc_req->secinfo.key_mgmt = dwrq->value; - updated = 1; - break; - - case IW_AUTH_WPA_VERSION: - if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) { - assoc_req->secinfo.WPAenabled = 0; - assoc_req->secinfo.WPA2enabled = 0; - disable_wpa (assoc_req); - } - if (dwrq->value & IW_AUTH_WPA_VERSION_WPA) { - assoc_req->secinfo.WPAenabled = 1; - assoc_req->secinfo.wep_enabled = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - } - if (dwrq->value & IW_AUTH_WPA_VERSION_WPA2) { - assoc_req->secinfo.WPA2enabled = 1; - assoc_req->secinfo.wep_enabled = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - } - updated = 1; - break; - - case IW_AUTH_80211_AUTH_ALG: - if (dwrq->value & IW_AUTH_ALG_SHARED_KEY) { - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; - } else if (dwrq->value & 
IW_AUTH_ALG_OPEN_SYSTEM) { - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - } else if (dwrq->value & IW_AUTH_ALG_LEAP) { - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_LEAP; - } else { - ret = -EINVAL; - } - updated = 1; - break; - - case IW_AUTH_WPA_ENABLED: - if (dwrq->value) { - if (!assoc_req->secinfo.WPAenabled && - !assoc_req->secinfo.WPA2enabled) { - assoc_req->secinfo.WPAenabled = 1; - assoc_req->secinfo.WPA2enabled = 1; - assoc_req->secinfo.wep_enabled = 0; - assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - } - } else { - assoc_req->secinfo.WPAenabled = 0; - assoc_req->secinfo.WPA2enabled = 0; - disable_wpa (assoc_req); - } - updated = 1; - break; - - default: - ret = -EOPNOTSUPP; - break; - } - -out: - if (ret == 0) { - if (updated) - set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); - lbs_postpone_association_work(priv); - } else if (ret != -EOPNOTSUPP) { - lbs_cancel_association_work(priv); - } - mutex_unlock(&priv->lock); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_auth(struct net_device *dev, - struct iw_request_info *info, - struct iw_param *dwrq, - char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - switch (dwrq->flags & IW_AUTH_INDEX) { - case IW_AUTH_KEY_MGMT: - dwrq->value = priv->secinfo.key_mgmt; - break; - - case IW_AUTH_WPA_VERSION: - dwrq->value = 0; - if (priv->secinfo.WPAenabled) - dwrq->value |= IW_AUTH_WPA_VERSION_WPA; - if (priv->secinfo.WPA2enabled) - dwrq->value |= IW_AUTH_WPA_VERSION_WPA2; - if (!dwrq->value) - dwrq->value |= IW_AUTH_WPA_VERSION_DISABLED; - break; - - case IW_AUTH_80211_AUTH_ALG: - dwrq->value = priv->secinfo.auth_mode; - break; - - case IW_AUTH_WPA_ENABLED: - if (priv->secinfo.WPAenabled && priv->secinfo.WPA2enabled) - dwrq->value = 1; - break; - - default: - ret = -EOPNOTSUPP; - } - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - - -static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - int ret = 0; - struct lbs_private *priv = dev->ml_priv; - s16 dbm = (s16) vwrq->value; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (vwrq->disabled) { - lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0); - goto out; - } - - if (vwrq->fixed == 0) { - /* User requests automatic tx power control, however there are - * many auto tx settings. For now use firmware defaults until - * we come up with a good way to expose these to the user. */ - if (priv->fwrelease < 0x09000000) { - ret = lbs_set_power_adapt_cfg(priv, 1, - POW_ADAPT_DEFAULT_P0, - POW_ADAPT_DEFAULT_P1, - POW_ADAPT_DEFAULT_P2); - if (ret) - goto out; - } - ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1, - TPC_DEFAULT_P2, 1); - if (ret) - goto out; - dbm = priv->txpower_max; - } else { - /* Userspace check in iwrange if it should use dBm or mW, - * therefore this should never happen... 
Jean II */ - if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) { - ret = -EOPNOTSUPP; - goto out; - } - - /* Validate requested power level against firmware allowed - * levels */ - if (priv->txpower_min && (dbm < priv->txpower_min)) { - ret = -EINVAL; - goto out; - } - - if (priv->txpower_max && (dbm > priv->txpower_max)) { - ret = -EINVAL; - goto out; - } - if (priv->fwrelease < 0x09000000) { - ret = lbs_set_power_adapt_cfg(priv, 0, - POW_ADAPT_DEFAULT_P0, - POW_ADAPT_DEFAULT_P1, - POW_ADAPT_DEFAULT_P2); - if (ret) - goto out; - } - ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1, - TPC_DEFAULT_P2, 1); - if (ret) - goto out; - } - - /* If the radio was off, turn it on */ - if (!priv->radio_on) { - ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1); - if (ret) - goto out; - } - - lbs_deb_wext("txpower set %d dBm\n", dbm); - - ret = lbs_set_tx_power(priv, dbm); - -out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - /* - * Note : if dwrq->flags != 0, we should get the relevant SSID from - * the SSID list... - */ - - /* - * Get the current SSID - */ - if (priv->connect_status == LBS_CONNECTED) { - memcpy(extra, priv->curbssparams.ssid, - priv->curbssparams.ssid_len); - } else { - memset(extra, 0, 32); - } - /* - * If none, we may want to get the one that was set - */ - - dwrq->length = priv->curbssparams.ssid_len; - - dwrq->flags = 1; /* active */ - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - u8 ssid[IEEE80211_MAX_SSID_LEN]; - u8 ssid_len = 0; - struct assoc_request * assoc_req; - int in_ssid_len = dwrq->length; - DECLARE_SSID_BUF(ssid_buf); - - lbs_deb_enter(LBS_DEB_WEXT); - - if (!priv->radio_on) { - ret = -EINVAL; - goto out; - } - - /* Check the size of the string */ - if (in_ssid_len > IEEE80211_MAX_SSID_LEN) { - ret = -E2BIG; - goto out; - } - - memset(&ssid, 0, sizeof(ssid)); - - if (!dwrq->flags || !in_ssid_len) { - /* "any" SSID requested; leave SSID blank */ - } else { - /* Specific SSID requested */ - memcpy(&ssid, extra, in_ssid_len); - ssid_len = in_ssid_len; - } - - if (!ssid_len) { - lbs_deb_wext("requested any SSID\n"); - } else { - lbs_deb_wext("requested SSID '%s'\n", - print_ssid(ssid_buf, ssid, ssid_len)); - } - -out: - mutex_lock(&priv->lock); - if (ret == 0) { - /* Get or create the current association request */ - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - ret = -ENOMEM; - } else { - /* Copy the SSID to the association request */ - memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN); - assoc_req->ssid_len = ssid_len; - set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); - lbs_postpone_association_work(priv); - } - } - - /* Cancel the association request if there was an error */ - if (ret != 0) { - lbs_cancel_association_work(priv); - } - - mutex_unlock(&priv->lock); - - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} - -#ifdef CONFIG_LIBERTAS_MESH -static int lbs_mesh_get_essid(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - - lbs_deb_enter(LBS_DEB_WEXT); - - memcpy(extra, priv->mesh_ssid, priv->mesh_ssid_len); - - 
dwrq->length = priv->mesh_ssid_len; - - dwrq->flags = 1; /* active */ - - lbs_deb_leave(LBS_DEB_WEXT); - return 0; -} - -static int lbs_mesh_set_essid(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - int ret = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (!priv->radio_on) { - ret = -EINVAL; - goto out; - } - - /* Check the size of the string */ - if (dwrq->length > IEEE80211_MAX_SSID_LEN) { - ret = -E2BIG; - goto out; - } - - if (!dwrq->flags || !dwrq->length) { - ret = -EINVAL; - goto out; - } else { - /* Specific SSID requested */ - memcpy(priv->mesh_ssid, extra, dwrq->length); - priv->mesh_ssid_len = dwrq->length; - } - - lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, - priv->channel); - out: - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); - return ret; -} -#endif - -/** - * @brief Connect to the AP or Ad-hoc Network with specific bssid - * - * @param dev A pointer to net_device structure - * @param info A pointer to iw_request_info structure - * @param awrq A pointer to iw_param structure - * @param extra A pointer to extra data buf - * @return 0 --success, otherwise fail - */ -static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - struct lbs_private *priv = dev->ml_priv; - struct assoc_request * assoc_req; - int ret = 0; - - lbs_deb_enter(LBS_DEB_WEXT); - - if (!priv->radio_on) - return -EINVAL; - - if (awrq->sa_family != ARPHRD_ETHER) - return -EINVAL; - - lbs_deb_wext("ASSOC: WAP: sa_data %pM\n", awrq->sa_data); - - mutex_lock(&priv->lock); - - /* Get or create the current association request */ - assoc_req = lbs_get_association_request(priv); - if (!assoc_req) { - lbs_cancel_association_work(priv); - ret = -ENOMEM; - } else { - /* Copy the BSSID to the association request */ - memcpy(&assoc_req->bssid, awrq->sa_data, ETH_ALEN); - set_bit(ASSOC_FLAG_BSSID, &assoc_req->flags); - lbs_postpone_association_work(priv); - } - - mutex_unlock(&priv->lock); - - return ret; -} - -/* - * iwconfig settable callbacks - */ -static const iw_handler lbs_handler[] = { - (iw_handler) NULL, /* SIOCSIWCOMMIT */ - (iw_handler) lbs_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) lbs_set_freq, /* SIOCSIWFREQ */ - (iw_handler) lbs_get_freq, /* SIOCGIWFREQ */ - (iw_handler) lbs_set_mode, /* SIOCSIWMODE */ - (iw_handler) lbs_get_mode, /* SIOCGIWMODE */ - (iw_handler) NULL, /* SIOCSIWSENS */ - (iw_handler) NULL, /* SIOCGIWSENS */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) lbs_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) NULL, /* SIOCGIWSTATS */ - iw_handler_set_spy, /* SIOCSIWSPY */ - iw_handler_get_spy, /* SIOCGIWSPY */ - iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ - iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ - (iw_handler) lbs_set_wap, /* SIOCSIWAP */ - (iw_handler) lbs_get_wap, /* SIOCGIWAP */ - (iw_handler) NULL, /* SIOCSIWMLME */ - (iw_handler) NULL, /* SIOCGIWAPLIST - deprecated */ - (iw_handler) lbs_set_scan, /* SIOCSIWSCAN */ - (iw_handler) lbs_get_scan, /* SIOCGIWSCAN */ - (iw_handler) lbs_set_essid, /* SIOCSIWESSID */ - (iw_handler) lbs_get_essid, /* SIOCGIWESSID */ - (iw_handler) lbs_set_nick, /* SIOCSIWNICKN */ - (iw_handler) lbs_get_nick, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - 
(iw_handler) lbs_set_rate,	/* SIOCSIWRATE */
-	(iw_handler) lbs_get_rate,	/* SIOCGIWRATE */
-	(iw_handler) lbs_set_rts,	/* SIOCSIWRTS */
-	(iw_handler) lbs_get_rts,	/* SIOCGIWRTS */
-	(iw_handler) lbs_set_frag,	/* SIOCSIWFRAG */
-	(iw_handler) lbs_get_frag,	/* SIOCGIWFRAG */
-	(iw_handler) lbs_set_txpow,	/* SIOCSIWTXPOW */
-	(iw_handler) lbs_get_txpow,	/* SIOCGIWTXPOW */
-	(iw_handler) lbs_set_retry,	/* SIOCSIWRETRY */
-	(iw_handler) lbs_get_retry,	/* SIOCGIWRETRY */
-	(iw_handler) lbs_set_encode,	/* SIOCSIWENCODE */
-	(iw_handler) lbs_get_encode,	/* SIOCGIWENCODE */
-	(iw_handler) lbs_set_power,	/* SIOCSIWPOWER */
-	(iw_handler) lbs_get_power,	/* SIOCGIWPOWER */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_genie,	/* SIOCSIWGENIE */
-	(iw_handler) lbs_get_genie,	/* SIOCGIWGENIE */
-	(iw_handler) lbs_set_auth,	/* SIOCSIWAUTH */
-	(iw_handler) lbs_get_auth,	/* SIOCGIWAUTH */
-	(iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
-	(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
-	(iw_handler) NULL,	/* SIOCSIWPMKSA */
-};
-struct iw_handler_def lbs_handler_def = {
-	.num_standard	= ARRAY_SIZE(lbs_handler),
-	.standard	= (iw_handler *) lbs_handler,
-	.get_wireless_stats = lbs_get_wireless_stats,
-};
-
-#ifdef CONFIG_LIBERTAS_MESH
-static const iw_handler mesh_wlan_handler[] = {
-	(iw_handler) NULL,	/* SIOCSIWCOMMIT */
-	(iw_handler) lbs_get_name,	/* SIOCGIWNAME */
-	(iw_handler) NULL,	/* SIOCSIWNWID */
-	(iw_handler) NULL,	/* SIOCGIWNWID */
-	(iw_handler) lbs_mesh_set_freq,	/* SIOCSIWFREQ */
-	(iw_handler) lbs_get_freq,	/* SIOCGIWFREQ */
-	(iw_handler) NULL,	/* SIOCSIWMODE */
-	(iw_handler) mesh_wlan_get_mode,	/* SIOCGIWMODE */
-	(iw_handler) NULL,	/* SIOCSIWSENS */
-	(iw_handler) NULL,	/* SIOCGIWSENS */
-	(iw_handler) NULL,	/* SIOCSIWRANGE */
-	(iw_handler) lbs_get_range,	/* SIOCGIWRANGE */
-	(iw_handler) NULL,	/* SIOCSIWPRIV */
-	(iw_handler) NULL,	/* SIOCGIWPRIV */
-	(iw_handler) NULL,	/* SIOCSIWSTATS */
-	(iw_handler) NULL,	/* SIOCGIWSTATS */
-	iw_handler_set_spy,	/* SIOCSIWSPY */
-	iw_handler_get_spy,	/* SIOCGIWSPY */
-	iw_handler_set_thrspy,	/* SIOCSIWTHRSPY */
-	iw_handler_get_thrspy,	/* SIOCGIWTHRSPY */
-	(iw_handler) NULL,	/* SIOCSIWAP */
-	(iw_handler) NULL,	/* SIOCGIWAP */
-	(iw_handler) NULL,	/* SIOCSIWMLME */
-	(iw_handler) NULL,	/* SIOCGIWAPLIST - deprecated */
-	(iw_handler) lbs_set_scan,	/* SIOCSIWSCAN */
-	(iw_handler) lbs_get_scan,	/* SIOCGIWSCAN */
-	(iw_handler) lbs_mesh_set_essid,/* SIOCSIWESSID */
-	(iw_handler) lbs_mesh_get_essid,/* SIOCGIWESSID */
-	(iw_handler) NULL,	/* SIOCSIWNICKN */
-	(iw_handler) mesh_get_nick,	/* SIOCGIWNICKN */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_rate,	/* SIOCSIWRATE */
-	(iw_handler) lbs_get_rate,	/* SIOCGIWRATE */
-	(iw_handler) lbs_set_rts,	/* SIOCSIWRTS */
-	(iw_handler) lbs_get_rts,	/* SIOCGIWRTS */
-	(iw_handler) lbs_set_frag,	/* SIOCSIWFRAG */
-	(iw_handler) lbs_get_frag,	/* SIOCGIWFRAG */
-	(iw_handler) lbs_set_txpow,	/* SIOCSIWTXPOW */
-	(iw_handler) lbs_get_txpow,	/* SIOCGIWTXPOW */
-	(iw_handler) lbs_set_retry,	/* SIOCSIWRETRY */
-	(iw_handler) lbs_get_retry,	/* SIOCGIWRETRY */
-	(iw_handler) lbs_set_encode,	/* SIOCSIWENCODE */
-	(iw_handler) lbs_get_encode,	/* SIOCGIWENCODE */
-	(iw_handler) lbs_set_power,	/* SIOCSIWPOWER */
-	(iw_handler) lbs_get_power,	/* SIOCGIWPOWER */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) NULL,	/* -- hole -- */
-	(iw_handler) lbs_set_genie,	/* SIOCSIWGENIE */
-	(iw_handler) lbs_get_genie,	/* SIOCGIWGENIE */
-	(iw_handler) lbs_set_auth,	/* SIOCSIWAUTH */
-	(iw_handler) lbs_get_auth,	/* SIOCGIWAUTH */
-	(iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
-	(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
-	(iw_handler) NULL,	/* SIOCSIWPMKSA */
-};
-
-struct iw_handler_def mesh_handler_def = {
-	.num_standard	= ARRAY_SIZE(mesh_wlan_handler),
-	.standard	= (iw_handler *) mesh_wlan_handler,
-	.get_wireless_stats = lbs_get_wireless_stats,
-};
-#endif
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
deleted file mode 100644
index f3f19fe8c6c6e15b6f56364203b1c83f89030701..0000000000000000000000000000000000000000
--- a/drivers/net/wireless/libertas/wext.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * This file contains definition for IOCTL call.
- */
-#ifndef _LBS_WEXT_H_
-#define _LBS_WEXT_H_
-
-void lbs_send_disconnect_notification(struct lbs_private *priv);
-void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
-
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
-	struct lbs_private *priv,
-	u8 band,
-	u16 channel);
-
-extern struct iw_handler_def lbs_handler_def;
-
-#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index c445500ffc61b95e1e5834aaab6c8a8f2a4e7df6..b172f5d87a3b4e279bced500cc9c81942e8aaaf5 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -538,7 +538,8 @@ static void if_usb_receive_fwload(struct urb *urb)
 		return;
 	}
 
-	syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
+	syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
+			GFP_ATOMIC);
 	if (!syncfwheader) {
 		lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
 		kfree_skb(skb);
@@ -546,8 +547,6 @@ static void if_usb_receive_fwload(struct urb *urb)
 		return;
 	}
 
-	memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
-
 	if (!syncfwheader->cmd) {
 		lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
 		lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index fbbaaae7a1aefe4274c5ec0d31d58c2d0acdbe83..ad77b92d0b41e260d2df08ad483940d1e95253a1 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -253,6 +253,9 @@ struct lbtf_private {
 	u8 fw_ready;
 	u8 surpriseremoved;
 	struct sk_buff_head bc_ps_buf;
+
+	/* Most recently reported noise in dBm */
+	s8 noise;
 };
 
 /* 802.11-related definitions */
@@ -316,7 +319,7 @@ struct cmd_header {
 	__le16 size;
 	__le16 seqnum;
 	__le16 result;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ctrl_node {
 	struct list_head list;
@@ -369,7 +372,7 @@ struct cmd_ds_get_hw_spec {
 
 	/*FW/HW capability */
 	__le32 fwcapinfo;
-} __attribute__ ((packed));
+} __packed;
 
 struct cmd_ds_mac_control {
 	struct cmd_header hdr;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 817fffc0de4b5093c869fc11a5f72aa9ca0ab3ba..9278b3c8ee306c4b59c610c156257daf14d47179 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -525,6 +525,22 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
 	lbtf_deb_leave(LBTF_DEB_MACOPS);
 }
 
+static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
+				struct survey_info *survey)
+{
+	struct lbtf_private *priv = hw->priv;
+	struct ieee80211_conf *conf = 
&hw->conf; + + if (idx != 0) + return -ENOENT; + + survey->channel = conf->channel; + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = priv->noise; + + return 0; +} + static const struct ieee80211_ops lbtf_ops = { .tx = lbtf_op_tx, .start = lbtf_op_start, @@ -535,6 +551,7 @@ static const struct ieee80211_ops lbtf_ops = { .prepare_multicast = lbtf_op_prepare_multicast, .configure_filter = lbtf_op_configure_filter, .bss_info_changed = lbtf_op_bss_info_changed, + .get_survey = lbtf_op_get_survey, }; int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) @@ -555,6 +572,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) stats.freq = priv->cur_freq; stats.band = IEEE80211_BAND_2GHZ; stats.signal = prxpd->snr; + priv->noise = prxpd->nf; /* Marvell rate index has a hole at value 4 */ if (prxpd->rx_rate > 4) --prxpd->rx_rate; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6f8cb3ee6fed57610ed56756982daf4a4497b493..01ad7f77383a8f18a58a69dc6648ba907618e069 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -317,7 +317,7 @@ struct hwsim_radiotap_hdr { u8 rt_rate; __le16 rt_channel; __le16 rt_chbitmask; -} __attribute__ ((packed)); +} __packed; static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, @@ -486,8 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct ieee80211_rx_status rx_status; if (data->idle) { - printk(KERN_DEBUG "%s: Trying to TX when idle - reject\n", - wiphy_name(hw->wiphy)); + wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n"); return false; } @@ -576,7 +575,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) static int mac80211_hwsim_start(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; - printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); + wiphy_debug(hw->wiphy, "%s\n", __func__); data->started = 1; return 0; } @@ -587,16 +586,15 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw) struct mac80211_hwsim_data *data = hw->priv; data->started = 0; del_timer(&data->beacon_timer); - printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); + wiphy_debug(hw->wiphy, "%s\n", __func__); } static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", - wiphy_name(hw->wiphy), __func__, vif->type, - vif->addr); + wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", + __func__, vif->type, vif->addr); hwsim_set_magic(vif); return 0; } @@ -605,9 +603,8 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, static void mac80211_hwsim_remove_interface( struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", - wiphy_name(hw->wiphy), __func__, vif->type, - vif->addr); + wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", + __func__, vif->type, vif->addr); hwsim_check_magic(vif); hwsim_clear_magic(vif); } @@ -670,13 +667,14 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) [IEEE80211_SMPS_DYNAMIC] = "dynamic", }; - printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", - wiphy_name(hw->wiphy), __func__, - conf->channel->center_freq, - hwsim_chantypes[conf->channel_type], - !!(conf->flags & IEEE80211_CONF_IDLE), - !!(conf->flags & IEEE80211_CONF_PS), - smps_modes[conf->smps_mode]); + wiphy_debug(hw->wiphy, + "%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", + __func__, + 
conf->channel->center_freq, + hwsim_chantypes[conf->channel_type], + !!(conf->flags & IEEE80211_CONF_IDLE), + !!(conf->flags & IEEE80211_CONF_PS), + smps_modes[conf->smps_mode]); data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); @@ -696,7 +694,7 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, { struct mac80211_hwsim_data *data = hw->priv; - printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); + wiphy_debug(hw->wiphy, "%s\n", __func__); data->rx_filter = 0; if (*total_flags & FIF_PROMISC_IN_BSS) @@ -717,26 +715,23 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, hwsim_check_magic(vif); - printk(KERN_DEBUG "%s:%s(changed=0x%x)\n", - wiphy_name(hw->wiphy), __func__, changed); + wiphy_debug(hw->wiphy, "%s(changed=0x%x)\n", __func__, changed); if (changed & BSS_CHANGED_BSSID) { - printk(KERN_DEBUG "%s:%s: BSSID changed: %pM\n", - wiphy_name(hw->wiphy), __func__, - info->bssid); + wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n", + __func__, info->bssid); memcpy(vp->bssid, info->bssid, ETH_ALEN); } if (changed & BSS_CHANGED_ASSOC) { - printk(KERN_DEBUG " %s: ASSOC: assoc=%d aid=%d\n", - wiphy_name(hw->wiphy), info->assoc, info->aid); + wiphy_debug(hw->wiphy, " ASSOC: assoc=%d aid=%d\n", + info->assoc, info->aid); vp->assoc = info->assoc; vp->aid = info->aid; } if (changed & BSS_CHANGED_BEACON_INT) { - printk(KERN_DEBUG " %s: BCNINT: %d\n", - wiphy_name(hw->wiphy), info->beacon_int); + wiphy_debug(hw->wiphy, " BCNINT: %d\n", info->beacon_int); data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; if (WARN_ON(!data->beacon_int)) data->beacon_int = 1; @@ -746,31 +741,28 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_ERP_CTS_PROT) { - printk(KERN_DEBUG " %s: ERP_CTS_PROT: %d\n", - wiphy_name(hw->wiphy), info->use_cts_prot); + wiphy_debug(hw->wiphy, " ERP_CTS_PROT: %d\n", + info->use_cts_prot); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { - printk(KERN_DEBUG " %s: ERP_PREAMBLE: %d\n", - wiphy_name(hw->wiphy), info->use_short_preamble); + wiphy_debug(hw->wiphy, " ERP_PREAMBLE: %d\n", + info->use_short_preamble); } if (changed & BSS_CHANGED_ERP_SLOT) { - printk(KERN_DEBUG " %s: ERP_SLOT: %d\n", - wiphy_name(hw->wiphy), info->use_short_slot); + wiphy_debug(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot); } if (changed & BSS_CHANGED_HT) { - printk(KERN_DEBUG " %s: HT: op_mode=0x%x, chantype=%s\n", - wiphy_name(hw->wiphy), - info->ht_operation_mode, - hwsim_chantypes[info->channel_type]); + wiphy_debug(hw->wiphy, " HT: op_mode=0x%x, chantype=%s\n", + info->ht_operation_mode, + hwsim_chantypes[info->channel_type]); } if (changed & BSS_CHANGED_BASIC_RATES) { - printk(KERN_DEBUG " %s: BASIC_RATES: 0x%llx\n", - wiphy_name(hw->wiphy), - (unsigned long long) info->basic_rates); + wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n", + (unsigned long long) info->basic_rates); } } @@ -824,10 +816,11 @@ static int mac80211_hwsim_conf_tx( struct ieee80211_hw *hw, u16 queue, const struct ieee80211_tx_queue_params *params) { - printk(KERN_DEBUG "%s:%s (queue=%d txop=%d cw_min=%d cw_max=%d " - "aifs=%d)\n", - wiphy_name(hw->wiphy), __func__, queue, - params->txop, params->cw_min, params->cw_max, params->aifs); + wiphy_debug(hw->wiphy, + "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n", + __func__, queue, + params->txop, params->cw_min, + params->cw_max, params->aifs); return 0; } @@ -837,8 +830,7 @@ static int mac80211_hwsim_get_survey( { struct ieee80211_conf *conf = &hw->conf; - 
printk(KERN_DEBUG "%s:%s (idx=%d)\n", - wiphy_name(hw->wiphy), __func__, idx); + wiphy_debug(hw->wiphy, "%s (idx=%d)\n", __func__, idx); if (idx != 0) return -ENOENT; @@ -1108,8 +1100,9 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) if (!vp->assoc) return; - printk(KERN_DEBUG "%s:%s: send PS-Poll to %pM for aid %d\n", - wiphy_name(data->hw->wiphy), __func__, vp->bssid, vp->aid); + wiphy_debug(data->hw->wiphy, + "%s: send PS-Poll to %pM for aid %d\n", + __func__, vp->bssid, vp->aid); skb = dev_alloc_skb(sizeof(*pspoll)); if (!skb) @@ -1137,8 +1130,9 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, if (!vp->assoc) return; - printk(KERN_DEBUG "%s:%s: send data::nullfunc to %pM ps=%d\n", - wiphy_name(data->hw->wiphy), __func__, vp->bssid, ps); + wiphy_debug(data->hw->wiphy, + "%s: send data::nullfunc to %pM ps=%d\n", + __func__, vp->bssid, ps); skb = dev_alloc_skb(sizeof(*hdr)); if (!skb) @@ -1291,6 +1285,11 @@ static int __init init_mac80211_hwsim(void) hw->wiphy->n_addresses = 2; hw->wiphy->addresses = data->addresses; + if (fake_hw_scan) { + hw->wiphy->max_scan_ssids = 255; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + } + hw->channel_change_time = 1; hw->queues = 4; hw->wiphy->interface_modes = @@ -1468,9 +1467,8 @@ static int __init init_mac80211_hwsim(void) break; } - printk(KERN_DEBUG "%s: hwaddr %pM registered\n", - wiphy_name(hw->wiphy), - hw->wiphy->perm_addr); + wiphy_debug(hw->wiphy, "hwaddr %pm registered\n", + hw->wiphy->perm_addr); data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir); diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 808adb9090952347feeaafe03b801bb8ae681b62..d761ed2d8af46b8d8be19eab01101dc511321b00 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -86,7 +86,7 @@ struct rxd_ops { void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr); void (*rxd_refill)(void *rxd, dma_addr_t addr, int len); int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status, - __le16 *qos); + __le16 *qos, s8 *noise); }; struct mwl8k_device_info { @@ -109,7 +109,7 @@ struct mwl8k_rx_queue { dma_addr_t rxd_dma; struct { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(dma) + DEFINE_DMA_UNMAP_ADDR(dma); } *buf; }; @@ -207,6 +207,9 @@ struct mwl8k_priv { /* Tasklet to perform RX. */ struct tasklet_struct poll_rx_task; + + /* Most recently reported noise in dBm */ + s8 noise; }; /* Per interface specific private data */ @@ -314,13 +317,15 @@ static const struct ieee80211_rate mwl8k_rates_50[] = { #define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ #define MWL8K_CMD_UPDATE_STADB 0x1123 -static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) +static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) { + u16 command = le16_to_cpu(cmd); + #define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\ snprintf(buf, bufsize, "%s", #x);\ return buf;\ } while (0) - switch (cmd & ~0x8000) { + switch (command & ~0x8000) { MWL8K_CMDNAME(CODE_DNLD); MWL8K_CMDNAME(GET_HW_SPEC); MWL8K_CMDNAME(SET_HW_SPEC); @@ -426,7 +431,7 @@ struct mwl8k_cmd_pkt { __u8 macid; __le16 result; char payload[0]; -} __attribute__((packed)); +} __packed; /* * Firmware loading. @@ -632,7 +637,7 @@ struct mwl8k_dma_data { __le16 fwlen; struct ieee80211_hdr wh; char data[0]; -} __attribute__((packed)); +} __packed; /* Routines to add/remove DMA header from skb. 
*/ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) @@ -711,7 +716,7 @@ struct mwl8k_rxd_8366_ap { __u8 rx_status; __u8 channel; __u8 rx_ctrl; -} __attribute__((packed)); +} __packed; #define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 #define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 @@ -739,7 +744,7 @@ static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len) static int mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, - __le16 *qos) + __le16 *qos, s8 *noise) { struct mwl8k_rxd_8366_ap *rxd = _rxd; @@ -750,6 +755,7 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, memset(status, 0, sizeof(*status)); status->signal = -rxd->rssi; + *noise = -rxd->noise_floor; if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { status->flag |= RX_FLAG_HT; @@ -806,7 +812,7 @@ struct mwl8k_rxd_sta { __u8 rx_ctrl; __u8 rx_status; __u8 pad2[2]; -} __attribute__((packed)); +} __packed; #define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000 #define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) @@ -837,7 +843,7 @@ static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len) static int mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, - __le16 *qos) + __le16 *qos, s8 *noise) { struct mwl8k_rxd_sta *rxd = _rxd; u16 rate_info; @@ -851,6 +857,7 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, memset(status, 0, sizeof(*status)); status->signal = -rxd->rssi; + *noise = -rxd->noise_level; status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info); status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); @@ -903,16 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); if (rxq->rxd == NULL) { - printk(KERN_ERR "%s: failed to alloc RX descriptors\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n"); return -ENOMEM; } memset(rxq->rxd, 0, size); rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { - printk(KERN_ERR "%s: failed to alloc RX skbuff list\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n"); pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); return -ENOMEM; } @@ -963,7 +968,7 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit) if (rxq->tail == MWL8K_RX_DESCS) rxq->tail = 0; rxq->buf[rx].skb = skb; - pci_unmap_addr_set(&rxq->buf[rx], dma, addr); + dma_unmap_addr_set(&rxq->buf[rx], dma, addr); rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size); priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ); @@ -984,9 +989,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index) for (i = 0; i < MWL8K_RX_DESCS; i++) { if (rxq->buf[i].skb != NULL) { pci_unmap_single(priv->pdev, - pci_unmap_addr(&rxq->buf[i], dma), + dma_unmap_addr(&rxq->buf[i], dma), MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); - pci_unmap_addr_set(&rxq->buf[i], dma, 0); + dma_unmap_addr_set(&rxq->buf[i], dma, 0); kfree_skb(rxq->buf[i].skb); rxq->buf[i].skb = NULL; @@ -1053,16 +1058,17 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size); - pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos); + pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos, + &priv->noise); if (pkt_len < 0) break; rxq->buf[rxq->head].skb = NULL; pci_unmap_single(priv->pdev, - pci_unmap_addr(&rxq->buf[rxq->head], dma), + 
dma_unmap_addr(&rxq->buf[rxq->head], dma), MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); - pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); + dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); rxq->head++; if (rxq->head == MWL8K_RX_DESCS) @@ -1120,7 +1126,7 @@ struct mwl8k_tx_desc { __le16 rate_info; __u8 peer_id; __u8 tx_frag_cnt; -} __attribute__((packed)); +} __packed; #define MWL8K_TX_DESCS 128 @@ -1139,16 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); if (txq->txd == NULL) { - printk(KERN_ERR "%s: failed to alloc TX descriptors\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n"); return -ENOMEM; } memset(txq->txd, 0, size); txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { - printk(KERN_ERR "%s: failed to alloc TX skbuff list\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n"); pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); return -ENOMEM; } @@ -1204,11 +1208,12 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw) unused++; } - printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d " - "fw_owned=%d drv_owned=%d unused=%d\n", - wiphy_name(hw->wiphy), i, - txq->len, txq->head, txq->tail, - fw_owned, drv_owned, unused); + wiphy_err(hw->wiphy, + "txq[%d] len=%d head=%d tail=%d " + "fw_owned=%d drv_owned=%d unused=%d\n", + i, + txq->len, txq->head, txq->tail, + fw_owned, drv_owned, unused); } } @@ -1252,25 +1257,23 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) if (timeout) { WARN_ON(priv->pending_tx_pkts); if (retry) { - printk(KERN_NOTICE "%s: tx rings drained\n", - wiphy_name(hw->wiphy)); + wiphy_notice(hw->wiphy, "tx rings drained\n"); } break; } if (priv->pending_tx_pkts < oldcount) { - printk(KERN_NOTICE "%s: waiting for tx rings " - "to drain (%d -> %d pkts)\n", - wiphy_name(hw->wiphy), oldcount, - priv->pending_tx_pkts); + wiphy_notice(hw->wiphy, + "waiting for tx rings to drain (%d -> %d pkts)\n", + oldcount, priv->pending_tx_pkts); retry = 1; continue; } priv->tx_wait = NULL; - printk(KERN_ERR "%s: tx rings stuck for %d ms\n", - wiphy_name(hw->wiphy), MWL8K_TX_WAIT_TIMEOUT_MS); + wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n", + MWL8K_TX_WAIT_TIMEOUT_MS); mwl8k_dump_tx_rings(hw); rc = -ETIMEDOUT; @@ -1421,8 +1424,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(priv->pdev, dma)) { - printk(KERN_DEBUG "%s: failed to dma map skb, " - "dropping TX frame.\n", wiphy_name(hw->wiphy)); + wiphy_debug(hw->wiphy, + "failed to dma map skb, dropping TX frame.\n"); dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -1538,7 +1541,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) unsigned long timeout = 0; u8 buf[32]; - cmd->result = 0xffff; + cmd->result = (__force __le16) 0xffff; dma_size = le16_to_cpu(cmd->length); dma_addr = pci_map_single(priv->pdev, cmd, dma_size, PCI_DMA_BIDIRECTIONAL); @@ -1570,10 +1573,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) PCI_DMA_BIDIRECTIONAL); if (!timeout) { - printk(KERN_ERR "%s: Command %s timeout after %u ms\n", - wiphy_name(hw->wiphy), - mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), - MWL8K_CMD_TIMEOUT_MS); + wiphy_err(hw->wiphy, "command %s timeout after %u ms\n", + mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), + MWL8K_CMD_TIMEOUT_MS); rc = -ETIMEDOUT; } else { int ms; @@ 
-1582,15 +1584,14 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) rc = cmd->result ? -EINVAL : 0; if (rc) - printk(KERN_ERR "%s: Command %s error 0x%x\n", - wiphy_name(hw->wiphy), - mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), - le16_to_cpu(cmd->result)); + wiphy_err(hw->wiphy, "command %s error 0x%x\n", + mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), + le16_to_cpu(cmd->result)); else if (ms > 2000) - printk(KERN_NOTICE "%s: Command %s took %d ms\n", - wiphy_name(hw->wiphy), - mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), - ms); + wiphy_notice(hw->wiphy, "command %s took %d ms\n", + mwl8k_cmd_name(cmd->code, + buf, sizeof(buf)), + ms); } return rc; @@ -1666,7 +1667,7 @@ struct mwl8k_cmd_get_hw_spec_sta { __le32 caps2; __le32 num_tx_desc_per_queue; __le32 total_rxd; -} __attribute__((packed)); +} __packed; #define MWL8K_CAP_MAX_AMSDU 0x20000000 #define MWL8K_CAP_GREENFIELD 0x08000000 @@ -1810,7 +1811,7 @@ struct mwl8k_cmd_get_hw_spec_ap { __le32 wcbbase1; __le32 wcbbase2; __le32 wcbbase3; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) { @@ -1842,22 +1843,22 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) priv->sta_macids_supported = 0x00000000; off = le32_to_cpu(cmd->wcbbase0) & 0xffff; - iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off); + iowrite32(priv->txq[0].txd_dma, priv->sram + off); off = le32_to_cpu(cmd->rxwrptr) & 0xffff; - iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off); + iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); off = le32_to_cpu(cmd->rxrdptr) & 0xffff; - iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off); + iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); off = le32_to_cpu(cmd->wcbbase1) & 0xffff; - iowrite32(cpu_to_le32(priv->txq[1].txd_dma), priv->sram + off); + iowrite32(priv->txq[1].txd_dma, priv->sram + off); off = le32_to_cpu(cmd->wcbbase2) & 0xffff; - iowrite32(cpu_to_le32(priv->txq[2].txd_dma), priv->sram + off); + iowrite32(priv->txq[2].txd_dma, priv->sram + off); off = le32_to_cpu(cmd->wcbbase3) & 0xffff; - iowrite32(cpu_to_le32(priv->txq[3].txd_dma), priv->sram + off); + iowrite32(priv->txq[3].txd_dma, priv->sram + off); } kfree(cmd); @@ -1883,7 +1884,7 @@ struct mwl8k_cmd_set_hw_spec { __le32 flags; __le32 num_tx_desc_per_queue; __le32 total_rxd; -} __attribute__((packed)); +} __packed; #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 @@ -1985,7 +1986,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, struct mwl8k_cmd_get_stat { struct mwl8k_cmd_pkt header; __le32 stats[64]; -} __attribute__((packed)); +} __packed; #define MWL8K_STAT_ACK_FAILURE 9 #define MWL8K_STAT_RTS_FAILURE 12 @@ -2029,7 +2030,7 @@ struct mwl8k_cmd_radio_control { __le16 action; __le16 control; __le16 radio_on; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force) @@ -2092,7 +2093,7 @@ struct mwl8k_cmd_rf_tx_power { __le16 current_level; __le16 reserved; __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) { @@ -2121,7 +2122,7 @@ struct mwl8k_cmd_rf_antenna { struct mwl8k_cmd_pkt header; __le16 antenna; __le16 mode; -} __attribute__((packed)); +} __packed; #define MWL8K_RF_ANTENNA_RX 1 #define MWL8K_RF_ANTENNA_TX 2 @@ -2182,7 +2183,7 @@ static int mwl8k_cmd_set_beacon(struct 
ieee80211_hw *hw, */ struct mwl8k_cmd_set_pre_scan { struct mwl8k_cmd_pkt header; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) { @@ -2209,7 +2210,7 @@ struct mwl8k_cmd_set_post_scan { struct mwl8k_cmd_pkt header; __le32 isibss; __u8 bssid[ETH_ALEN]; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac) @@ -2240,7 +2241,7 @@ struct mwl8k_cmd_set_rf_channel { __le16 action; __u8 current_channel; __le32 channel_flags; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, struct ieee80211_conf *conf) @@ -2293,7 +2294,7 @@ struct mwl8k_cmd_update_set_aid { __u8 bssid[ETH_ALEN]; __le16 protection_mode; __u8 supp_rates[14]; -} __attribute__((packed)); +} __packed; static void legacy_rate_mask_to_array(u8 *rates, u32 mask) { @@ -2364,7 +2365,7 @@ struct mwl8k_cmd_set_rate { /* Bitmap for supported MCS codes. */ __u8 mcs_set[16]; __u8 reserved[16]; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -2397,7 +2398,7 @@ struct mwl8k_cmd_finalize_join { struct mwl8k_cmd_pkt header; __le32 sleep_interval; /* Number of beacon periods to sleep */ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame, int framelen, int dtim) @@ -2436,7 +2437,7 @@ struct mwl8k_cmd_set_rts_threshold { struct mwl8k_cmd_pkt header; __le16 action; __le16 threshold; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh) @@ -2466,7 +2467,7 @@ struct mwl8k_cmd_set_slot { struct mwl8k_cmd_pkt header; __le16 action; __u8 short_slot; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time) { @@ -2528,7 +2529,7 @@ struct mwl8k_cmd_set_edca_params { __u8 txq; } sta; }; -} __attribute__((packed)); +} __packed; #define MWL8K_SET_EDCA_CW 0x01 #define MWL8K_SET_EDCA_TXOP 0x02 @@ -2579,7 +2580,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, struct mwl8k_cmd_set_wmm_mode { struct mwl8k_cmd_pkt header; __le16 action; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable) { @@ -2612,7 +2613,7 @@ struct mwl8k_cmd_mimo_config { __le32 action; __u8 rx_antenna_map; __u8 tx_antenna_map; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) { @@ -2652,7 +2653,7 @@ struct mwl8k_cmd_use_fixed_rate_sta { __le32 rate_type; __le32 reserved1; __le32 reserved2; -} __attribute__((packed)); +} __packed; #define MWL8K_USE_AUTO_RATE 0x0002 #define MWL8K_UCAST_RATE 0 @@ -2694,7 +2695,7 @@ struct mwl8k_cmd_use_fixed_rate_ap { u8 multicast_rate; u8 multicast_rate_type; u8 management_rate; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) @@ -2724,7 +2725,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) struct mwl8k_cmd_enable_sniffer { struct mwl8k_cmd_pkt header; __le32 action; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable) { @@ -2757,7 +2758,7 @@ struct mwl8k_cmd_set_mac_addr { } mbss; __u8 mac_addr[ETH_ALEN]; }; -} __attribute__((packed)); +} __packed; #define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0 
#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1 @@ -2812,7 +2813,7 @@ struct mwl8k_cmd_set_rate_adapt_mode { struct mwl8k_cmd_pkt header; __le16 action; __le16 mode; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) { @@ -2840,7 +2841,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) struct mwl8k_cmd_bss_start { struct mwl8k_cmd_pkt header; __le32 enable; -} __attribute__((packed)); +} __packed; static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int enable) @@ -2885,7 +2886,7 @@ struct mwl8k_cmd_set_new_stn { __u8 add_qos_info; __u8 is_qos_sta; __le32 fw_sta_ptr; -} __attribute__((packed)); +} __packed; #define MWL8K_STA_ACTION_ADD 0 #define MWL8K_STA_ACTION_REMOVE 2 @@ -2978,7 +2979,7 @@ struct ewc_ht_info { __le16 control1; __le16 control2; __le16 control3; -} __attribute__((packed)); +} __packed; struct peer_capability_info { /* Peer type - AP vs. STA. */ @@ -3007,7 +3008,7 @@ struct peer_capability_info { __u8 pad2; __u8 station_id; __le16 amsdu_enabled; -} __attribute__((packed)); +} __packed; struct mwl8k_cmd_update_stadb { struct mwl8k_cmd_pkt header; @@ -3022,7 +3023,7 @@ struct mwl8k_cmd_update_stadb { /* Peer info - valid during add/update. */ struct peer_capability_info peer_info; -} __attribute__((packed)); +} __packed; #define MWL8K_STA_DB_MODIFY_ENTRY 1 #define MWL8K_STA_DB_DEL_ENTRY 2 @@ -3052,7 +3053,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw, p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT; p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability); p->ht_support = sta->ht_cap.ht_supported; - p->ht_caps = sta->ht_cap.cap; + p->ht_caps = cpu_to_le16(sta->ht_cap.cap); p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | ((sta->ht_cap.ampdu_density & 7) << 2); if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) @@ -3190,8 +3191,8 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) int rc; if (!priv->radio_on) { - printk(KERN_DEBUG "%s: dropped TX frame since radio " - "disabled\n", wiphy_name(hw->wiphy)); + wiphy_debug(hw->wiphy, + "dropped TX frame since radio disabled\n"); dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -3209,8 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw) rc = request_irq(priv->pdev->irq, mwl8k_interrupt, IRQF_SHARED, MWL8K_NAME, hw); if (rc) { - printk(KERN_ERR "%s: failed to register IRQ handler\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "failed to register irq handler\n"); return -EIO; } @@ -3297,9 +3297,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, * mode. (Sniffer mode is only used on STA firmware.) 
*/ if (priv->sniffer_enabled) { - printk(KERN_INFO "%s: unable to create STA " - "interface due to sniffer mode being enabled\n", - wiphy_name(hw->wiphy)); + wiphy_info(hw->wiphy, + "unable to create STA interface because sniffer mode is enabled\n"); return -EINVAL; } @@ -3581,9 +3580,8 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw, */ if (!list_empty(&priv->vif_list)) { if (net_ratelimit()) - printk(KERN_INFO "%s: not enabling sniffer " - "mode because STA interface is active\n", - wiphy_name(hw->wiphy)); + wiphy_info(hw->wiphy, + "not enabling sniffer mode because STA interface is active\n"); return 0; } @@ -3763,6 +3761,22 @@ static int mwl8k_get_stats(struct ieee80211_hw *hw, return mwl8k_cmd_get_stat(hw, stats); } +static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct mwl8k_priv *priv = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + + if (idx != 0) + return -ENOENT; + + survey->channel = conf->channel; + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = priv->noise; + + return 0; +} + static int mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -3794,6 +3808,7 @@ static const struct ieee80211_ops mwl8k_ops = { .sta_remove = mwl8k_sta_remove, .conf_tx = mwl8k_conf_tx, .get_stats = mwl8k_get_stats, + .get_survey = mwl8k_get_survey, .ampdu_action = mwl8k_ampdu_action, }; @@ -3911,8 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, priv->sram = pci_iomap(pdev, 0, 0x10000); if (priv->sram == NULL) { - printk(KERN_ERR "%s: Cannot map device SRAM\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot map device sram\n"); goto err_iounmap; } @@ -3924,8 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, if (priv->regs == NULL) { priv->regs = pci_iomap(pdev, 2, 0x10000); if (priv->regs == NULL) { - printk(KERN_ERR "%s: Cannot map device registers\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot map device registers\n"); goto err_iounmap; } } @@ -3937,16 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, /* Ask userland hotplug daemon for the device firmware */ rc = mwl8k_request_firmware(priv); if (rc) { - printk(KERN_ERR "%s: Firmware files not found\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "firmware files not found\n"); goto err_stop_firmware; } /* Load firmware into hardware */ rc = mwl8k_load_firmware(hw); if (rc) { - printk(KERN_ERR "%s: Cannot start firmware\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot start firmware\n"); goto err_stop_firmware; } @@ -3957,9 +3968,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, if (priv->ap_fw) { priv->rxd_ops = priv->device_info->ap_rxd_ops; if (priv->rxd_ops == NULL) { - printk(KERN_ERR "%s: Driver does not have AP " - "firmware image support for this hardware\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, + "Driver does not have AP firmware image support for this hardware\n"); goto err_stop_firmware; } } else { @@ -4037,8 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = request_irq(priv->pdev->irq, mwl8k_interrupt, IRQF_SHARED, MWL8K_NAME, hw); if (rc) { - printk(KERN_ERR "%s: failed to register IRQ handler\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "failed to register irq handler\n"); goto err_free_queues; } @@ -4058,8 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = mwl8k_cmd_get_hw_spec_sta(hw); } if (rc) { - printk(KERN_ERR "%s: Cannot initialise 
firmware\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot initialise firmware\n"); goto err_free_irq; } @@ -4073,15 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, /* Turn radio off */ rc = mwl8k_cmd_radio_disable(hw); if (rc) { - printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot disable\n"); goto err_free_irq; } /* Clear MAC address */ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); if (rc) { - printk(KERN_ERR "%s: Cannot clear MAC address\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot clear mac address\n"); goto err_free_irq; } @@ -4091,17 +4098,16 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = ieee80211_register_hw(hw); if (rc) { - printk(KERN_ERR "%s: Cannot register device\n", - wiphy_name(hw->wiphy)); + wiphy_err(hw->wiphy, "cannot register device\n"); goto err_free_queues; } - printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n", - wiphy_name(hw->wiphy), priv->device_info->part_name, - priv->hw_rev, hw->wiphy->perm_addr, - priv->ap_fw ? "AP" : "STA", - (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, - (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); + wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n", + priv->device_info->part_name, + priv->hw_rev, hw->wiphy->perm_addr, + priv->ap_fw ? "AP" : "STA", + (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, + (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); return 0; diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c index 8c4169c227ae8786bdf9429012594b6521cbdbb4..09fae2f0ea08010309618f93ab0ebdb5f64be84e 100644 --- a/drivers/net/wireless/orinoco/cfg.c +++ b/drivers/net/wireless/orinoco/cfg.c @@ -117,9 +117,8 @@ static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev, case NL80211_IFTYPE_MONITOR: if (priv->broken_monitor && !force_monitor) { - printk(KERN_WARNING "%s: Monitor mode support is " - "buggy in this firmware, not enabling\n", - wiphy_name(wiphy)); + wiphy_warn(wiphy, + "Monitor mode support is buggy in this firmware, not enabling\n"); err = -EINVAL; } break; diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c index 3e1947d097ca83faf849b8de051eb506247f9cb3..259d7585398493720d68ceaa285ebe850a063cdd 100644 --- a/drivers/net/wireless/orinoco/fw.c +++ b/drivers/net/wireless/orinoco/fw.c @@ -49,7 +49,7 @@ struct orinoco_fw_header { __le32 pri_offset; /* Offset to primary plug data */ __le32 compat_offset; /* Offset to compatibility data*/ char signature[0]; /* FW signature length headersize-20 */ -} __attribute__ ((packed)); +} __packed; /* Check the range of various header entries. Return a pointer to a * description of the problem, or NULL if everything checks out. 
*/ diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h index 9ca34e722b45fc0d335b42ad339f40d25305c65e..d9f18c11682aad1be81f00e07b7cd5e24bf47d4b 100644 --- a/drivers/net/wireless/orinoco/hermes.h +++ b/drivers/net/wireless/orinoco/hermes.h @@ -205,7 +205,7 @@ struct hermes_tx_descriptor { u8 retry_count; u8 tx_rate; __le16 tx_control; -} __attribute__ ((packed)); +} __packed; #define HERMES_TXSTAT_RETRYERR (0x0001) #define HERMES_TXSTAT_AGEDERR (0x0002) @@ -254,7 +254,7 @@ struct hermes_tallies_frame { /* Those last are probably not available in very old firmwares */ __le16 RxDiscards_WEPICVError; __le16 RxDiscards_WEPExcluded; -} __attribute__ ((packed)); +} __packed; /* Grabbed from wlan-ng - Thanks Mark... - Jean II * This is the result of a scan inquiry command */ @@ -271,7 +271,7 @@ struct prism2_scan_apinfo { u8 rates[10]; /* Bit rate supported */ __le16 proberesp_rate; /* Data rate of the response frame */ __le16 atim; /* ATIM window time, Kus (hostscan only) */ -} __attribute__ ((packed)); +} __packed; /* Same stuff for the Lucent/Agere card. * Thanks to h1kari - Jean II */ @@ -285,7 +285,7 @@ struct agere_scan_apinfo { /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ __le16 essid_len; /* ESSID length */ u8 essid[32]; /* ESSID of the network */ -} __attribute__ ((packed)); +} __packed; /* Moustafa: Scan structure for Symbol cards */ struct symbol_scan_apinfo { @@ -303,7 +303,7 @@ struct symbol_scan_apinfo { __le16 basic_rates; /* Basic rates bitmask */ u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ -} __attribute__ ((packed)); +} __packed; union hermes_scan_info { struct agere_scan_apinfo a; @@ -343,7 +343,7 @@ struct agere_ext_scan_info { __le16 beacon_interval; __le16 capabilities; u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) #define HERMES_LINKSTATUS_CONNECTED (0x0001) @@ -355,7 +355,7 @@ struct agere_ext_scan_info { struct hermes_linkstatus { __le16 linkstatus; /* Link status */ -} __attribute__ ((packed)); +} __packed; struct hermes_response { u16 status, resp0, resp1, resp2; @@ -365,11 +365,11 @@ struct hermes_response { struct hermes_idstring { __le16 len; __le16 val[16]; -} __attribute__ ((packed)); +} __packed; struct hermes_multicast { u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; /* Timeouts */ #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c index 6da85e75fce071e2205a1a95767e8ee891af3dbc..2b2b9a1a979c95aa4a0ed84a38ea620264c0c4bd 100644 --- a/drivers/net/wireless/orinoco/hermes_dld.c +++ b/drivers/net/wireless/orinoco/hermes_dld.c @@ -65,10 +65,10 @@ struct dblock { __le32 addr; /* adapter address where to write the block */ __le16 len; /* length of the data only, in bytes */ char data[0]; /* data to be written */ -} __attribute__ ((packed)); +} __packed; /* - * Plug Data References are located in in the image after the last data + * Plug Data References are located in the image after the last data * block. They refer to areas in the adapter memory where the plug data * items with matching ID should be written. 
*/ @@ -77,7 +77,7 @@ struct pdr { __le32 addr; /* adapter address where to write the data */ __le32 len; /* expected length of the data, in bytes */ char next[0]; /* next PDR starts here */ -} __attribute__ ((packed)); +} __packed; /* * Plug Data Items are located in the EEPROM read from the adapter by @@ -88,7 +88,7 @@ struct pdi { __le16 len; /* length of ID and data, in words */ __le16 id; /* record ID */ char data[0]; /* plug data */ -} __attribute__ ((packed)); +} __packed; /*** FW data block access functions ***/ @@ -317,7 +317,7 @@ static const struct { \ __le16 len; \ __le16 id; \ u8 val[length]; \ -} __attribute__ ((packed)) default_pdr_data_##pid = { \ +} __packed default_pdr_data_##pid = { \ cpu_to_le16((sizeof(default_pdr_data_##pid)/ \ sizeof(__le16)) - 1), \ cpu_to_le16(pid), \ diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 6fbd78850123fe100737f5bfec9d69005857d254..077baa86756b07db035799d319fa366abdd01490 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c @@ -45,7 +45,7 @@ static const struct { /* Firmware version encoding */ struct comp_id { u16 id, variant, major, minor; -} __attribute__ ((packed)); +} __packed; static inline fwtype_t determine_firmware_type(struct comp_id *nic_id) { @@ -995,7 +995,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx, u8 tx_mic[MIC_KEYLEN]; u8 rx_mic[MIC_KEYLEN]; u8 tsc[ORINOCO_SEQ_LEN]; - } __attribute__ ((packed)) buf; + } __packed buf; hermes_t *hw = &priv->hw; int ret; int err; @@ -1326,7 +1326,7 @@ int orinoco_hw_disassociate(struct orinoco_private *priv, struct { u8 addr[ETH_ALEN]; __le16 reason_code; - } __attribute__ ((packed)) buf; + } __packed buf; /* Currently only supported by WPA enabled Agere fw */ if (!priv->has_wpa) diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index ca71f08709bcd0616b8f9d19d2a3332e6f74122a..e8e2d0f4763db4f33e899ad712595d9af5173333 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -172,7 +172,7 @@ struct hermes_txexc_data { __le16 frame_ctl; __le16 duration_id; u8 addr1[ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; /* Rx frame header except compatibility 802.3 header */ struct hermes_rx_descriptor { @@ -196,7 +196,7 @@ struct hermes_rx_descriptor { /* Data length */ __le16 data_len; -} __attribute__ ((packed)); +} __packed; struct orinoco_rx_data { struct hermes_rx_descriptor *desc; @@ -390,7 +390,7 @@ int orinoco_process_xmit_skb(struct sk_buff *skb, struct header_struct { struct ethhdr eth; /* 802.3 header */ u8 encap[6]; /* 802.2 header */ - } __attribute__ ((packed)) hdr; + } __packed hdr; int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN); if (skb_headroom(skb) < ENCAPS_OVERHEAD) { @@ -1170,7 +1170,7 @@ static void orinoco_join_ap(struct work_struct *work) struct join_req { u8 bssid[ETH_ALEN]; __le16 channel; - } __attribute__ ((packed)) req; + } __packed req; const int atom_len = offsetof(struct prism2_scan_apinfo, atim); struct prism2_scan_apinfo *atom = NULL; int offset = 4; @@ -1410,7 +1410,7 @@ void __orinoco_ev_info(struct net_device *dev, hermes_t *hw) struct { __le16 len; __le16 type; - } __attribute__ ((packed)) info; + } __packed info; int len, type; int err; diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h index a6da86e0a70faaf7d88bd76b95af9a4fd9846448..255710ef082a69a4560b41da56f1fb4087e49847 100644 --- a/drivers/net/wireless/orinoco/orinoco.h +++ 
b/drivers/net/wireless/orinoco/orinoco.h @@ -32,7 +32,7 @@ struct orinoco_key { __le16 len; /* always stored as little-endian */ char data[ORINOCO_MAX_KEY_SIZE]; -} __attribute__ ((packed)); +} __packed; #define TKIP_KEYLEN 16 #define MIC_KEYLEN 8 diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 78f089baa8c989933c6b2dc290a6f4e5507f1256..a38a7bd25f1916431bcf607a89bf4a0c126d5f8a 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -90,7 +90,7 @@ struct header_struct { /* SNAP */ u8 oui[3]; __be16 ethertype; -} __attribute__ ((packed)); +} __packed; struct ez_usb_fw { u16 size; @@ -222,7 +222,7 @@ struct ezusb_packet { __le16 hermes_len; __le16 hermes_rid; u8 data[0]; -} __attribute__ ((packed)); +} __packed; /* Table of devices that work or may work with this driver */ static struct usb_device_id ezusb_table[] = { @@ -356,12 +356,10 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv, { struct request_context *ctx; - ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); if (!ctx) return NULL; - memset(ctx, 0, sizeof(*ctx)); - ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC); if (!ctx->buf) { kfree(ctx); @@ -1504,16 +1502,16 @@ static inline void ezusb_delete(struct ezusb_priv *upriv) ezusb_ctx_complete(list_entry(item, struct request_context, list)); - if (upriv->read_urb->status == -EINPROGRESS) + if (upriv->read_urb && upriv->read_urb->status == -EINPROGRESS) printk(KERN_ERR PFX "Some URB in progress\n"); mutex_unlock(&upriv->mtx); - kfree(upriv->read_urb->transfer_buffer); - if (upriv->bap_buf != NULL) - kfree(upriv->bap_buf); - if (upriv->read_urb != NULL) + if (upriv->read_urb) { + kfree(upriv->read_urb->transfer_buffer); usb_free_urb(upriv->read_urb); + } + kfree(upriv->bap_buf); if (upriv->dev) { struct orinoco_private *priv = ndev_priv(upriv->dev); orinoco_if_del(priv); diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 5775124e2aeefd57215324cb1c778ad983f8b024..cf7be1eb612495b699dc2e0cdc8fe17650c26736 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c @@ -128,7 +128,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev) } else { struct { __le16 qual, signal, noise, unused; - } __attribute__ ((packed)) cq; + } __packed cq; err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_COMMSQUALITY, &cq); @@ -993,11 +993,9 @@ static int orinoco_ioctl_set_genie(struct net_device *dev, return -EINVAL; if (wrqu->data.length) { - buf = kmalloc(wrqu->data.length, GFP_KERNEL); + buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); if (buf == NULL) return -ENOMEM; - - memcpy(buf, extra, wrqu->data.length); } else buf = NULL; diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index 187e263b045ae813dae3fb59a7c0b27b9d85c4fe..d687cb7f2a599c28d04d6c875f5b4b2796604cdf 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c @@ -149,16 +149,15 @@ static int p54_generate_band(struct ieee80211_hw *dev, continue; if (list->channels[i].data != CHAN_HAS_ALL) { - printk(KERN_ERR "%s:%s%s%s is/are missing for " - "channel:%d [%d MHz].\n", - wiphy_name(dev->wiphy), - (list->channels[i].data & CHAN_HAS_CAL ? "" : - " [iqauto calibration data]"), - (list->channels[i].data & CHAN_HAS_LIMIT ? "" : - " [output power limits]"), - (list->channels[i].data & CHAN_HAS_CURVE ? 
"" : - " [curve data]"), - list->channels[i].index, list->channels[i].freq); + wiphy_err(dev->wiphy, + "%s%s%s is/are missing for channel:%d [%d MHz].\n", + (list->channels[i].data & CHAN_HAS_CAL ? "" : + " [iqauto calibration data]"), + (list->channels[i].data & CHAN_HAS_LIMIT ? "" : + " [output power limits]"), + (list->channels[i].data & CHAN_HAS_CURVE ? "" : + " [curve data]"), + list->channels[i].index, list->channels[i].freq); continue; } @@ -168,9 +167,8 @@ static int p54_generate_band(struct ieee80211_hw *dev, } if (j == 0) { - printk(KERN_ERR "%s: Disabling totally damaged %s band.\n", - wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ? - "2 GHz" : "5 GHz"); + wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n", + (band == IEEE80211_BAND_2GHZ) ? 2 : 5); ret = -ENODATA; goto err_out; @@ -244,9 +242,9 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev) if ((priv->iq_autocal_len != priv->curve_data->entries) || (priv->iq_autocal_len != priv->output_limit->entries)) - printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. " - "You may not be able to use all channels.\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, + "Unsupported or damaged EEPROM detected. " + "You may not be able to use all channels.\n"); max_channel_num = max_t(unsigned int, priv->output_limit->entries, priv->iq_autocal_len); @@ -419,15 +417,14 @@ static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len, int i; if (len != (entry_size * num_entries)) { - printk(KERN_ERR "%s: unknown rssi calibration data packing " - " type:(%x) len:%d.\n", - wiphy_name(dev->wiphy), type, len); + wiphy_err(dev->wiphy, + "unknown rssi calibration data packing type:(%x) len:%d.\n", + type, len); print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len); - printk(KERN_ERR "%s: please report this issue.\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "please report this issue.\n"); return; } @@ -445,15 +442,14 @@ static void p54_parse_default_country(struct ieee80211_hw *dev, struct pda_country *country; if (len != sizeof(*country)) { - printk(KERN_ERR "%s: found possible invalid default country " - "eeprom entry. (entry size: %d)\n", - wiphy_name(dev->wiphy), len); + wiphy_err(dev->wiphy, + "found possible invalid default country eeprom entry. 
(entry size: %d)\n", + len); print_hex_dump_bytes("country:", DUMP_PREFIX_NONE, data, len); - printk(KERN_ERR "%s: please report this issue.\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "please report this issue.\n"); return; } @@ -478,8 +474,8 @@ static int p54_convert_output_limits(struct ieee80211_hw *dev, return -EINVAL; if (data[0] != 0) { - printk(KERN_ERR "%s: unknown output power db revision:%x\n", - wiphy_name(dev->wiphy), data[0]); + wiphy_err(dev->wiphy, "unknown output power db revision:%x\n", + data[0]); return -EINVAL; } @@ -587,10 +583,9 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) err = p54_convert_rev1(dev, curve_data); break; default: - printk(KERN_ERR "%s: unknown curve data " - "revision %d\n", - wiphy_name(dev->wiphy), - curve_data->cal_method_rev); + wiphy_err(dev->wiphy, + "unknown curve data revision %d\n", + curve_data->cal_method_rev); err = -ENODEV; break; } @@ -599,13 +594,13 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) } break; case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: - priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); + priv->iq_autocal = kmemdup(entry->data, data_len, + GFP_KERNEL); if (!priv->iq_autocal) { err = -ENOMEM; goto err; } - memcpy(priv->iq_autocal, entry->data, data_len); priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); break; case PDR_DEFAULT_COUNTRY: @@ -672,8 +667,8 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) if (!synth || !priv->iq_autocal || !priv->output_limit || !priv->curve_data) { - printk(KERN_ERR "%s: not all required entries found in eeprom!\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, + "not all required entries found in eeprom!\n"); err = -EINVAL; goto err; } @@ -699,15 +694,15 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { u8 perm_addr[ETH_ALEN]; - printk(KERN_WARNING "%s: Invalid hwaddr! Using randomly generated MAC addr\n", - wiphy_name(dev->wiphy)); + wiphy_warn(dev->wiphy, + "invalid hwaddr! 
using randomly generated mac addr\n"); random_ether_addr(perm_addr); SET_IEEE80211_PERM_ADDR(dev, perm_addr); } - printk(KERN_INFO "%s: hwaddr %pM, MAC:isl38%02x RF:%s\n", - wiphy_name(dev->wiphy), dev->wiphy->perm_addr, priv->version, - p54_rf_chips[priv->rxhw]); + wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n", + dev->wiphy->perm_addr, priv->version, + p54_rf_chips[priv->rxhw]); return 0; @@ -719,8 +714,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) priv->output_limit = NULL; priv->curve_data = NULL; - printk(KERN_ERR "%s: eeprom parse failed!\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "eeprom parse failed!\n"); return err; } EXPORT_SYMBOL_GPL(p54_parse_eeprom); diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index c43a5d461ab21feb6a2e9ad98854bb929c06e770..47006bca485216609afb0f51a201c4d2ef1ef04a 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c @@ -62,16 +62,15 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) case FW_LM20: case FW_LM87: { char *iftype = (char *)bootrec->data; - printk(KERN_INFO "%s: p54 detected a LM%c%c " - "firmware\n", - wiphy_name(priv->hw->wiphy), - iftype[2], iftype[3]); + wiphy_info(priv->hw->wiphy, + "p54 detected a LM%c%c firmware\n", + iftype[2], iftype[3]); break; } case FW_FMAC: default: - printk(KERN_ERR "%s: unsupported firmware\n", - wiphy_name(priv->hw->wiphy)); + wiphy_err(priv->hw->wiphy, + "unsupported firmware\n"); return -ENODEV; } break; @@ -125,15 +124,15 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) } if (fw_version) - printk(KERN_INFO "%s: FW rev %s - Softmac protocol %x.%x\n", - wiphy_name(priv->hw->wiphy), fw_version, - priv->fw_var >> 8, priv->fw_var & 0xff); + wiphy_info(priv->hw->wiphy, + "fw rev %s - softmac protocol %x.%x\n", + fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); if (priv->fw_var < 0x500) - printk(KERN_INFO "%s: you are using an obsolete firmware. " - "visit http://wireless.kernel.org/en/users/Drivers/p54 " - "and grab one for \"kernel >= 2.6.28\"!\n", - wiphy_name(priv->hw->wiphy)); + wiphy_info(priv->hw->wiphy, + "you are using an obsolete firmware. " + "visit http://wireless.kernel.org/en/users/Drivers/p54 " + "and grab one for \"kernel >= 2.6.28\"!\n"); if (priv->fw_var >= 0x300) { /* Firmware supports QoS, use it! */ @@ -152,13 +151,14 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) priv->hw->queues = P54_QUEUE_AC_NUM; } - printk(KERN_INFO "%s: cryptographic accelerator " - "WEP:%s, TKIP:%s, CCMP:%s\n", wiphy_name(priv->hw->wiphy), - (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : - "no", (priv->privacy_caps & (BR_DESC_PRIV_CAP_TKIP | - BR_DESC_PRIV_CAP_MICHAEL)) ? "YES" : "no", - (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) ? - "YES" : "no"); + wiphy_info(priv->hw->wiphy, + "cryptographic accelerator WEP:%s, TKIP:%s, CCMP:%s\n", + (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : "no", + (priv->privacy_caps & + (BR_DESC_PRIV_CAP_TKIP | BR_DESC_PRIV_CAP_MICHAEL)) + ? "YES" : "no", + (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) + ? 
"YES" : "no"); if (priv->rx_keycache_size) { /* @@ -247,8 +247,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, if (!wait_for_completion_interruptible_timeout( &priv->eeprom_comp, HZ)) { - printk(KERN_ERR "%s: device does not respond!\n", - wiphy_name(priv->hw->wiphy)); + wiphy_err(priv->hw->wiphy, "device does not respond!\n"); ret = -EBUSY; } priv->eeprom = NULL; @@ -523,9 +522,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) return 0; err: - printk(KERN_ERR "%s: frequency change to channel %d failed.\n", - wiphy_name(priv->hw->wiphy), ieee80211_frequency_to_channel( - priv->hw->conf.channel->center_freq)); + wiphy_err(priv->hw->wiphy, "frequency change to channel %d failed.\n", + ieee80211_frequency_to_channel( + priv->hw->conf.channel->center_freq)); dev_kfree_skb_any(skb); return -EINVAL; @@ -676,8 +675,8 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len, break; default: - printk(KERN_ERR "%s: invalid cryptographic algorithm: %d\n", - wiphy_name(priv->hw->wiphy), algo); + wiphy_err(priv->hw->wiphy, + "invalid cryptographic algorithm: %d\n", algo); dev_kfree_skb(skb); return -EINVAL; } diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c index 9575ac03363067aa3e6e3616a5d0fe08c1b5c63c..ea91f5cce6b3f62d31f8c5d45609e650a2cc40a7 100644 --- a/drivers/net/wireless/p54/led.c +++ b/drivers/net/wireless/p54/led.c @@ -57,8 +57,8 @@ static void p54_update_leds(struct work_struct *work) err = p54_set_leds(priv); if (err && net_ratelimit()) - printk(KERN_ERR "%s: failed to update LEDs (%d).\n", - wiphy_name(priv->hw->wiphy), err); + wiphy_err(priv->hw->wiphy, + "failed to update leds (%d).\n", err); if (rerun) ieee80211_queue_delayed_work(priv->hw, &priv->led_work, @@ -102,8 +102,8 @@ static int p54_register_led(struct p54_common *priv, err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); if (err) - printk(KERN_ERR "%s: Failed to register %s LED.\n", - wiphy_name(priv->hw->wiphy), name); + wiphy_err(priv->hw->wiphy, + "failed to register %s led.\n", name); else led->registered = 1; diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index c072f41747cac9cf1e0c6c9c37c4c01e53c41a7c..47db439b63bfd9d3f2b9330095b4c4b606160479 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -507,6 +507,22 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, return ret; } +static int p54_get_survey(struct ieee80211_hw *dev, int idx, + struct survey_info *survey) +{ + struct p54_common *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + + if (idx != 0) + return -ENOENT; + + survey->channel = conf->channel; + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = clamp_t(s8, priv->noise, -128, 127); + + return 0; +} + static const struct ieee80211_ops p54_ops = { .tx = p54_tx_80211, .start = p54_start, @@ -523,6 +539,7 @@ static const struct ieee80211_ops p54_ops = { .configure_filter = p54_configure_filter, .conf_tx = p54_conf_tx, .get_stats = p54_get_stats, + .get_survey = p54_get_survey, }; struct ieee80211_hw *p54_init_common(size_t priv_data_len) diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h index 4915d9d542034755d3e07e40b37d7185c324b7fc..e3ed893b5aaf7149beedca62a2af7b73b5d6669a 100644 --- a/drivers/net/wireless/p54/net2280.h +++ b/drivers/net/wireless/p54/net2280.h @@ -232,7 +232,7 @@ struct net2280_regs { #define GPIO2_INTERRUPT 2 #define GPIO1_INTERRUPT 1 #define 
GPIO0_INTERRUPT 0 -} __attribute__ ((packed)); +} __packed; /* usb control, BAR0 + 0x0080 */ struct net2280_usb_regs { @@ -296,7 +296,7 @@ struct net2280_usb_regs { #define FORCE_IMMEDIATE 7 #define OUR_USB_ADDRESS 0 __le32 ourconfig; -} __attribute__ ((packed)); +} __packed; /* pci control, BAR0 + 0x0100 */ struct net2280_pci_regs { @@ -323,7 +323,7 @@ struct net2280_pci_regs { #define PCI_ARBITER_CLEAR 2 #define PCI_EXTERNAL_ARBITER 1 #define PCI_HOST_MODE 0 -} __attribute__ ((packed)); +} __packed; /* dma control, BAR0 + 0x0180 ... array of four structs like this, * for channels 0..3. see also struct net2280_dma: descriptor @@ -364,7 +364,7 @@ struct net2280_dma_regs { /* [11.7] */ __le32 dmaaddr; __le32 dmadesc; u32 _unused1; -} __attribute__ ((packed)); +} __packed; /* dedicated endpoint registers, BAR0 + 0x0200 */ @@ -374,7 +374,7 @@ struct net2280_dep_regs { /* [11.8] */ /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ __le32 dep_rsp; u32 _unused[2]; -} __attribute__ ((packed)); +} __packed; /* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs * like this, for ep0 then the configurable endpoints A..F @@ -437,16 +437,16 @@ struct net2280_ep_regs { /* [11.9] */ __le32 ep_avail; __le32 ep_data; u32 _unused0[2]; -} __attribute__ ((packed)); +} __packed; struct net2280_reg_write { __le16 port; __le32 addr; __le32 val; -} __attribute__ ((packed)); +} __packed; struct net2280_reg_read { __le16 port; __le32 addr; -} __attribute__ ((packed)); +} __packed; #endif /* NET2280_H */ diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index a5ea89cde8c43a9c837018459e9f689a83162e1e..822f8dc26e9c051d3b9ee34e79ef715b0f204067 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -466,8 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev) P54P_READ(dev_int); if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { - printk(KERN_ERR "%s: Cannot boot firmware!\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "cannot boot firmware!\n"); p54p_stop(dev); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h index 2feead617a3bf151d075f32cd87b2e0787f16417..ee9bc62a4fa277345f951596e6c192f5e82c9a51 100644 --- a/drivers/net/wireless/p54/p54pci.h +++ b/drivers/net/wireless/p54/p54pci.h @@ -65,7 +65,7 @@ struct p54p_csr { u8 unused_6[1924]; u8 cardbus_cis[0x800]; u8 direct_mem_win[0x1000]; -} __attribute__ ((packed)); +} __packed; /* usb backend only needs the register defines above */ #ifndef P54USB_H @@ -74,7 +74,7 @@ struct p54p_desc { __le32 device_addr; __le16 len; __le16 flags; -} __attribute__ ((packed)); +} __packed; struct p54p_ring_control { __le32 host_idx[4]; @@ -83,7 +83,7 @@ struct p54p_ring_control { struct p54p_desc tx_data[32]; struct p54p_desc rx_mgmt[4]; struct p54p_desc tx_mgmt[4]; -} __attribute__ ((packed)); +} __packed; #define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r) #define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r) diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index c8f09da1f84df425dacdd5a29a55cb4479d4ad37..087bf0698a5af7aee75a8c47fac852fe4cd35fa1 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -697,9 +697,7 @@ static int __devexit p54spi_remove(struct spi_device *spi) static struct spi_driver p54spi_driver = { .driver = { - /* use cx3110x name because board-n800.c uses that for the - * SPI port */ - 
.name = "cx3110x", + .name = "p54spi", .bus = &spi_bus_type, .owner = THIS_MODULE, }, @@ -733,3 +731,4 @@ module_exit(p54spi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter "); MODULE_ALIAS("spi:cx3110x"); +MODULE_ALIAS("spi:p54spi"); diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h index 7fbe8d8fc67cba47c99be22b45d0f5c295ac7402..dfaa62aaeb079d643f13a5033904904f834b604c 100644 --- a/drivers/net/wireless/p54/p54spi.h +++ b/drivers/net/wireless/p54/p54spi.h @@ -96,7 +96,7 @@ struct p54s_dma_regs { __le16 cmd; __le16 len; __le32 addr; -} __attribute__ ((packed)); +} __packed; struct p54s_tx_info { struct list_head tx_list; diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 73073259f50814b36ab947e4fd58fc6293e6f459..ad595958b7df19f144a38671cc0424551aaba709 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -69,7 +69,8 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/ {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/ - {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */ + /* {USB_DEVICE(0x0cde, 0x0006)}, * Medion MD40900 already listed above, + * just noting it here for clarity */ {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ {USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */ {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ @@ -434,10 +435,9 @@ static int p54u_firmware_reset_3887(struct ieee80211_hw *dev) u8 *buf; int ret; - buf = kmalloc(4, GFP_KERNEL); + buf = kmemdup(p54u_romboot_3887, 4, GFP_KERNEL); if (!buf) return -ENOMEM; - memcpy(buf, p54u_romboot_3887, 4); ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4); kfree(buf); diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h index e935b79f7f7569cb1b33399acc9822dd6ff31a2c..ed4034ade59ab4efb01b615693149497cdfe04f2 100644 --- a/drivers/net/wireless/p54/p54usb.h +++ b/drivers/net/wireless/p54/p54usb.h @@ -70,12 +70,12 @@ struct net2280_tx_hdr { __le16 len; __le16 follower; /* ? 
*/ u8 padding[8]; -} __attribute__((packed)); +} __packed; struct lm87_tx_hdr { __le32 device_addr; __le32 chksum; -} __attribute__((packed)); +} __packed; /* Some flags for the isl hardware registers controlling DMA inside the * chip */ @@ -103,7 +103,7 @@ struct x2_header { __le32 fw_load_addr; __le32 fw_length; __le32 crc; -} __attribute__((packed)); +} __packed; /* pipes 3 and 4 are not used by the driver */ #define P54U_PIPE_NUMBER 9 diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 4e6891099d43d98d960619004e41d953cba3c1b1..427b46f558ed4f57b0066a2ae312be0d0e82c7ae 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -38,8 +38,8 @@ static void p54_dump_tx_queue(struct p54_common *priv) u32 largest_hole = 0, free; spin_lock_irqsave(&priv->tx_queue.lock, flags); - printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n", - wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue)); + wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n", + skb_queue_len(&priv->tx_queue)); prev_addr = priv->rx_start; skb_queue_walk(&priv->tx_queue, skb) { @@ -48,21 +48,23 @@ static void p54_dump_tx_queue(struct p54_common *priv) hdr = (void *) skb->data; free = range->start_addr - prev_addr; - printk(KERN_DEBUG "%s: | [%02d] => [skb:%p skb_len:0x%04x " - "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " - "mem:{start:%04x end:%04x, free:%d}]\n", - wiphy_name(priv->hw->wiphy), i++, skb, skb->len, - le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), - le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), - range->start_addr, range->end_addr, free); + wiphy_debug(priv->hw->wiphy, + "| [%02d] => [skb:%p skb_len:0x%04x " + "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " + "mem:{start:%04x end:%04x, free:%d}]\n", + i++, skb, skb->len, + le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), + le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), + range->start_addr, range->end_addr, free); prev_addr = range->end_addr; largest_hole = max(largest_hole, free); } free = priv->rx_end - prev_addr; largest_hole = max(largest_hole, free); - printk(KERN_DEBUG "%s: \\ --- [free: %d], largest free block: %d ---\n", - wiphy_name(priv->hw->wiphy), free, largest_hole); + wiphy_debug(priv->hw->wiphy, + "\\ --- [free: %d], largest free block: %d ---\n", + free, largest_hole); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); } #endif /* P54_MM_DEBUG */ @@ -538,8 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) case P54_TRAP_BEACON_TX: break; case P54_TRAP_RADAR: - printk(KERN_INFO "%s: radar (freq:%d MHz)\n", - wiphy_name(priv->hw->wiphy), freq); + wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq); break; case P54_TRAP_NO_BEACON: if (priv->vif) @@ -558,8 +559,8 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) wiphy_rfkill_set_hw_state(priv->hw->wiphy, false); break; default: - printk(KERN_INFO "%s: received event:%x freq:%d\n", - wiphy_name(priv->hw->wiphy), event, freq); + wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n", + event, freq); break; } } @@ -584,8 +585,9 @@ static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb) p54_rx_eeprom_readback(priv, skb); break; default: - printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", - wiphy_name(priv->hw->wiphy), le16_to_cpu(hdr->type)); + wiphy_debug(priv->hw->wiphy, + "not handling 0x%02x type control frame\n", + le16_to_cpu(hdr->type)); break; } return 0; diff --git 
a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 8d1190c0f06241c1af9f7755c5500a6f1c48b94a..77cd65db8500b2363496721702777cd280e3bb15 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -2067,7 +2067,7 @@ send_simple_event(islpci_private *priv, const char *str) memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL); if (!memptr) return; - BUG_ON(n > IW_CUSTOM_MAX); + BUG_ON(n >= IW_CUSTOM_MAX); wrqu.data.pointer = memptr; wrqu.data.length = n; strcpy(memptr, str); @@ -2101,7 +2101,7 @@ struct ieee80211_beacon_phdr { u8 timestamp[8]; u16 beacon_int; u16 capab_info; -} __attribute__ ((packed)); +} __packed; #define WLAN_EID_GENERIC 0xdd static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; @@ -2751,14 +2751,9 @@ prism54_hostapd(struct net_device *ndev, struct iw_point *p) p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) return -EINVAL; - param = kmalloc(p->length, GFP_KERNEL); - if (param == NULL) - return -ENOMEM; - - if (copy_from_user(param, p->pointer, p->length)) { - kfree(param); - return -EFAULT; - } + param = memdup_user(p->pointer, p->length); + if (IS_ERR(param)) + return PTR_ERR(param); switch (param->cmd) { case PRISM2_SET_ENCRYPTION: diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h index b7534c2869c8d6d6b7fe970eeb8a1ae36020a109..59e31258d45096b418687b6c54208092f632c335 100644 --- a/drivers/net/wireless/prism54/isl_oid.h +++ b/drivers/net/wireless/prism54/isl_oid.h @@ -29,20 +29,20 @@ struct obj_ssid { u8 length; char octets[33]; -} __attribute__ ((packed)); +} __packed; struct obj_key { u8 type; /* dot11_priv_t */ u8 length; char key[32]; -} __attribute__ ((packed)); +} __packed; struct obj_mlme { u8 address[6]; u16 id; u16 state; u16 code; -} __attribute__ ((packed)); +} __packed; struct obj_mlmeex { u8 address[6]; @@ -51,12 +51,12 @@ struct obj_mlmeex { u16 code; u16 size; u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct obj_buffer { u32 size; u32 addr; /* 32bit bus address */ -} __attribute__ ((packed)); +} __packed; struct obj_bss { u8 address[6]; @@ -77,17 +77,17 @@ struct obj_bss { short rates; short basic_rates; int:16; /* padding */ -} __attribute__ ((packed)); +} __packed; struct obj_bsslist { u32 nr; struct obj_bss bsslist[0]; -} __attribute__ ((packed)); +} __packed; struct obj_frequencies { u16 nr; u16 mhz[0]; -} __attribute__ ((packed)); +} __packed; struct obj_attachment { char type; @@ -95,7 +95,7 @@ struct obj_attachment { short id; short size; char data[0]; -} __attribute__((packed)); +} __packed; /* * in case everything's ok, the inlined function below will be diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h index 54f9a4b7bf9b0f5e5973db0953b088e44572fc0e..6ca30a5b7bfbc0463a440c19ca5d8aad026d5abe 100644 --- a/drivers/net/wireless/prism54/islpci_eth.h +++ b/drivers/net/wireless/prism54/islpci_eth.h @@ -34,13 +34,13 @@ struct rfmon_header { __le16 unk3; u8 rssi; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct rx_annex_header { u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; struct rfmon_header rfmon; -} __attribute__ ((packed)); +} __packed; /* wlan-ng (and hopefully others) AVS header, version one. Fields in * network byte order. 
*/ diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h index 0b27e50fe0d5420d2d0273801bdecec4f4d7c61f..0db93db9b6753efed4354a47408baa98e153c4b4 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.h +++ b/drivers/net/wireless/prism54/islpci_mgt.h @@ -101,7 +101,7 @@ typedef struct { u8 device_id; u8 flags; u32 length; -} __attribute__ ((packed)) +} __packed pimfor_header_t; /* A received and interrupt-processed management frame, either for diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index abff8934db13a1e02f62e56b7ff0561c42891be5..9c38fc331dca7966a6d643d37aaef20079cdd8b6 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -97,7 +97,6 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev); static const struct iw_handler_def ray_handler_def; /***** Prototypes for raylink functions **************************************/ -static int asc_to_int(char a); static void authenticate(ray_dev_t *local); static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type); static void authenticate_timeout(u_long); @@ -1716,24 +1715,6 @@ static void authenticate_timeout(u_long data) join_net((u_long) local); } -/*===========================================================================*/ -static int asc_to_int(char a) -{ - if (a < '0') - return -1; - if (a <= '9') - return (a - '0'); - if (a < 'A') - return -1; - if (a <= 'F') - return (10 + a - 'A'); - if (a < 'a') - return -1; - if (a <= 'f') - return (10 + a - 'a'); - return -1; -} - /*===========================================================================*/ static int parse_addr(char *in_str, UCHAR *out) { @@ -1754,14 +1735,14 @@ static int parse_addr(char *in_str, UCHAR *out) i = 5; while (j > 0) { - if ((k = asc_to_int(in_str[j--])) != -1) + if ((k = hex_to_bin(in_str[j--])) != -1) out[i] = k; else return 0; if (j == 0) break; - if ((k = asc_to_int(in_str[j--])) != -1) + if ((k = hex_to_bin(in_str[j--])) != -1) out[i] += k << 4; else return 0; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 4bd61ee627c08f49f0b2519e16c7a74f31cf27ca..719573bbbf81fce6e21560465702cbff814847f4 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -238,19 +238,19 @@ struct ndis_80211_auth_request { u8 bssid[6]; u8 padding[2]; __le32 flags; -} __attribute__((packed)); +} __packed; struct ndis_80211_pmkid_candidate { u8 bssid[6]; u8 padding[2]; __le32 flags; -} __attribute__((packed)); +} __packed; struct ndis_80211_pmkid_cand_list { __le32 version; __le32 num_candidates; struct ndis_80211_pmkid_candidate candidate_list[0]; -} __attribute__((packed)); +} __packed; struct ndis_80211_status_indication { __le32 status_type; @@ -260,19 +260,19 @@ struct ndis_80211_status_indication { struct ndis_80211_auth_request auth_request[0]; struct ndis_80211_pmkid_cand_list cand_list; } u; -} __attribute__((packed)); +} __packed; struct ndis_80211_ssid { __le32 length; u8 essid[NDIS_802_11_LENGTH_SSID]; -} __attribute__((packed)); +} __packed; struct ndis_80211_conf_freq_hop { __le32 length; __le32 hop_pattern; __le32 hop_set; __le32 dwell_time; -} __attribute__((packed)); +} __packed; struct ndis_80211_conf { __le32 length; @@ -280,7 +280,7 @@ struct ndis_80211_conf { __le32 atim_window; __le32 ds_config; struct ndis_80211_conf_freq_hop fh_config; -} __attribute__((packed)); +} __packed; struct ndis_80211_bssid_ex { __le32 length; @@ -295,25 +295,25 @@ struct ndis_80211_bssid_ex { 
u8 rates[NDIS_802_11_LENGTH_RATES_EX]; __le32 ie_length; u8 ies[0]; -} __attribute__((packed)); +} __packed; struct ndis_80211_bssid_list_ex { __le32 num_items; struct ndis_80211_bssid_ex bssid[0]; -} __attribute__((packed)); +} __packed; struct ndis_80211_fixed_ies { u8 timestamp[8]; __le16 beacon_interval; __le16 capabilities; -} __attribute__((packed)); +} __packed; struct ndis_80211_wep_key { __le32 size; __le32 index; __le32 length; u8 material[32]; -} __attribute__((packed)); +} __packed; struct ndis_80211_key { __le32 size; @@ -323,14 +323,14 @@ struct ndis_80211_key { u8 padding[6]; u8 rsc[8]; u8 material[32]; -} __attribute__((packed)); +} __packed; struct ndis_80211_remove_key { __le32 size; __le32 index; u8 bssid[6]; u8 padding[2]; -} __attribute__((packed)); +} __packed; struct ndis_config_param { __le32 name_offs; @@ -338,7 +338,7 @@ struct ndis_config_param { __le32 type; __le32 value_offs; __le32 value_length; -} __attribute__((packed)); +} __packed; struct ndis_80211_assoc_info { __le32 length; @@ -358,12 +358,12 @@ struct ndis_80211_assoc_info { } resp_ie; __le32 resp_ie_length; __le32 offset_resp_ies; -} __attribute__((packed)); +} __packed; struct ndis_80211_auth_encr_pair { __le32 auth_mode; __le32 encr_mode; -} __attribute__((packed)); +} __packed; struct ndis_80211_capability { __le32 length; @@ -371,7 +371,7 @@ struct ndis_80211_capability { __le32 num_pmkids; __le32 num_auth_encr_pair; struct ndis_80211_auth_encr_pair auth_encr_pair[0]; -} __attribute__((packed)); +} __packed; struct ndis_80211_bssid_info { u8 bssid[6]; @@ -520,8 +520,9 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed); -static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, - int dbm); +static int rndis_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, + int mbm); static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm); static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, @@ -1856,20 +1857,25 @@ static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed) return 0; } -static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, - int dbm) +static int rndis_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, + int mbm) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; - netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n", - __func__, type, dbm); + netdev_dbg(usbdev->net, "%s(): type:0x%x mbm:%i\n", + __func__, type, mbm); + + if (mbm < 0 || (mbm % 100)) + return -ENOTSUPP; /* Device doesn't support changing txpower after initialization, only * turn off/on radio. Support 'auto' mode and setting same dBm that is * currently used. 
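* The mbm argument is expressed in 1/100 dBm, so e.g. 1600 mBm corresponds to 16 dBm; that is why values which are negative or not a multiple of 100 are rejected above and MBM_TO_DBM() is used below.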
*/ - if (type == TX_POWER_AUTOMATIC || dbm == get_bcm4320_power_dbm(priv)) { + if (type == NL80211_TX_POWER_AUTOMATIC || + MBM_TO_DBM(mbm) == get_bcm4320_power_dbm(priv)) { if (!priv->radio_on) disassociate(usbdev, true); /* turn on radio */ @@ -2495,8 +2501,7 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); - struct ndis_80211_assoc_info *info; - u8 assoc_buf[sizeof(*info) + IW_CUSTOM_MAX + 32]; + struct ndis_80211_assoc_info *info = NULL; u8 bssid[ETH_ALEN]; int resp_ie_len, req_ie_len; u8 *req_ie, *resp_ie; @@ -2515,23 +2520,43 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) resp_ie = NULL; if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { - memset(assoc_buf, 0, sizeof(assoc_buf)); - info = (void *)assoc_buf; + info = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); + if (!info) { + /* No memory? Try resume work later */ + set_bit(WORK_LINK_UP, &priv->work_pending); + queue_work(priv->workqueue, &priv->work); + return; + } - /* Get association info IEs from device and send them back to - * userspace. */ - ret = get_association_info(usbdev, info, sizeof(assoc_buf)); + /* Get association info IEs from device. */ + ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE); if (!ret) { req_ie_len = le32_to_cpu(info->req_ie_length); if (req_ie_len > 0) { offset = le32_to_cpu(info->offset_req_ies); + + if (offset > CONTROL_BUFFER_SIZE) + offset = CONTROL_BUFFER_SIZE; + req_ie = (u8 *)info + offset; + + if (offset + req_ie_len > CONTROL_BUFFER_SIZE) + req_ie_len = + CONTROL_BUFFER_SIZE - offset; } resp_ie_len = le32_to_cpu(info->resp_ie_length); if (resp_ie_len > 0) { offset = le32_to_cpu(info->offset_resp_ies); + + if (offset > CONTROL_BUFFER_SIZE) + offset = CONTROL_BUFFER_SIZE; + resp_ie = (u8 *)info + offset; + + if (offset + resp_ie_len > CONTROL_BUFFER_SIZE) + resp_ie_len = + CONTROL_BUFFER_SIZE - offset; } } } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC)) @@ -2563,6 +2588,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); + if (info != NULL) + kfree(info); + priv->connected = true; memcpy(priv->bssid, bssid, ETH_ALEN); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index ad2c98af7e9d62090dd54837206d07bcd23675a6..5063e01410e5b76640946aea9d1722d0543c2aa5 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -586,9 +586,11 @@ static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev, static inline void rt2400pci_set_vgc(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, u8 vgc_level) { - rt2400pci_bbp_write(rt2x00dev, 13, vgc_level); - qual->vgc_level = vgc_level; - qual->vgc_level_reg = vgc_level; + if (qual->vgc_level_reg != vgc_level) { + rt2400pci_bbp_write(rt2x00dev, 13, vgc_level); + qual->vgc_level = vgc_level; + qual->vgc_level_reg = vgc_level; + } } static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev, @@ -877,7 +879,8 @@ static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev, static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_OFF); + int mask = (state == STATE_RADIO_IRQ_OFF) || + (state == STATE_RADIO_IRQ_OFF_ISR); u32 reg; /* @@ -978,7 +981,9 @@ static int 
rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt2400pci_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: rt2400pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -1076,9 +1081,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - struct queue_entry_priv_pci *entry_priv = entry->priv_data; - struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); - u32 word; u32 reg; /* @@ -1091,9 +1093,15 @@ static void rt2400pci_write_beacon(struct queue_entry *entry, rt2x00queue_map_txskb(rt2x00dev, entry->skb); - rt2x00_desc_read(entry_priv->desc, 1, &word); - rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); - rt2x00_desc_write(entry_priv->desc, 1, word); + /* + * Write the TX descriptor for the beacon. + */ + rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); + + /* + * Dump beacon to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); /* * Enable beaconing again. @@ -1230,23 +1238,10 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, } } -static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) +static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg; - - /* - * Get the interrupt sources & saved to local variable. - * Write register value back to clear pending interrupts. - */ - rt2x00pci_register_read(rt2x00dev, CSR7, ®); - rt2x00pci_register_write(rt2x00dev, CSR7, reg); - - if (!reg) - return IRQ_NONE; - - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return IRQ_HANDLED; + u32 reg = rt2x00dev->irqvalue[0]; /* * Handle interrupts, walk through all bits @@ -1284,9 +1279,40 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) rt2400pci_txdone(rt2x00dev, QID_AC_BK); + /* Enable interrupts again. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_ON_ISR); return IRQ_HANDLED; } +static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* Store irqvalues for use in the interrupt thread. */ + rt2x00dev->irqvalue[0] = reg; + + /* Disable interrupts, will be enabled again in the interrupt thread. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_OFF_ISR); + + return IRQ_WAKE_THREAD; +} + /* * Device probe functions. */ @@ -1396,8 +1422,8 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Check if the BBP tuning should be enabled. 
*/ - if (!rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) - __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) + __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); return 0; } @@ -1563,7 +1589,8 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = { .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, - .set_tim = rt2x00mac_set_tim, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2400pci_conf_tx, @@ -1574,6 +1601,7 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = { static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { .irq_handler = rt2400pci_interrupt, + .irq_handler_thread = rt2400pci_interrupt_thread, .probe_hw = rt2400pci_probe_hw, .initialize = rt2x00pci_initialize, .uninitialize = rt2x00pci_uninitialize, @@ -1585,7 +1613,6 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { .reset_tuner = rt2400pci_reset_tuner, .link_tuner = rt2400pci_link_tuner, .write_tx_desc = rt2400pci_write_tx_desc, - .write_tx_data = rt2x00pci_write_tx_data, .write_beacon = rt2400pci_write_beacon, .kick_tx_queue = rt2400pci_kick_tx_queue, .kill_tx_queue = rt2400pci_kill_tx_queue, diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 41da3d218c65e343b5ded898ad5cee89257e4201..c2a555d5376b3bbe4738813225a695d8fd81eac0 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -626,6 +626,7 @@ static inline void rt2500pci_set_vgc(struct rt2x00_dev *rt2x00dev, { if (qual->vgc_level_reg != vgc_level) { rt2500pci_bbp_write(rt2x00dev, 17, vgc_level); + qual->vgc_level = vgc_level; qual->vgc_level_reg = vgc_level; } } @@ -700,13 +701,10 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev, * R17 is inside the dynamic tuning range, * start tuning the link based on the false cca counter. 
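* (A false CCA count above 512 raises the VGC level by one step and a count below 100 lowers it again, bounded to the 0x32-0x40 window checked below.)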
*/ - if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40) { + if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40) rt2500pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level_reg); - qual->vgc_level = qual->vgc_level_reg; - } else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32) { + else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32) rt2500pci_set_vgc(rt2x00dev, qual, --qual->vgc_level_reg); - qual->vgc_level = qual->vgc_level_reg; - } } /* @@ -1035,7 +1033,8 @@ static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev, static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_OFF); + int mask = (state == STATE_RADIO_IRQ_OFF) || + (state == STATE_RADIO_IRQ_OFF_ISR); u32 reg; /* @@ -1136,7 +1135,9 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt2500pci_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: rt2500pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -1233,9 +1234,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - struct queue_entry_priv_pci *entry_priv = entry->priv_data; - struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); - u32 word; u32 reg; /* @@ -1248,9 +1246,15 @@ static void rt2500pci_write_beacon(struct queue_entry *entry, rt2x00queue_map_txskb(rt2x00dev, entry->skb); - rt2x00_desc_read(entry_priv->desc, 1, &word); - rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); - rt2x00_desc_write(entry_priv->desc, 1, word); + /* + * Write the TX descriptor for the beacon. + */ + rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); + + /* + * Dump beacon to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); /* * Enable beaconing again. @@ -1366,23 +1370,10 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, } } -static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) +static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg; - - /* - * Get the interrupt sources & saved to local variable. - * Write register value back to clear pending interrupts. - */ - rt2x00pci_register_read(rt2x00dev, CSR7, ®); - rt2x00pci_register_write(rt2x00dev, CSR7, reg); - - if (!reg) - return IRQ_NONE; - - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return IRQ_HANDLED; + u32 reg = rt2x00dev->irqvalue[0]; /* * Handle interrupts, walk through all bits @@ -1420,9 +1411,41 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) rt2500pci_txdone(rt2x00dev, QID_AC_BK); + /* Enable interrupts again. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_ON_ISR); + return IRQ_HANDLED; } +static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* Store irqvalues for use in the interrupt thread. 
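+ * The hard irq handler only acknowledges CSR7 and masks the device interrupts; the actual rx/tx-done processing happens in rt2500pci_interrupt_thread(), which re-enables them via STATE_RADIO_IRQ_ON_ISR once it is done.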
*/ + rt2x00dev->irqvalue[0] = reg; + + /* Disable interrupts, will be enabled again in the interrupt thread. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_OFF_ISR); + + return IRQ_WAKE_THREAD; +} + /* * Device probe functions. */ @@ -1554,9 +1577,8 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) * Check if the BBP tuning should be enabled. */ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); - - if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) - __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + if (!rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); /* * Read the RSSI <-> dBm offset information. @@ -1861,7 +1883,8 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = { .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, - .set_tim = rt2x00mac_set_tim, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2x00mac_conf_tx, @@ -1872,6 +1895,7 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = { static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { .irq_handler = rt2500pci_interrupt, + .irq_handler_thread = rt2500pci_interrupt_thread, .probe_hw = rt2500pci_probe_hw, .initialize = rt2x00pci_initialize, .uninitialize = rt2x00pci_uninitialize, @@ -1883,7 +1907,6 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { .reset_tuner = rt2500pci_reset_tuner, .link_tuner = rt2500pci_link_tuner, .write_tx_desc = rt2500pci_write_tx_desc, - .write_tx_data = rt2x00pci_write_tx_data, .write_beacon = rt2500pci_write_beacon, .kick_tx_queue = rt2500pci_kick_tx_queue, .kill_tx_queue = rt2500pci_kill_tx_queue, diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 9ae96a626e6d0fafc6b8a633af03c75a135c5b3d..cdaf93f48263c012649b30d6b72604ac0b853f64 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -345,11 +345,19 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { - int timeout; u32 mask; u16 reg; + enum cipher curr_cipher; if (crypto->cmd == SET_KEY) { + /* + * Do not allow setting a WEP key with an index other than 0; + * this is known not to work on at least some hardware. + * SW crypto will be used in that case. + */ + if (key->alg == ALG_WEP && key->keyidx != 0) + return -EOPNOTSUPP; + /* * Pairwise key will always be entry 0, but this * could collide with a shared key on the same @@ -358,6 +366,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, mask = TXRX_CSR0_KEY_ID.bit_mask; rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); + curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM); reg &= mask; if (reg && reg == mask) @@ -366,19 +375,17 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); key->hw_key_idx += reg ? ffz(reg) : 0; - /* - * The encryption key doesn't fit within the CSR cache, - * this means we should allocate it separately and use - * rt2x00usb_vendor_request() to send the key to the hardware. + * Hardware requires that all keys use the same cipher + * (e.g. TKIP-only, AES-only, but not TKIP+AES).
+ * If this is not the first key, compare the cipher with the + * first one and fall back to SW crypto if not the same. */ - reg = KEY_ENTRY(key->hw_key_idx); - timeout = REGISTER_TIMEOUT32(sizeof(crypto->key)); - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, reg, - crypto->key, - sizeof(crypto->key), - timeout); + if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher) + return -EOPNOTSUPP; + + rt2500usb_register_multiwrite(rt2x00dev, KEY_ENTRY(key->hw_key_idx), + crypto->key, sizeof(crypto->key)); /* * The driver does not support the IV/EIV generation @@ -818,6 +825,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field16(®, TXRX_CSR0_ALGORITHM, CIPHER_NONE); rt2x00_set_field16(®, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); rt2x00_set_field16(®, TXRX_CSR0_KEY_ID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); @@ -1005,7 +1013,9 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, rt2500usb_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: @@ -1034,7 +1044,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); - __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE); + __le32 *txd = (__le32 *) skb->data; u32 word; /* @@ -1080,6 +1090,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, /* * Register descriptor details in skb frame descriptor. */ + skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } @@ -1108,9 +1119,20 @@ static void rt2500usb_write_beacon(struct queue_entry *entry, rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); /* - * Take the descriptor in front of the skb into account. + * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); + memset(entry->skb->data, 0, TXD_DESC_SIZE); + + /* + * Write the TX descriptor for the beacon. + */ + rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc); + + /* + * Dump beacon to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); /* * USB devices cannot blindly pass the skb->len as the @@ -1459,13 +1481,6 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); - /* - * Check if the BBP tuning should be disabled. - */ - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) - __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); - /* * Read the RSSI <-> dBm offset information. */ @@ -1732,7 +1747,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); } - __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); /* * Set the rssi offset. 
@@ -1752,6 +1767,8 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = { .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2x00mac_conf_tx, @@ -1767,8 +1784,8 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { .rfkill_poll = rt2500usb_rfkill_poll, .link_stats = rt2500usb_link_stats, .reset_tuner = rt2500usb_reset_tuner, + .watchdog = rt2x00usb_watchdog, .write_tx_desc = rt2500usb_write_tx_desc, - .write_tx_data = rt2x00usb_write_tx_data, .write_beacon = rt2500usb_write_beacon, .get_tx_data_len = rt2500usb_get_tx_data_len, .kick_tx_queue = rt2x00usb_kick_tx_queue, diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 2aa03751c341d18848f668beb26868bdc252abb5..ed4ebcdde7c9c4155c58807cd5aae839a39ea63e 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -63,7 +63,6 @@ */ #define REV_RT2860C 0x0100 #define REV_RT2860D 0x0101 -#define REV_RT2870D 0x0101 #define REV_RT2872E 0x0200 #define REV_RT3070E 0x0200 #define REV_RT3070F 0x0201 @@ -75,7 +74,7 @@ * Signal information. * Default offset is required for RSSI <-> dBm conversion. */ -#define DEFAULT_RSSI_OFFSET 120 /* FIXME */ +#define DEFAULT_RSSI_OFFSET 120 /* * Register layout information. @@ -98,6 +97,21 @@ * Registers. */ +/* + * E2PROM_CSR: PCI EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE: 0: 93c46, 1:93c66. + * LOAD_STATUS: 1:loading, 0:done. + */ +#define E2PROM_CSR 0x0004 +#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001) +#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002) +#define E2PROM_CSR_DATA_IN FIELD32(0x00000004) +#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008) +#define E2PROM_CSR_TYPE FIELD32(0x00000030) +#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) +#define E2PROM_CSR_RELOAD FIELD32(0x00000080) + /* * OPT_14: Unknown register used by rt3xxx devices. */ @@ -321,6 +335,39 @@ #define RX_CRX_IDX 0x0298 #define RX_DRX_IDX 0x029c +/* + * USB_DMA_CFG + * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns. + * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes. + * PHY_CLEAR: phy watch dog enable. + * TX_CLEAR: Clear USB DMA TX path. + * TXOP_HALT: Halt TXOP count down when TX buffer is full. + * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation. + * RX_BULK_EN: Enable USB DMA Rx. + * TX_BULK_EN: Enable USB DMA Tx. + * EP_OUT_VALID: OUT endpoint data valid. + * RX_BUSY: USB DMA RX FSM busy. + * TX_BUSY: USB DMA TX FSM busy. 
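+ * For example, a RX_BULK_AGG_TIMEOUT value of 128 corresponds to roughly 4.2 us and a RX_BULK_AGG_LIMIT of 8 to 2048 bytes.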
+ */ +#define USB_DMA_CFG 0x02a0 +#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff) +#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00) +#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000) +#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000) +#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000) +#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000) +#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000) +#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000) +#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000) +#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000) +#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000) + +/* + * US_CYC_CNT + */ +#define US_CYC_CNT 0x02a4 +#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff) + /* * PBF_SYS_CTRL * HOST_RAM_WRITE: enable Host program ram write selection @@ -672,14 +719,20 @@ #define TBTT_TIMER 0x1124 /* - * INT_TIMER_CFG: + * INT_TIMER_CFG: timer configuration + * PRE_TBTT_TIMER: leadtime to tbtt for pretbtt interrupt in units of 1/16 TU + * GP_TIMER: period of general purpose timer in units of 1/16 TU */ #define INT_TIMER_CFG 0x1128 +#define INT_TIMER_CFG_PRE_TBTT_TIMER FIELD32(0x0000ffff) +#define INT_TIMER_CFG_GP_TIMER FIELD32(0xffff0000) /* * INT_TIMER_EN: GP-timer and pre-tbtt Int enable */ #define INT_TIMER_EN 0x112c +#define INT_TIMER_EN_PRE_TBTT_TIMER FIELD32(0x00000001) +#define INT_TIMER_EN_GP_TIMER FIELD32(0x00000002) /* * CH_IDLE_STA: channel idle time @@ -755,6 +808,18 @@ */ #define EDCA_TID_AC_MAP 0x1310 +/* + * TX_PWR_CFG: + */ +#define TX_PWR_CFG_RATE0 FIELD32(0x0000000f) +#define TX_PWR_CFG_RATE1 FIELD32(0x000000f0) +#define TX_PWR_CFG_RATE2 FIELD32(0x00000f00) +#define TX_PWR_CFG_RATE3 FIELD32(0x0000f000) +#define TX_PWR_CFG_RATE4 FIELD32(0x000f0000) +#define TX_PWR_CFG_RATE5 FIELD32(0x00f00000) +#define TX_PWR_CFG_RATE6 FIELD32(0x0f000000) +#define TX_PWR_CFG_RATE7 FIELD32(0xf0000000) + /* * TX_PWR_CFG_0: */ @@ -1370,17 +1435,17 @@ struct mac_wcid_entry { u8 mac[6]; u8 reserved[2]; -} __attribute__ ((packed)); +} __packed; struct hw_key_entry { u8 key[16]; u8 tx_mic[8]; u8 rx_mic[8]; -} __attribute__ ((packed)); +} __packed; struct mac_iveiv_entry { u8 iv[8]; -} __attribute__ ((packed)); +} __packed; /* * MAC_WCID_ATTRIBUTE: @@ -1389,6 +1454,10 @@ struct mac_iveiv_entry { #define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e) #define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070) #define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380) +#define MAC_WCID_ATTRIBUTE_CIPHER_EXT FIELD32(0x00000400) +#define MAC_WCID_ATTRIBUTE_BSS_IDX_EXT FIELD32(0x00000800) +#define MAC_WCID_ATTRIBUTE_WAPI_MCBC FIELD32(0x00008000) +#define MAC_WCID_ATTRIBUTE_WAPI_KEY_IDX FIELD32(0xff000000) /* * SHARED_KEY_MODE: @@ -1510,7 +1579,9 @@ struct mac_iveiv_entry { */ /* - * BBP 1: TX Antenna + * BBP 1: TX Antenna & Power + * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm, + * 3 - increase tx power by 6dBm */ #define BBP1_TX_POWER FIELD8(0x07) #define BBP1_TX_ANTENNA FIELD8(0x18) @@ -1800,9 +1871,15 @@ struct mac_iveiv_entry { #define EEPROM_TXPOWER_A_2 FIELD16(0xff00) /* - * EEPROM TXpower byrate: 20MHZ power + * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode */ #define EEPROM_TXPOWER_BYRATE 0x006f +#define EEPROM_TXPOWER_BYRATE_SIZE 9 + +#define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f) +#define EEPROM_TXPOWER_BYRATE_RATE1 FIELD16(0x00f0) +#define EEPROM_TXPOWER_BYRATE_RATE2 FIELD16(0x0f00) +#define EEPROM_TXPOWER_BYRATE_RATE3 FIELD16(0xf000) /* * EEPROM BBP. 
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index db4250d1c8b35cc68abfe078a10753d5ad97e198..b66e0fd8f0fa178f2d18544a2dc3e192c7c00b93 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1,9 +1,9 @@ /* + Copyright (C) 2010 Ivo van Doorn Copyright (C) 2009 Bartlomiej Zolnierkiewicz Copyright (C) 2009 Gertjan van Wingerde Based on the original rt2800pci.c and rt2800usb.c. - Copyright (C) 2009 Ivo van Doorn Copyright (C) 2009 Alban Browaeys Copyright (C) 2009 Felix Fietkau Copyright (C) 2009 Luis Correia @@ -33,21 +33,14 @@ Abstract: rt2800 generic device routines. */ +#include #include #include #include #include "rt2x00.h" -#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) -#include "rt2x00usb.h" -#endif #include "rt2800lib.h" #include "rt2800.h" -#include "rt2800usb.h" - -MODULE_AUTHOR("Bartlomiej Zolnierkiewicz"); -MODULE_DESCRIPTION("rt2800 library"); -MODULE_LICENSE("GPL"); /* * Register access. @@ -107,8 +100,7 @@ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, BBP_CSR_CFG_REGNUM, word); rt2x00_set_field32(®, BBP_CSR_CFG_BUSY, 1); rt2x00_set_field32(®, BBP_CSR_CFG_READ_CONTROL, 0); - if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) - rt2x00_set_field32(®, BBP_CSR_CFG_BBP_RW_MODE, 1); + rt2x00_set_field32(®, BBP_CSR_CFG_BBP_RW_MODE, 1); rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); } @@ -136,8 +128,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, BBP_CSR_CFG_REGNUM, word); rt2x00_set_field32(®, BBP_CSR_CFG_BUSY, 1); rt2x00_set_field32(®, BBP_CSR_CFG_READ_CONTROL, 1); - if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) - rt2x00_set_field32(®, BBP_CSR_CFG_BBP_RW_MODE, 1); + rt2x00_set_field32(®, BBP_CSR_CFG_BBP_RW_MODE, 1); rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); @@ -282,9 +273,162 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); -void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc) +static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) +{ + u16 fw_crc; + u16 crc; + + /* + * The last 2 bytes in the firmware array are the crc checksum itself, + * this means that we should never pass those 2 bytes to the crc + * algorithm. + */ + fw_crc = (data[len - 2] << 8 | data[len - 1]); + + /* + * Use the crc ccitt algorithm. + * This will return the same value as the legacy driver which + * used bit ordering reversion on the both the firmware bytes + * before input input as well as on the final output. + * Obviously using crc ccitt directly is much more efficient. + */ + crc = crc_ccitt(~0, data, len - 2); + + /* + * There is a small difference between the crc-itu-t + bitrev and + * the crc-ccitt crc calculation. In the latter method the 2 bytes + * will be swapped, use swab16 to convert the crc to the correct + * value. + */ + crc = swab16(crc); + + return fw_crc == crc; +} + +int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + size_t offset = 0; + size_t fw_len; + bool multiple; + + /* + * PCI(e) & SOC devices require firmware with a length + * of 8kb. USB devices require firmware files with a length + * of 4kb. Certain USB chipsets however require different firmware, + * which Ralink only provides attached to the original firmware + * file. Thus for USB devices, firmware files have a length + * which is a multiple of 4kb. 
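+ * An 8kb USB firmware file therefore carries two 4kb images, and the loop below CRC-checks every fw_len sized chunk individually.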
+ */ + if (rt2x00_is_usb(rt2x00dev)) { + fw_len = 4096; + multiple = true; + } else { + fw_len = 8192; + multiple = true; + } + + /* + * Validate the firmware length + */ + if (len != fw_len && (!multiple || (len % fw_len) != 0)) + return FW_BAD_LENGTH; + + /* + * Check if the chipset requires one of the upper parts + * of the firmware. + */ + if (rt2x00_is_usb(rt2x00dev) && + !rt2x00_rt(rt2x00dev, RT2860) && + !rt2x00_rt(rt2x00dev, RT2872) && + !rt2x00_rt(rt2x00dev, RT3070) && + ((len / fw_len) == 1)) + return FW_BAD_VERSION; + + /* + * 8kb firmware files must be checked as if it were + * 2 separate firmware files. + */ + while (offset < len) { + if (!rt2800_check_firmware_crc(data + offset, fw_len)) + return FW_BAD_CRC; + + offset += fw_len; + } + + return FW_OK; +} +EXPORT_SYMBOL_GPL(rt2800_check_firmware); + +int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + unsigned int i; + u32 reg; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg && reg != ~0) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + if (rt2x00_is_pci(rt2x00dev)) + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); + + /* + * Disable DMA, will be reenabled later when enabling + * the radio. + */ + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + /* + * Write firmware to the device. + */ + rt2800_drv_write_firmware(rt2x00dev, data, len); + + /* + * Wait for device to stabilize. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); + if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "PBF system register not ready.\n"); + return -EBUSY; + } + + /* + * Initialize firmware. 
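+ * The H2M (host-to-MCU) agent and mailbox registers are cleared as the final initialization step below.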
+ */ + rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + msleep(1); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_load_firmware); + +void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc) { - __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE); u32 word; /* @@ -336,9 +480,53 @@ void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc) } EXPORT_SYMBOL_GPL(rt2800_write_txwi); -void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) +static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2) { - __le32 *rxwi = (__le32 *) skb->data; + int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0); + int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1); + int rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2); + u16 eeprom; + u8 offset0; + u8 offset1; + u8 offset2; + + if (rt2x00dev->rx_status.band == IEEE80211_BAND_2GHZ) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); + offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); + offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); + offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2); + } else { + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom); + offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0); + offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1); + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); + offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2); + } + + /* + * Convert the value from the descriptor into the RSSI value + * If the value in the descriptor is 0, it is considered invalid + * and the default (extremely low) rssi value is assumed + */ + rssi0 = (rssi0) ? (-12 - offset0 - rt2x00dev->lna_gain - rssi0) : -128; + rssi1 = (rssi1) ? (-12 - offset1 - rt2x00dev->lna_gain - rssi1) : -128; + rssi2 = (rssi2) ? (-12 - offset2 - rt2x00dev->lna_gain - rssi2) : -128; + + /* + * mac80211 only accepts a single RSSI value. Calculating the + * average doesn't deliver a fair answer either since -60:-60 would + * be considered equally good as -50:-70 while the second is the one + * which gives less energy... + */ + rssi0 = max(rssi0, rssi1); + return max(rssi0, rssi2); +} + +void rt2800_process_rxwi(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + __le32 *rxwi = (__le32 *) entry->skb->data; u32 word; rt2x00_desc_read(rxwi, 0, &word); @@ -369,17 +557,93 @@ void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) rt2x00_desc_read(rxwi, 2, &word); - rxdesc->rssi = - (rt2x00_get_field32(word, RXWI_W2_RSSI0) + - rt2x00_get_field32(word, RXWI_W2_RSSI1)) / 2; + /* + * Convert descriptor AGC value to RSSI value. + */ + rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word); /* * Remove RXWI descriptor from start of buffer. */ - skb_pull(skb, RXWI_DESC_SIZE); + skb_pull(entry->skb, RXWI_DESC_SIZE); } EXPORT_SYMBOL_GPL(rt2800_process_rxwi); +void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + unsigned int beacon_base; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. 
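+ * Beacon generation is switched back on at the end of this function, once the TXWI and beacon frame have been written to the beacon base registers.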
+ */ + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + /* + * Add space for the TXWI in front of the skb. + */ + skb_push(entry->skb, TXWI_DESC_SIZE); + memset(entry->skb, 0, TXWI_DESC_SIZE); + + /* + * Register descriptor details in skb frame descriptor. + */ + skbdesc->flags |= SKBDESC_DESC_IN_SKB; + skbdesc->desc = entry->skb->data; + skbdesc->desc_len = TXWI_DESC_SIZE; + + /* + * Add the TXWI for the beacon to the skb. + */ + rt2800_write_txwi((__le32 *)entry->skb->data, txdesc); + + /* + * Dump beacon to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + + /* + * Write entire beacon with TXWI to register. + */ + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); + rt2800_register_multiwrite(rt2x00dev, beacon_base, + entry->skb->data, entry->skb->len); + + /* + * Enable beaconing again. + */ + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + /* + * Clean up beacon skb. + */ + dev_kfree_skb_any(entry->skb); + entry->skb = NULL; +} +EXPORT_SYMBOL_GPL(rt2800_write_beacon); + +static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, + unsigned int beacon_base) +{ + int i; + + /* + * For the Beacon base registers we only need to clear + * the whole TXWI which (when set to 0) will invalidate + * the entire beacon. + */ + for (i = 0; i < TXWI_DESC_SIZE; i += sizeof(__le32)) + rt2800_register_write(rt2x00dev, beacon_base + i, 0); +} + #ifdef CONFIG_RT2X00_LIB_DEBUGFS const struct rt2x00debug rt2800_rt2x00debug = { .owner = THIS_MODULE, @@ -502,15 +766,28 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev, offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx); - rt2800_register_read(rt2x00dev, offset, ®); - rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_KEYTAB, - !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); - rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER, - (crypto->cmd == SET_KEY) * crypto->cipher); - rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_BSS_IDX, - (crypto->cmd == SET_KEY) * crypto->bssidx); - rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher); - rt2800_register_write(rt2x00dev, offset, reg); + if (crypto->cmd == SET_KEY) { + rt2800_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_KEYTAB, + !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + /* + * Both the cipher as the BSS Idx numbers are split in a main + * value of 3 bits, and a extended field for adding one additional + * bit to the value. 
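+ * For example, a cipher value of 9 would be stored as CIPHER = 1 (the low three bits) with CIPHER_EXT = 1 (bit 3), which is what the masking below implements.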
+ */ + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER, + (crypto->cipher & 0x7)); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER_EXT, + (crypto->cipher & 0x8) >> 3); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_BSS_IDX, + (crypto->bssidx & 0x7)); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT, + (crypto->bssidx & 0x8) >> 3); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher); + rt2800_register_write(rt2x00dev, offset, reg); + } else { + rt2800_register_write(rt2x00dev, offset, 0); + } offset = MAC_IVEIV_ENTRY(key->hw_key_idx); @@ -668,19 +945,14 @@ EXPORT_SYMBOL_GPL(rt2800_config_filter); void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { - unsigned int beacon_base; u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Clear current synchronisation setup. - * For the Beacon base registers we only need to clear - * the first byte since that byte contains the VALID and OWNER - * bits which (when set to 0) will invalidate the entire beacon. */ - beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); - rt2800_register_write(rt2x00dev, beacon_base, 0); - + rt2800_clear_beacon(rt2x00dev, + HW_BEACON_OFFSET(intf->beacon->entry_idx)); /* * Enable synchronisation. */ @@ -688,8 +960,18 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, conf->sync); rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, - (conf->sync == TSF_SYNC_BEACON)); + (conf->sync == TSF_SYNC_ADHOC || + conf->sync == TSF_SYNC_AP_NONE)); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + /* + * Enable pre tbtt interrupt for beaconing modes + */ + rt2800_register_read(rt2x00dev, INT_TIMER_EN, ®); + rt2x00_set_field32(®, INT_TIMER_EN_PRE_TBTT_TIMER, + (conf->sync == TSF_SYNC_AP_NONE)); + rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg); + } if (flags & CONFIG_UPDATE_MAC) { @@ -703,8 +985,8 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, if (flags & CONFIG_UPDATE_BSSID) { reg = le32_to_cpu(conf->bssid[1]); - rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_ID_MASK, 0); - rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_BCN_NUM, 0); + rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_ID_MASK, 3); + rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_BCN_NUM, 7); conf->bssid[1] = cpu_to_le32(reg); rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0, @@ -762,14 +1044,12 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) switch ((int)ant->tx) { case 1: rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); - if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) - rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); break; case 2: rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); break; case 3: - /* Do nothing */ + rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); break; } @@ -1016,66 +1296,115 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, } static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, - const int txpower) + const int max_txpower) { + u8 txpower; + u8 max_value = (u8)max_txpower; + u16 eeprom; + int i; u32 reg; - u32 value = TXPOWER_G_TO_DEV(txpower); u8 r1; + u32 offset; + /* + * set to normal tx power mode: +/- 0dBm + */ rt2800_bbp_read(rt2x00dev, 1, &r1); - rt2x00_set_field8(®, BBP1_TX_POWER, 0); + rt2x00_set_field8(&r1, BBP1_TX_POWER, 0); rt2800_bbp_write(rt2x00dev, 1, r1); - rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, ®); - rt2x00_set_field32(®, 
TX_PWR_CFG_0_1MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_2MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_55MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_11MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_6MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_9MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_12MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_0_18MBS, value); - rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, reg); - - rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, ®); - rt2x00_set_field32(®, TX_PWR_CFG_1_24MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_36MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_48MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_54MBS, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_MCS0, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_MCS1, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_MCS2, value); - rt2x00_set_field32(®, TX_PWR_CFG_1_MCS3, value); - rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, reg); - - rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, ®); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS4, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS5, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS6, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS7, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS8, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS9, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS10, value); - rt2x00_set_field32(®, TX_PWR_CFG_2_MCS11, value); - rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, reg); - - rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, ®); - rt2x00_set_field32(®, TX_PWR_CFG_3_MCS12, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_MCS13, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_MCS14, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_MCS15, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN1, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN2, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN3, value); - rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN4, value); - rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, reg); - - rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, ®); - rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN5, value); - rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN6, value); - rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN7, value); - rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN8, value); - rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, reg); + /* + * The eeprom contains the tx power values for each rate. These + * values map to 100% tx power. Each 16bit word contains four tx + * power values and the order is the same as used in the TX_PWR_CFG + * registers. 
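+ * The first EEPROM word supplies the four values for the TX_PWR_CFG_0 rates (1, 2, 5.5 and 11 Mbps); every value is clamped to the requested maximum with min() below.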
+ */ + offset = TX_PWR_CFG_0; + + for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { + /* just to be safe */ + if (offset > TX_PWR_CFG_4) + break; + + rt2800_register_read(rt2x00dev, offset, ®); + + /* read the next four txpower values */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, + &eeprom); + + /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS, + * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE0); + rt2x00_set_field32(®, TX_PWR_CFG_RATE0, + min(txpower, max_value)); + + /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS, + * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE1); + rt2x00_set_field32(®, TX_PWR_CFG_RATE1, + min(txpower, max_value)); + + /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS, + * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE2); + rt2x00_set_field32(®, TX_PWR_CFG_RATE2, + min(txpower, max_value)); + + /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS, + * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE3); + rt2x00_set_field32(®, TX_PWR_CFG_RATE3, + min(txpower, max_value)); + + /* read the next four txpower values */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, + &eeprom); + + /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0, + * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE0); + rt2x00_set_field32(®, TX_PWR_CFG_RATE4, + min(txpower, max_value)); + + /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1, + * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE1); + rt2x00_set_field32(®, TX_PWR_CFG_RATE5, + min(txpower, max_value)); + + /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2, + * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE2); + rt2x00_set_field32(®, TX_PWR_CFG_RATE6, + min(txpower, max_value)); + + /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3, + * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown, + * TX_PWR_CFG_4: unknown */ + txpower = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_BYRATE_RATE3); + rt2x00_set_field32(®, TX_PWR_CFG_RATE7, + min(txpower, max_value)); + + rt2800_register_write(rt2x00dev, offset, reg); + + /* next TX_PWR_CFG register */ + offset += 4; + } } static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev, @@ -1212,6 +1541,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) u32 reg; u16 eeprom; unsigned int i; + int ret; rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); @@ -1221,59 +1551,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); - if (rt2x00_is_usb(rt2x00dev)) { - /* - * Wait until BBP and RF are ready. 
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, MAC_CSR0, ®); - if (reg && reg != ~0) - break; - msleep(1); - } - - if (i == REGISTER_BUSY_COUNT) { - ERROR(rt2x00dev, "Unstable hardware.\n"); - return -EBUSY; - } - - rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, - reg & ~0x00002000); - } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) { - /* - * Reset DMA indexes - */ - rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); - rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); - - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); - - rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); - } - - rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); - rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_CSR, 1); - rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_BBP, 1); - rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); - - if (rt2x00_is_usb(rt2x00dev)) { - rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); -#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) - rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, - USB_MODE_RESET, REGISTER_TIMEOUT); -#endif - } - - rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); + ret = rt2800_drv_init_registers(rt2x00dev); + if (ret) + return ret; rt2800_register_read(rt2x00dev, BCN_OFFSET0, ®); rt2x00_set_field32(®, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */ @@ -1295,7 +1575,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); - rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_INTERVAL, 0); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_INTERVAL, 1600); rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 0); rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, 0); rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 0); @@ -1328,7 +1608,6 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) } else { rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } - rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg); } else if (rt2x00_rt(rt2x00dev, RT3070)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); @@ -1339,6 +1618,10 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } + } else if (rt2800_is_305x_soc(rt2x00dev)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); + rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -1546,23 +1829,20 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) /* * Clear all beacons - * For the Beacon base registers we only need to clear - * the first byte since that byte contains the VALID and OWNER - * bits which (when set to 0) will invalidate the entire beacon. 
- */ - rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0); - rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0); + */ + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6); + rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7); if (rt2x00_is_usb(rt2x00dev)) { - rt2800_register_read(rt2x00dev, USB_CYC_CFG, ®); - rt2x00_set_field32(®, USB_CYC_CFG_CLOCK_CYCLE, 30); - rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg); + rt2800_register_read(rt2x00dev, US_CYC_CNT, ®); + rt2x00_set_field32(®, US_CYC_CNT_CLOCK_CYCLE, 30); + rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } rt2800_register_read(rt2x00dev, HT_FBK_CFG0, ®); @@ -1617,6 +1897,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, TX_STA_CNT1, ®); rt2800_register_read(rt2x00dev, TX_STA_CNT2, ®); + /* + * Setup leadtime for pre tbtt interrupt to 6ms + */ + rt2800_register_read(rt2x00dev, INT_TIMER_CFG, ®); + rt2x00_set_field32(®, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4); + rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg); + return 0; } EXPORT_SYMBOL_GPL(rt2800_init_registers); @@ -1706,8 +1993,7 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6a); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) || - rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D)) + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) rt2800_bbp_write(rt2x00dev, 84, 0x19); else rt2800_bbp_write(rt2x00dev, 84, 0x99); @@ -2013,8 +2299,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); } rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); @@ -2147,7 +2432,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); } else if (rt2x00_rt(rt2x00dev, RT2860) || - rt2x00_rt(rt2x00dev, RT2870) || rt2x00_rt(rt2x00dev, RT2872)) { /* * There is a max of 2 RX streams for RT28x0 series @@ -2169,6 +2453,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0); rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0); rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0); + rt2x00_set_field16(&word, EEPROM_NIC_ANT_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DAC_TEST, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); } @@ -2176,6 +2462,10 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); if ((word & 
0x00ff) == 0x00ff) { rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + if ((word & 0xff00) == 0xff00) { rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, LED_MODE_TXRX_ACTIVITY); rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); @@ -2183,7 +2473,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555); rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221); rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8); - EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word); } /* @@ -2251,7 +2541,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); if (!rt2x00_rt(rt2x00dev, RT2860) && - !rt2x00_rt(rt2x00dev, RT2870) && !rt2x00_rt(rt2x00dev, RT2872) && !rt2x00_rt(rt2x00dev, RT2883) && !rt2x00_rt(rt2x00dev, RT3070) && @@ -2484,13 +2773,26 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_PS_NULLFUNC_STACK; + IEEE80211_HW_PS_NULLFUNC_STACK | + IEEE80211_HW_AMPDU_AGGREGATION; SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); + /* + * As rt2800 has a global fallback table we cannot specify + * more than one tx rate per frame, but since the hw will + * try several rates (based on the fallback table) we should + * still initialize max_rates to the maximum number of rates + * we are going to try. Otherwise mac80211 will truncate our + * reported tx rates and the rc algorithm will end up with + * incorrect data. + */ + rt2x00dev->hw->max_rates = 7; + rt2x00dev->hw->max_rate_tries = 1; + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); /* @@ -2528,16 +2830,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) else spec->ht.ht_supported = false; - /* - * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes - * reception problems with HT40 capable 11n APs - */ spec->ht.cap = + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_TX_STBC | - IEEE80211_HT_CAP_RX_STBC; + IEEE80211_HT_CAP_SGI_40; + + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2) + spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC; + + spec->ht.cap |= + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) << + IEEE80211_HT_CAP_RX_STBC_SHIFT; + spec->ht.ampdu_factor = 3; spec->ht.ampdu_density = 4; spec->ht.mcs.tx_params = @@ -2591,8 +2896,8 @@ EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode); /* * IEEE80211 stack callback functions.
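* (These callbacks are exported below so that the bus specific rt2800 drivers can reference them from their own ieee80211_ops tables.)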
*/ -static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, - u32 *iv32, u16 *iv16) +void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32, + u16 *iv16) { struct rt2x00_dev *rt2x00dev = hw->priv; struct mac_iveiv_entry iveiv_entry; @@ -2605,8 +2910,9 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16)); memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32)); } +EXPORT_SYMBOL_GPL(rt2800_get_tkip_seq); -static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct rt2x00_dev *rt2x00dev = hw->priv; u32 reg; @@ -2642,9 +2948,10 @@ static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) return 0; } +EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold); -static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, - const struct ieee80211_tx_queue_params *params) +int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, + const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; @@ -2709,8 +3016,9 @@ static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, return 0; } +EXPORT_SYMBOL_GPL(rt2800_conf_tx); -static u64 rt2800_get_tsf(struct ieee80211_hw *hw) +u64 rt2800_get_tsf(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; @@ -2723,23 +3031,37 @@ static u64 rt2800_get_tsf(struct ieee80211_hw *hw) return tsf; } +EXPORT_SYMBOL_GPL(rt2800_get_tsf); -const struct ieee80211_ops rt2800_mac80211_ops = { - .tx = rt2x00mac_tx, - .start = rt2x00mac_start, - .stop = rt2x00mac_stop, - .add_interface = rt2x00mac_add_interface, - .remove_interface = rt2x00mac_remove_interface, - .config = rt2x00mac_config, - .configure_filter = rt2x00mac_configure_filter, - .set_tim = rt2x00mac_set_tim, - .set_key = rt2x00mac_set_key, - .get_stats = rt2x00mac_get_stats, - .get_tkip_seq = rt2800_get_tkip_seq, - .set_rts_threshold = rt2800_set_rts_threshold, - .bss_info_changed = rt2x00mac_bss_info_changed, - .conf_tx = rt2800_conf_tx, - .get_tsf = rt2800_get_tsf, - .rfkill_poll = rt2x00mac_rfkill_poll, -}; -EXPORT_SYMBOL_GPL(rt2800_mac80211_ops); +int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + int ret = 0; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + /* we don't support RX aggregation yet */ + ret = -ENOTSUPP; + break; + case IEEE80211_AMPDU_TX_START: + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP: + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + break; + default: + WARNING((struct rt2x00_dev *)hw->priv, "Unknown AMPDU action\n"); + } + + return ret; +} +EXPORT_SYMBOL_GPL(rt2800_ampdu_action); + +MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz"); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2800 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h index 94de999e229032841ea9793b89e4ffd7f4810354..091641e3c5e2ec95eb4d89449490f7b45c41d18d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/rt2x00/rt2800lib.h @@ -40,13 +40,17 @@ struct rt2800_ops { int (*regbusy_read)(struct rt2x00_dev *rt2x00dev, const unsigned int offset, const struct 
rt2x00_field32 field, u32 *reg); + + int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len); + int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev); }; static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 *value) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; rt2800ops->register_read(rt2x00dev, offset, value); } @@ -55,7 +59,7 @@ static inline void rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 *value) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; rt2800ops->register_read_lock(rt2x00dev, offset, value); } @@ -64,7 +68,7 @@ static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 value) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; rt2800ops->register_write(rt2x00dev, offset, value); } @@ -73,7 +77,7 @@ static inline void rt2800_register_write_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 value) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; rt2800ops->register_write_lock(rt2x00dev, offset, value); } @@ -82,7 +86,7 @@ static inline void rt2800_register_multiread(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u32 length) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; rt2800ops->register_multiread(rt2x00dev, offset, value, length); } @@ -92,7 +96,7 @@ static inline void rt2800_register_multiwrite(struct rt2x00_dev *rt2x00dev, const void *value, const u32 length) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; rt2800ops->register_multiwrite(rt2x00dev, offset, value, length); } @@ -102,17 +106,39 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev, const struct rt2x00_field32 field, u32 *reg) { - const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg); } +static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; + + return rt2800ops->drv_write_firmware(rt2x00dev, data, len); +} + +static inline int rt2800_drv_init_registers(struct rt2x00_dev *rt2x00dev) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv; + + return rt2800ops->drv_init_registers(rt2x00dev); +} + void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1); -void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc); -void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *txdesc); +int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len); +int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len); + +void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc); +void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc); + +void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); extern const struct rt2x00debug 
rt2800_rt2x00debug; @@ -148,6 +174,14 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev); int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev); int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev); -extern const struct ieee80211_ops rt2800_mac80211_ops; +void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32, + u16 *iv16); +int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value); +int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, + const struct ieee80211_tx_queue_params *params); +u64 rt2800_get_tsf(struct ieee80211_hw *hw); +int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); #endif /* RT2800LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index b2f23272c3aae3efa74524f10b894bc88d521637..39b3846fa340a83704a69d8a252bb7a6d6fa87b5 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -31,7 +31,6 @@ Supported chipsets: RT2800E & RT2800ED. */ -#include #include #include #include @@ -51,7 +50,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt = 1; +static int modparam_nohwcrypt = 0; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); @@ -139,8 +138,18 @@ static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) eeprom.data = rt2x00dev; eeprom.register_read = rt2800pci_eepromregister_read; eeprom.register_write = rt2800pci_eepromregister_write; - eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ? - PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + switch (rt2x00_get_field32(reg, E2PROM_CSR_TYPE)) + { + case 0: + eeprom.width = PCI_EEPROM_WIDTH_93C46; + break; + case 1: + eeprom.width = PCI_EEPROM_WIDTH_93C66; + break; + default: + eeprom.width = PCI_EEPROM_WIDTH_93C86; + break; + } eeprom.reg_data_in = 0; eeprom.reg_data_out = 0; eeprom.reg_data_clock = 0; @@ -182,81 +191,13 @@ static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) return FIRMWARE_RT2860; } -static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev, +static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { - u16 fw_crc; - u16 crc; - - /* - * Only support 8kb firmware files. - */ - if (len != 8192) - return FW_BAD_LENGTH; - - /* - * The last 2 bytes in the firmware array are the crc checksum itself, - * this means that we should never pass those 2 bytes to the crc - * algorithm. - */ - fw_crc = (data[len - 2] << 8 | data[len - 1]); - - /* - * Use the crc ccitt algorithm. - * This will return the same value as the legacy driver which - * used bit ordering reversion on the both the firmware bytes - * before input input as well as on the final output. - * Obviously using crc ccitt directly is much more efficient. - */ - crc = crc_ccitt(~0, data, len - 2); - - /* - * There is a small difference between the crc-itu-t + bitrev and - * the crc-ccitt crc calculation. In the latter method the 2 bytes - * will be swapped, use swab16 to convert the crc to the correct - * value. - */ - crc = swab16(crc); - - return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; -} - -static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev, - const u8 *data, const size_t len) -{ - unsigned int i; u32 reg; - /* - * Wait for stable hardware. 
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, MAC_CSR0, ®); - if (reg && reg != ~0) - break; - msleep(1); - } - - if (i == REGISTER_BUSY_COUNT) { - ERROR(rt2x00dev, "Unstable hardware.\n"); - return -EBUSY; - } - - rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000); - /* - * Disable DMA, will be reenabled later when enabling - * the radio. - */ - rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); - rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); - /* * enable Host program ram write selection */ @@ -268,34 +209,11 @@ static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev, * Write firmware to device. */ rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, - data, len); + data, len); rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); - /* - * Wait for device to stabilize. - */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); - if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) - break; - msleep(1); - } - - if (i == REGISTER_BUSY_COUNT) { - ERROR(rt2x00dev, "PBF system register not ready.\n"); - return -EBUSY; - } - - /* - * Disable interrupts - */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF); - - /* - * Initialize BBP R/W access agent - */ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); @@ -412,7 +330,8 @@ static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev, static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_ON); + int mask = (state == STATE_RADIO_IRQ_ON) || + (state == STATE_RADIO_IRQ_ON_ISR); u32 reg; /* @@ -446,6 +365,38 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); } +static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Reset DMA indexes + */ + rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); + rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); + + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_CSR, 1); + rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_BBP, 1); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); + + return 0; +} + static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; @@ -465,7 +416,7 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) /* * Send signal to firmware during boot time. 
*/ - rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); + rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); /* * Enable RX. @@ -589,7 +540,9 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt2800pci_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: rt2800pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -613,18 +566,12 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev, /* * TX descriptor initialization */ -static int rt2800pci_write_tx_data(struct queue_entry* entry, - struct txentry_desc *txdesc) +static void rt2800pci_write_tx_data(struct queue_entry* entry, + struct txentry_desc *txdesc) { - int ret; - - ret = rt2x00pci_write_tx_data(entry, txdesc); - if (ret) - return ret; + __le32 *txwi = (__le32 *) entry->skb->data; - rt2800_write_txwi(entry->skb, txdesc); - - return 0; + rt2800_write_txwi(txwi, txdesc); } @@ -684,49 +631,6 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, /* * TX data initialization */ -static void rt2800pci_write_beacon(struct queue_entry *entry, - struct txentry_desc *txdesc) -{ - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - unsigned int beacon_base; - u32 reg; - - /* - * Disable beaconing while we are reloading the beacon data, - * otherwise we might be sending out invalid data. - */ - rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); - rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); - rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); - - /* - * Add the TXWI for the beacon to the skb. - */ - rt2800_write_txwi(entry->skb, txdesc); - skb_push(entry->skb, TXWI_DESC_SIZE); - - /* - * Write entire beacon with TXWI to register. - */ - beacon_base = HW_BEACON_OFFSET(entry->entry_idx); - rt2800_register_multiwrite(rt2x00dev, beacon_base, - entry->skb->data, entry->skb->len); - - /* - * Enable beaconing again. - */ - rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); - rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); - rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); - rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); - - /* - * Clean up beacon skb. - */ - dev_kfree_skb_any(entry->skb); - entry->skb = NULL; -} - static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, const enum data_queue_qid queue_idx) { @@ -812,7 +716,7 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, /* * Process the RXWI structure that is at the start of the buffer. */ - rt2800_process_rxwi(entry->skb, rxdesc); + rt2800_process_rxwi(entry, rxdesc); /* * Set RX IDX in register to inform hardware that we have handled @@ -832,29 +736,24 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) struct txdone_entry_desc txdesc; u32 word; u32 reg; - u32 old_reg; int wcid, ack, pid, tx_wcid, tx_ack, tx_pid; u16 mcs, real_mcs; + int i; /* - * During each loop we will compare the freshly read - * TX_STA_FIFO register value with the value read from - * the previous loop. If the 2 values are equal then - * we should stop processing because the chance it - * quite big that the device has been unplugged and - * we risk going into an endless loop. + * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO + * at most X times and also stop processing once the TX_STA_FIFO_VALID + * flag is not set anymore. + * + * The legacy drivers use X=TX_RING_SIZE but state in a comment + * that the TX_STA_FIFO stack has a size of 16. We stick to our + * tx ring size for now. 
*/ - old_reg = 0; - - while (1) { + for (i = 0; i < TX_ENTRIES; i++) { rt2800_register_read(rt2x00dev, TX_STA_FIFO, ®); if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID)) break; - if (old_reg == reg) - break; - old_reg = reg; - wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); @@ -880,8 +779,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) /* Check if we got a match by looking at WCID/ACK/PID * fields */ - txwi = (__le32 *)(entry->skb->data - - rt2x00dev->ops->extra_tx_headroom); + txwi = (__le32 *) entry->skb->data; rt2x00_desc_read(txwi, 1, &word); tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID); @@ -923,8 +821,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) txdesc.retry = 7; } - __set_bit(TXDONE_FALLBACK, &txdesc.flags); - + /* + * the frame was retried at least once + * -> hw used fallback rates + */ + if (txdesc.retry) + __set_bit(TXDONE_FALLBACK, &txdesc.flags); rt2x00lib_txdone(entry, &txdesc); } @@ -938,6 +840,48 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev) rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } +static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg = rt2x00dev->irqvalue[0]; + + /* + * 1 - Pre TBTT interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) + rt2x00lib_pretbtt(rt2x00dev); + + /* + * 2 - Beacondone interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 3 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 4 - Tx done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) + rt2800pci_txdone(rt2x00dev); + + /* + * 5 - Auto wakeup interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) + rt2800pci_wakeup(rt2x00dev); + + /* Enable interrupts again. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_ON_ISR); + + return IRQ_HANDLED; +} + static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; @@ -953,19 +897,15 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; - /* - * 1 - Rx ring done interrupt. - */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) - rt2x00pci_rxdone(rt2x00dev); + /* Store irqvalue for use in the interrupt thread. */ + rt2x00dev->irqvalue[0] = reg; - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) - rt2800pci_txdone(rt2x00dev); + /* Disable interrupts, will be enabled again in the interrupt thread. 
*/ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_OFF_ISR); - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) - rt2800pci_wakeup(rt2x00dev); - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } /* @@ -986,24 +926,10 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static const struct rt2800_ops rt2800pci_rt2800_ops = { - .register_read = rt2x00pci_register_read, - .register_read_lock = rt2x00pci_register_read, /* same for PCI */ - .register_write = rt2x00pci_register_write, - .register_write_lock = rt2x00pci_register_write, /* same for PCI */ - - .register_multiread = rt2x00pci_register_multiread, - .register_multiwrite = rt2x00pci_register_multiwrite, - - .regbusy_read = rt2x00pci_regbusy_read, -}; - static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; - rt2x00dev->priv = (void *)&rt2800pci_rt2800_ops; - /* * Allocate eeprom data. */ @@ -1029,6 +955,12 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags); + /* + * This device has a pre tbtt interrupt and thus fetches + * a new beacon directly prior to transmission. + */ + __set_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags); + /* * This device requires firmware. */ @@ -1038,6 +970,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); if (!modparam_nohwcrypt) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); /* * Set the rssi offset. @@ -1047,12 +980,46 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) return 0; } +static const struct ieee80211_ops rt2800pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_key = rt2x00mac_set_key, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, + .get_stats = rt2x00mac_get_stats, + .get_tkip_seq = rt2800_get_tkip_seq, + .set_rts_threshold = rt2800_set_rts_threshold, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt2800_conf_tx, + .get_tsf = rt2800_get_tsf, + .rfkill_poll = rt2x00mac_rfkill_poll, + .ampdu_action = rt2800_ampdu_action, +}; + +static const struct rt2800_ops rt2800pci_rt2800_ops = { + .register_read = rt2x00pci_register_read, + .register_read_lock = rt2x00pci_register_read, /* same for PCI */ + .register_write = rt2x00pci_register_write, + .register_write_lock = rt2x00pci_register_write, /* same for PCI */ + .register_multiread = rt2x00pci_register_multiread, + .register_multiwrite = rt2x00pci_register_multiwrite, + .regbusy_read = rt2x00pci_regbusy_read, + .drv_write_firmware = rt2800pci_write_firmware, + .drv_init_registers = rt2800pci_init_registers, +}; + static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .irq_handler = rt2800pci_interrupt, + .irq_handler_thread = rt2800pci_interrupt_thread, .probe_hw = rt2800pci_probe_hw, .get_firmware_name = rt2800pci_get_firmware_name, - .check_firmware = rt2800pci_check_firmware, - .load_firmware = rt2800pci_load_firmware, + .check_firmware = rt2800_check_firmware, + .load_firmware = rt2800_load_firmware, .initialize = rt2x00pci_initialize, .uninitialize = 
rt2x00pci_uninitialize, .get_entry_state = rt2800pci_get_entry_state, @@ -1064,7 +1031,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .link_tuner = rt2800_link_tuner, .write_tx_desc = rt2800pci_write_tx_desc, .write_tx_data = rt2800pci_write_tx_data, - .write_beacon = rt2800pci_write_beacon, + .write_beacon = rt2800_write_beacon, .kick_tx_queue = rt2800pci_kick_tx_queue, .kill_tx_queue = rt2800pci_kill_tx_queue, .fill_rxdone = rt2800pci_fill_rxdone, @@ -1110,7 +1077,8 @@ static const struct rt2x00_ops rt2800pci_ops = { .tx = &rt2800pci_queue_tx, .bcn = &rt2800pci_queue_bcn, .lib = &rt2800pci_rt2x00_ops, - .hw = &rt2800_mac80211_ops, + .drv = &rt2800pci_rt2800_ops, + .hw = &rt2800pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2800_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h index afc8e7da27cbe32d135f5299b7e22efcaa9e5e18..5a8dda9b5b5a66c0d6b798dcd837be20c1cc29e4 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.h +++ b/drivers/net/wireless/rt2x00/rt2800pci.h @@ -34,25 +34,6 @@ #ifndef RT2800PCI_H #define RT2800PCI_H -/* - * PCI registers. - */ - -/* - * E2PROM_CSR: EEPROM control register. - * RELOAD: Write 1 to reload eeprom content. - * TYPE: 0: 93c46, 1:93c66. - * LOAD_STATUS: 1:loading, 0:done. - */ -#define E2PROM_CSR 0x0004 -#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001) -#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002) -#define E2PROM_CSR_DATA_IN FIELD32(0x00000004) -#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008) -#define E2PROM_CSR_TYPE FIELD32(0x00000030) -#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) -#define E2PROM_CSR_RELOAD FIELD32(0x00000080) - /* * Queue register offset macros */ diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 0f8b84b7224cace0167ebe5b0065da28af08986c..5a2dfe87c6b61b5f48ec9d55ad95fea5208ab4ca 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -28,7 +28,6 @@ Supported chipsets: RT2800U. */ -#include #include #include #include @@ -45,7 +44,7 @@ /* * Allow hardware encryption to be disabled. */ -static int modparam_nohwcrypt = 1; +static int modparam_nohwcrypt = 0; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); @@ -57,84 +56,10 @@ static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) return FIRMWARE_RT2870; } -static bool rt2800usb_check_crc(const u8 *data, const size_t len) -{ - u16 fw_crc; - u16 crc; - - /* - * The last 2 bytes in the firmware array are the crc checksum itself, - * this means that we should never pass those 2 bytes to the crc - * algorithm. - */ - fw_crc = (data[len - 2] << 8 | data[len - 1]); - - /* - * Use the crc ccitt algorithm. - * This will return the same value as the legacy driver which - * used bit ordering reversion on the both the firmware bytes - * before input input as well as on the final output. - * Obviously using crc ccitt directly is much more efficient. - */ - crc = crc_ccitt(~0, data, len - 2); - - /* - * There is a small difference between the crc-itu-t + bitrev and - * the crc-ccitt crc calculation. In the latter method the 2 bytes - * will be swapped, use swab16 to convert the crc to the correct - * value. 
- */ - crc = swab16(crc); - - return fw_crc == crc; -} - -static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev, +static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { - size_t offset = 0; - - /* - * Firmware files: - * There are 2 variations of the rt2870 firmware. - * a) size: 4kb - * b) size: 8kb - * Note that (b) contains 2 separate firmware blobs of 4k - * within the file. The first blob is the same firmware as (a), - * but the second blob is for the additional chipsets. - */ - if (len != 4096 && len != 8192) - return FW_BAD_LENGTH; - - /* - * Check if we need the upper 4kb firmware data or not. - */ - if ((len == 4096) && - !rt2x00_rt(rt2x00dev, RT2860) && - !rt2x00_rt(rt2x00dev, RT2872) && - !rt2x00_rt(rt2x00dev, RT3070)) - return FW_BAD_VERSION; - - /* - * 8kb firmware files must be checked as if it were - * 2 separate firmware files. - */ - while (offset < len) { - if (!rt2800usb_check_crc(data + offset, 4096)) - return FW_BAD_CRC; - - offset += 4096; - } - - return FW_OK; -} - -static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev, - const u8 *data, const size_t len) -{ - unsigned int i; int status; - u32 reg; u32 offset; u32 length; @@ -151,29 +76,11 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev, length = 4096; } - /* - * Wait for stable hardware. - */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, MAC_CSR0, ®); - if (reg && reg != ~0) - break; - msleep(1); - } - - if (i == REGISTER_BUSY_COUNT) { - ERROR(rt2x00dev, "Unstable hardware.\n"); - return -EBUSY; - } - /* * Write firmware to device. */ - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, - FIRMWARE_IMAGE_BASE, - data + offset, length, - REGISTER_TIMEOUT32(length)); + rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, + data + offset, length); rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); @@ -196,7 +103,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev, /* * Send signal to firmware during boot time. */ - rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); + rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071) || @@ -206,28 +113,6 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev, udelay(10); } - /* - * Wait for device to stabilize. - */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); - if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) - break; - msleep(1); - } - - if (i == REGISTER_BUSY_COUNT) { - ERROR(rt2x00dev, "PBF system register not ready.\n"); - return -EBUSY; - } - - /* - * Initialize firmware. - */ - rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); - rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); - msleep(1); - return 0; } @@ -246,6 +131,44 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev, rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); } +static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + int i; + + /* + * Wait until BBP and RF are ready. 
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg && reg != ~0) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000); + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_CSR, 1); + rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_BBP, 1); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, + USB_MODE_RESET, REGISTER_TIMEOUT); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); + + return 0; +} + static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; @@ -371,7 +294,9 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, rt2800usb_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: @@ -395,25 +320,29 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, /* * TX descriptor initialization */ +static void rt2800usb_write_tx_data(struct queue_entry* entry, + struct txentry_desc *txdesc) +{ + __le32 *txwi = (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE); + + rt2800_write_txwi(txwi, txdesc); +} + + static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); - __le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE); + __le32 *txi = (__le32 *) skb->data; u32 word; - /* - * Initialize TXWI descriptor - */ - rt2800_write_txwi(skb, txdesc); - /* * Initialize TXINFO descriptor */ rt2x00_desc_read(txi, 0, &word); rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, - skb->len + TXWI_DESC_SIZE); + skb->len - TXINFO_DESC_SIZE); rt2x00_set_field32(&word, TXINFO_W0_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); @@ -426,6 +355,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, /* * Register descriptor details in skb frame descriptor. */ + skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txi; skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; } @@ -433,51 +363,6 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, /* * TX data initialization */ -static void rt2800usb_write_beacon(struct queue_entry *entry, - struct txentry_desc *txdesc) -{ - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - unsigned int beacon_base; - u32 reg; - - /* - * Disable beaconing while we are reloading the beacon data, - * otherwise we might be sending out invalid data. - */ - rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); - rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); - rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); - - /* - * Add the TXWI for the beacon to the skb. - */ - rt2800_write_txwi(entry->skb, txdesc); - skb_push(entry->skb, TXWI_DESC_SIZE); - - /* - * Write entire beacon with descriptor to register. 
- */ - beacon_base = HW_BEACON_OFFSET(entry->entry_idx); - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, beacon_base, - entry->skb->data, entry->skb->len, - REGISTER_TIMEOUT32(entry->skb->len)); - - /* - * Enable beaconing again. - */ - rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); - rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); - rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); - rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); - - /* - * Clean up the beacon skb. - */ - dev_kfree_skb(entry->skb); - entry->skb = NULL; -} - static int rt2800usb_get_tx_data_len(struct queue_entry *entry) { int length; @@ -568,7 +453,7 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, /* * Process the RXWI structure. */ - rt2800_process_rxwi(entry->skb, rxdesc); + rt2800_process_rxwi(entry, rxdesc); } /* @@ -585,24 +470,10 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static const struct rt2800_ops rt2800usb_rt2800_ops = { - .register_read = rt2x00usb_register_read, - .register_read_lock = rt2x00usb_register_read_lock, - .register_write = rt2x00usb_register_write, - .register_write_lock = rt2x00usb_register_write_lock, - - .register_multiread = rt2x00usb_register_multiread, - .register_multiwrite = rt2x00usb_register_multiwrite, - - .regbusy_read = rt2x00usb_regbusy_read, -}; - static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; - rt2x00dev->priv = (void *)&rt2800usb_rt2800_ops; - /* * Allocate eeprom data. */ @@ -635,6 +506,8 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); if (!modparam_nohwcrypt) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); /* * Set the rssi offset. 
@@ -644,11 +517,45 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) return 0; } +static const struct ieee80211_ops rt2800usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_tim = rt2x00mac_set_tim, + .set_key = rt2x00mac_set_key, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, + .get_stats = rt2x00mac_get_stats, + .get_tkip_seq = rt2800_get_tkip_seq, + .set_rts_threshold = rt2800_set_rts_threshold, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt2800_conf_tx, + .get_tsf = rt2800_get_tsf, + .rfkill_poll = rt2x00mac_rfkill_poll, + .ampdu_action = rt2800_ampdu_action, +}; + +static const struct rt2800_ops rt2800usb_rt2800_ops = { + .register_read = rt2x00usb_register_read, + .register_read_lock = rt2x00usb_register_read_lock, + .register_write = rt2x00usb_register_write, + .register_write_lock = rt2x00usb_register_write_lock, + .register_multiread = rt2x00usb_register_multiread, + .register_multiwrite = rt2x00usb_register_multiwrite, + .regbusy_read = rt2x00usb_regbusy_read, + .drv_write_firmware = rt2800usb_write_firmware, + .drv_init_registers = rt2800usb_init_registers, +}; + static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .probe_hw = rt2800usb_probe_hw, .get_firmware_name = rt2800usb_get_firmware_name, - .check_firmware = rt2800usb_check_firmware, - .load_firmware = rt2800usb_load_firmware, + .check_firmware = rt2800_check_firmware, + .load_firmware = rt2800_load_firmware, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, @@ -657,9 +564,10 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .link_stats = rt2800_link_stats, .reset_tuner = rt2800_reset_tuner, .link_tuner = rt2800_link_tuner, + .watchdog = rt2x00usb_watchdog, .write_tx_desc = rt2800usb_write_tx_desc, - .write_tx_data = rt2x00usb_write_tx_data, - .write_beacon = rt2800usb_write_beacon, + .write_tx_data = rt2800usb_write_tx_data, + .write_beacon = rt2800_write_beacon, .get_tx_data_len = rt2800usb_get_tx_data_len, .kick_tx_queue = rt2x00usb_kick_tx_queue, .kill_tx_queue = rt2x00usb_kill_tx_queue, @@ -706,7 +614,8 @@ static const struct rt2x00_ops rt2800usb_ops = { .tx = &rt2800usb_queue_tx, .bcn = &rt2800usb_queue_bcn, .lib = &rt2800usb_rt2x00_ops, - .hw = &rt2800_mac80211_ops, + .drv = &rt2800usb_rt2800_ops, + .hw = &rt2800usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2800_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h index 2bca6a71a7f5e67911a37b9ac95fc37be8b45e18..0722badccf86cbe3dcfa1d2407da5f19dac295c1 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.h +++ b/drivers/net/wireless/rt2x00/rt2800usb.h @@ -31,43 +31,6 @@ #ifndef RT2800USB_H #define RT2800USB_H -/* - * USB registers. - */ - -/* - * USB_DMA_CFG - * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns. - * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes. - * PHY_CLEAR: phy watch dog enable. - * TX_CLEAR: Clear USB DMA TX path. - * TXOP_HALT: Halt TXOP count down when TX buffer is full. - * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation. - * RX_BULK_EN: Enable USB DMA Rx. - * TX_BULK_EN: Enable USB DMA Tx. 
- * EP_OUT_VALID: OUT endpoint data valid. - * RX_BUSY: USB DMA RX FSM busy. - * TX_BUSY: USB DMA TX FSM busy. - */ -#define USB_DMA_CFG 0x02a0 -#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff) -#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00) -#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000) -#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000) -#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000) -#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000) -#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000) -#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000) -#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000) -#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000) -#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000) - -/* - * USB_CYC_CFG - */ -#define USB_CYC_CFG 0x02a4 -#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff) - /* * 8051 firmware image. */ diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 6c1ff4c15c8430d4288b4bb0e687d812e912d1e5..c21af38cc5af57364694c8bc46e19867e2c8150d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -39,6 +39,7 @@ #include #include "rt2x00debug.h" +#include "rt2x00dump.h" #include "rt2x00leds.h" #include "rt2x00reg.h" #include "rt2x00queue.h" @@ -159,6 +160,7 @@ struct avg_val { enum rt2x00_chip_intf { RT2X00_CHIP_INTF_PCI, + RT2X00_CHIP_INTF_PCIE, RT2X00_CHIP_INTF_USB, RT2X00_CHIP_INTF_SOC, }; @@ -175,8 +177,7 @@ struct rt2x00_chip { #define RT2570 0x2570 #define RT2661 0x2661 #define RT2573 0x2573 -#define RT2860 0x2860 /* 2.4GHz PCI/CB */ -#define RT2870 0x2870 +#define RT2860 0x2860 /* 2.4GHz */ #define RT2872 0x2872 /* WSOC */ #define RT2883 0x2883 /* WSOC */ #define RT3070 0x3070 @@ -331,6 +332,11 @@ struct link { * Work structure for scheduling periodic link tuning. */ struct delayed_work work; + + /* + * Work structure for scheduling periodic watchdog monitoring. + */ + struct delayed_work watchdog_work; }; /* @@ -508,6 +514,11 @@ struct rt2x00lib_ops { */ irq_handler_t irq_handler; + /* + * Threaded Interrupt handlers. + */ + irq_handler_t irq_handler_thread; + /* * Device init handlers. 
*/ @@ -542,6 +553,7 @@ struct rt2x00lib_ops { struct link_qual *qual); void (*link_tuner) (struct rt2x00_dev *rt2x00dev, struct link_qual *qual, const u32 count); + void (*watchdog) (struct rt2x00_dev *rt2x00dev); /* * TX control handlers @@ -549,8 +561,8 @@ struct rt2x00lib_ops { void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc); - int (*write_tx_data) (struct queue_entry *entry, - struct txentry_desc *txdesc); + void (*write_tx_data) (struct queue_entry *entry, + struct txentry_desc *txdesc); void (*write_beacon) (struct queue_entry *entry, struct txentry_desc *txdesc); int (*get_tx_data_len) (struct queue_entry *entry); @@ -609,6 +621,7 @@ struct rt2x00_ops { const struct data_queue_desc *bcn; const struct data_queue_desc *atim; const struct rt2x00lib_ops *lib; + const void *drv; const struct ieee80211_ops *hw; #ifdef CONFIG_RT2X00_LIB_DEBUGFS const struct rt2x00debug *debugfs; @@ -627,6 +640,7 @@ enum rt2x00_flags { DEVICE_STATE_INITIALIZED, DEVICE_STATE_STARTED, DEVICE_STATE_ENABLED_RADIO, + DEVICE_STATE_SCANNING, /* * Driver requirements @@ -645,6 +659,9 @@ enum rt2x00_flags { CONFIG_SUPPORT_HW_CRYPTO, DRIVER_SUPPORT_CONTROL_FILTERS, DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, + DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, + DRIVER_SUPPORT_LINK_TUNING, + DRIVER_SUPPORT_WATCHDOG, /* * Driver configuration @@ -654,7 +671,6 @@ enum rt2x00_flags { CONFIG_EXTERNAL_LNA_A, CONFIG_EXTERNAL_LNA_BG, CONFIG_DOUBLE_ANTENNA, - CONFIG_DISABLE_LINK_TUNING, CONFIG_CHANNEL_HT40, }; @@ -862,9 +878,10 @@ struct rt2x00_dev { const struct firmware *fw; /* - * Driver specific data. + * Interrupt values, stored between interrupt service routine + * and interrupt thread routine. */ - void *priv; + u32 irqvalue[2]; }; /* @@ -978,7 +995,13 @@ static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev, static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev) { - return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); + return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI) || + rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); +} + +static inline bool rt2x00_is_pcie(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); } static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev) @@ -998,6 +1021,13 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev) */ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); +/** + * rt2x00queue_unmap_skb - Unmap a skb from DMA. + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @skb: The skb to unmap. + */ +void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); + /** * rt2x00queue_get_queue - Convert queue index to queue pointer * @rt2x00dev: Pointer to &struct rt2x00_dev. @@ -1014,10 +1044,31 @@ struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, enum queue_index index); +/* + * Debugfs handlers. + */ +/** + * rt2x00debug_dump_frame - Dump a frame to userspace through debugfs. + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @type: The type of frame that is being dumped. + * @skb: The skb containing the frame to be dumped. 
+ */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, + enum rt2x00_dump_type type, struct sk_buff *skb); +#else +static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, + enum rt2x00_dump_type type, + struct sk_buff *skb) +{ +} +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + /* * Interrupt context handlers. */ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev); void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc); void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, @@ -1047,6 +1098,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, #else #define rt2x00mac_set_key NULL #endif /* CONFIG_RT2X00_LIB_CRYPTO */ +void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw); +void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw); int rt2x00mac_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c index 098315a271ca0473486fcc9fdb8e7ad7928ecea8..953dc4f2c6affa5f808928718089aa1e12bde312 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -41,10 +41,12 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, switch (type) { case NL80211_IFTYPE_ADHOC: + conf.sync = TSF_SYNC_ADHOC; + break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_WDS: - conf.sync = TSF_SYNC_BEACON; + conf.sync = TSF_SYNC_AP_NONE; break; case NL80211_IFTYPE_STATION: conf.sync = TSF_SYNC_INFRA; @@ -170,23 +172,27 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, unsigned int ieee80211_flags) { struct rt2x00lib_conf libconf; + u16 hw_value; memset(&libconf, 0, sizeof(libconf)); libconf.conf = conf; if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { - if (conf_is_ht40(conf)) + if (conf_is_ht40(conf)) { __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); - else + hw_value = rt2x00ht_center_channel(rt2x00dev, conf); + } else { __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); + hw_value = conf->channel->hw_value; + } memcpy(&libconf.rf, - &rt2x00dev->spec.channels[conf->channel->hw_value], + &rt2x00dev->spec.channels[hw_value], sizeof(libconf.rf)); memcpy(&libconf.channel, - &rt2x00dev->spec.channels_info[conf->channel->hw_value], + &rt2x00dev->spec.channels_info[hw_value], sizeof(libconf.channel)); } diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c index e9fe93fd804268a943368d65237c2657896bd25f..b0498e7e7aae95a733eef24bbb139ee0486c690b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -211,6 +211,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) skb_queue_purge(&intf->frame_dump_skbqueue); } +EXPORT_SYMBOL_GPL(rt2x00debug_dump_frame); static int rt2x00debug_file_open(struct inode *inode, struct file *file) { diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index f20d3eeeea7fe0ee4001bf3597255bab1ad9856e..585e8166f22a635f628a5524d040d0f1807b3805 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -69,6 +69,11 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) */ rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + /* + * 
Start watchdog monitoring. + */ + rt2x00link_start_watchdog(rt2x00dev); + /* * Start the TX queues. */ @@ -88,6 +93,11 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) ieee80211_stop_queues(rt2x00dev->hw); rt2x00queue_stop_queues(rt2x00dev); + /* + * Stop watchdog monitoring. + */ + rt2x00link_stop_watchdog(rt2x00dev); + /* * Disable RX. */ @@ -168,10 +178,32 @@ static void rt2x00lib_intf_scheduled(struct work_struct *work) /* * Interrupt context handlers. */ -static void rt2x00lib_beacondone_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) { - struct rt2x00_intf *intf = vif_to_intf(vif); + struct rt2x00_dev *rt2x00dev = data; + struct sk_buff *skb; + + /* + * Only AP mode interfaces do broad- and multicast buffering + */ + if (vif->type != NL80211_IFTYPE_AP) + return; + + /* + * Send out buffered broad- and multicast frames + */ + skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); + while (skb) { + rt2x00mac_tx(rt2x00dev->hw, skb); + skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); + } +} + +static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rt2x00_dev *rt2x00dev = data; if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && @@ -179,9 +211,7 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac, vif->type != NL80211_IFTYPE_WDS) return; - spin_lock(&intf->lock); - intf->delayed_flags |= DELAYED_UPDATE_BEACON; - spin_unlock(&intf->lock); + rt2x00queue_update_beacon(rt2x00dev, vif, true); } void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) @@ -189,14 +219,37 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; - ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, - rt2x00lib_beacondone_iter, - rt2x00dev); + /* send buffered bc/mc frames out for every bssid */ + ieee80211_iterate_active_interfaces(rt2x00dev->hw, + rt2x00lib_bc_buffer_iter, + rt2x00dev); + /* + * Devices with pre tbtt interrupt don't need to update the beacon + * here as they will fetch the next beacon directly prior to + * transmission. + */ + if (test_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags)) + return; - ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work); + /* fetch next beacon */ + ieee80211_iterate_active_interfaces(rt2x00dev->hw, + rt2x00lib_beaconupdate_iter, + rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); +void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* fetch next beacon */ + ieee80211_iterate_active_interfaces(rt2x00dev->hw, + rt2x00lib_beaconupdate_iter, + rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); + void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { @@ -215,6 +268,16 @@ void rt2x00lib_txdone(struct queue_entry *entry, */ rt2x00queue_unmap_skb(rt2x00dev, entry->skb); + /* + * Remove the extra tx headroom from the skb. + */ + skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom); + + /* + * Signal that the TX descriptor is no longer in the skb. 
+ */ + skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; + /* * Remove L2 padding which was added during */ @@ -224,7 +287,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, /* * If the IV/EIV data was stripped from the frame before it was * passed to the hardware, we should now reinsert it again because - * mac80211 will expect the the same data to be present it the + * mac80211 will expect the same data to be present in the * frame as it was passed to us. */ if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) @@ -241,8 +304,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, */ success = test_bit(TXDONE_SUCCESS, &txdesc->flags) || - test_bit(TXDONE_UNKNOWN, &txdesc->flags) || - test_bit(TXDONE_FALLBACK, &txdesc->flags); + test_bit(TXDONE_UNKNOWN, &txdesc->flags); /* * Update TX statistics. @@ -264,11 +326,22 @@ void rt2x00lib_txdone(struct queue_entry *entry, /* * Frame was send with retries, hardware tried * different rates to send out the frame, at each - * retry it lowered the rate 1 step. + * retry it lowered the rate 1 step except when the + * lowest rate was used. */ for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) { tx_info->status.rates[i].idx = rate_idx - i; tx_info->status.rates[i].flags = rate_flags; + + if (rate_idx - i == 0) { + /* + * The lowest rate (index 0) was used until the + * number of max retries was reached. + */ + tx_info->status.rates[i].count = retry_rates - i; + i++; + break; + } tx_info->status.rates[i].count = 1; } if (i < (IEEE80211_TX_MAX_RATES - 1)) @@ -281,6 +354,21 @@ void rt2x00lib_txdone(struct queue_entry *entry, rt2x00dev->low_level_stats.dot11ACKFailureCount++; } + /* + * Every single frame has its own tx status, hence report + * every frame as ampdu of size 1. + * + * TODO: if we can find out how many frames were aggregated + * by the hw we could provide the real ampdu_len to mac80211 + * which would allow the rc algorithm to better decide on + * which rates are suitable. + */ + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { + tx_info->flags |= IEEE80211_TX_STAT_AMPDU; + tx_info->status.ampdu_len = 1; + tx_info->status.ampdu_ack_len = success ? 1 : 0; + } + if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { if (success) rt2x00dev->low_level_stats.dot11RTSSuccessCount++; @@ -295,9 +383,17 @@ void rt2x00lib_txdone(struct queue_entry *entry, * send the status report back. */ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) - ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); + /* + * Only PCI and SOC devices process the tx status in process + * context. Hence use ieee80211_tx_status for PCI and SOC + * devices and stick to ieee80211_tx_status_irqsafe for USB. + */ + if (rt2x00_is_usb(rt2x00dev)) + ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); + else + ieee80211_tx_status(rt2x00dev->hw, entry->skb); else - dev_kfree_skb_irq(entry->skb); + dev_kfree_skb_any(entry->skb); /* * Make this entry available for reuse. @@ -444,7 +540,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb); memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status)); - ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb); + + /* + * Currently only PCI and SOC devices handle rx interrupts in process + * context. Hence, use ieee80211_rx_irqsafe for USB and ieee80211_rx_ni + * for PCI and SOC devices. 
+ */ + if (rt2x00_is_usb(rt2x00dev)) + ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb); + else + ieee80211_rx_ni(rt2x00dev->hw, entry->skb); /* * Replace the skb with the freshly allocated one. diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h index ed303b423e4180e095b68f0ab11459e675679098..5d6e0b83151faad1598886aa405e37d3b5f3d7e0 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dump.h +++ b/drivers/net/wireless/rt2x00/rt2x00dump.h @@ -20,7 +20,12 @@ /* Module: rt2x00dump - Abstract: Data structures for the rt2x00debug & userspace. + Abstract: + Data structures for the rt2x00debug & userspace. + + The declarations in this file can be used by both rt2x00 + and userspace and therefore should be kept together in + this file. */ #ifndef RT2X00DUMP_H @@ -111,7 +116,7 @@ struct rt2x00dump_hdr { __le16 chip_rt; __le16 chip_rf; - __le32 chip_rev; + __le16 chip_rev; __le16 type; __u8 queue_index; diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c index 5a407602ce3e2da54bae581765e89a7842fe8935..c004cd3a8847c852c5b007ec5cdf2faf220c16f5 100644 --- a/drivers/net/wireless/rt2x00/rt2x00ht.c +++ b/drivers/net/wireless/rt2x00/rt2x00ht.c @@ -44,11 +44,22 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, txdesc->mpdu_density = 0; txdesc->ba_size = 7; /* FIXME: What value is needed? */ - txdesc->stbc = 0; /* FIXME: What value is needed? */ - txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); - if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - txdesc->mcs |= 0x08; + txdesc->stbc = + (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT; + + /* + * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the + * mcs rate to be used + */ + if (txrate->flags & IEEE80211_TX_RC_MCS) { + txdesc->mcs = txrate->idx; + } else { + txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); + if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + txdesc->mcs |= 0x08; + } + /* * Convert flags @@ -84,3 +95,31 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, else txdesc->txop = TXOP_HTTXOP; } + +u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + int center_channel; + u16 i; + + /* + * Initialize center channel to current channel. + */ + center_channel = spec->channels[conf->channel->hw_value].channel; + + /* + * Adjust center channel to HT40+ and HT40- operation. + */ + if (conf_is_ht40_plus(conf)) + center_channel += 2; + else if (conf_is_ht40_minus(conf)) + center_channel -= (center_channel == 14) ? 1 : 2; + + for (i = 0; i < spec->num_channels; i++) + if (spec->channels[i].channel == center_channel) + return i; + + WARN_ON(1); + return conf->channel->hw_value; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index be2e37fb40718c3b51771f6250d4373dade15ee3..dc5c6574aaf4f13efb6f43213da5fce85d8040be 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -27,11 +27,10 @@ #ifndef RT2X00LIB_H #define RT2X00LIB_H -#include "rt2x00dump.h" - /* * Interval defines */ +#define WATCHDOG_INTERVAL round_jiffies_relative(HZ) #define LINK_TUNE_INTERVAL round_jiffies_relative(HZ) /* @@ -106,13 +105,6 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry); -/** - * rt2x00queue_unmap_skb - Unmap a skb from DMA. 
- * @rt2x00dev: Pointer to &struct rt2x00_dev. - * @skb: The skb to unmap. - */ -void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); - /** * rt2x00queue_free_skb - free a skb * @rt2x00dev: Pointer to &struct rt2x00_dev. @@ -266,11 +258,30 @@ void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev); void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna); /** - * rt2x00link_register - Initialize link tuning functionality + * rt2x00link_start_watchdog - Start periodic watchdog monitoring + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * This start the watchdog periodic work, this work will + *be executed periodically until &rt2x00link_stop_watchdog has + * been called. + */ +void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00link_stop_watchdog - Stop periodic watchdog monitoring * @rt2x00dev: Pointer to &struct rt2x00_dev. * - * Initialize work structure and all link tuning related - * parameters. This will not start the link tuning process itself. + * After this function completed the watchdog monitoring will not + * be running until &rt2x00link_start_watchdog is called. + */ +void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00link_register - Initialize link tuning & watchdog functionality + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * Initialize work structure and all link tuning and watchdog related + * parameters. This will not start the periodic work itself. */ void rt2x00link_register(struct rt2x00_dev *rt2x00dev); @@ -296,8 +307,6 @@ static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) #ifdef CONFIG_RT2X00_LIB_DEBUGFS void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); -void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, - enum rt2x00_dump_type type, struct sk_buff *skb); void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, struct rxdone_entry_desc *rxdesc); #else @@ -309,12 +318,6 @@ static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) { } -static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, - enum rt2x00_dump_type type, - struct sk_buff *skb) -{ -} - static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, struct rxdone_entry_desc *rxdesc) { @@ -384,12 +387,21 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, struct txentry_desc *txdesc, const struct rt2x00_rate *hwrate); + +u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf); #else static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, struct txentry_desc *txdesc, const struct rt2x00_rate *hwrate) { } + +static inline u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf) +{ + return conf->channel->hw_value; +} #endif /* CONFIG_RT2X00_LIB_HT */ /* diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c index 0efbf5a6c25414a6780a00ec59bde104d29044b8..666cef3f8472e3da35bfd98d6dd74eba1d8d162e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/rt2x00/rt2x00link.c @@ -271,11 +271,20 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) /* * Link tuning should only be performed when - * an active sta or master interface exists. - * Single monitor mode interfaces should never have - * work with link tuners. 
+ * an active sta interface exists. AP interfaces + * don't need link tuning and monitor mode interfaces + * should never have to work with link tuners. */ - if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) + if (!rt2x00dev->intf_sta_count) + return; + + /** + * While scanning, link tuning is disabled. By default + * the most sensitive settings will be used to make sure + * that all beacons and probe responses will be recieved + * during the scan. + */ + if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; rt2x00link_reset_tuner(rt2x00dev, false); @@ -293,6 +302,7 @@ void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev) void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) { struct link_qual *qual = &rt2x00dev->link.qual; + u8 vgc_level = qual->vgc_level_reg; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; @@ -308,6 +318,13 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) rt2x00dev->link.count = 0; memset(qual, 0, sizeof(*qual)); + /* + * Restore the VGC level as stored in the registers, + * the driver can use this to determine if the register + * must be updated during reset or not. + */ + qual->vgc_level_reg = vgc_level; + /* * Reset the link tuner. */ @@ -338,7 +355,8 @@ static void rt2x00link_tuner(struct work_struct *work) * When the radio is shutting down we should * immediately cease all link tuning. */ - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || + test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; /* @@ -359,10 +377,11 @@ static void rt2x00link_tuner(struct work_struct *work) qual->rssi = link->avg_rssi.avg; /* - * Only perform the link tuning when Link tuning - * has been enabled (This could have been disabled from the EEPROM). + * Check if link tuning is supported by the hardware, some hardware + * do not support link tuning at all, while other devices can disable + * the feature from the EEPROM. */ - if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags)) + if (test_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags)) rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); /* @@ -388,7 +407,45 @@ static void rt2x00link_tuner(struct work_struct *work) &link->work, LINK_TUNE_INTERVAL); } +void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev) +{ + struct link *link = &rt2x00dev->link; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || + !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags)) + return; + + ieee80211_queue_delayed_work(rt2x00dev->hw, + &link->watchdog_work, WATCHDOG_INTERVAL); +} + +void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) +{ + cancel_delayed_work_sync(&rt2x00dev->link.watchdog_work); +} + +static void rt2x00link_watchdog(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, link.watchdog_work.work); + struct link *link = &rt2x00dev->link; + + /* + * When the radio is shutting down we should + * immediately cease the watchdog monitoring. 
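+	 * Otherwise the driver specific watchdog callback is run and, as
+	 * long as the device is still present, the work re-arms itself
+	 * every WATCHDOG_INTERVAL until rt2x00link_stop_watchdog()
+	 * cancels it.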
+ */ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + rt2x00dev->ops->lib->watchdog(rt2x00dev); + + if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + ieee80211_queue_delayed_work(rt2x00dev->hw, + &link->watchdog_work, WATCHDOG_INTERVAL); +} + void rt2x00link_register(struct rt2x00_dev *rt2x00dev) { + INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog); INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); } diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index abbd857ec7592a5ea1b1e22aff581414729d5ff9..235e037e65092045d07db64893b7cbdaa29b5785 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -273,16 +273,24 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, mutex_init(&intf->beacon_skb_mutex); intf->beacon = entry; - if (vif->type == NL80211_IFTYPE_AP) - memcpy(&intf->bssid, vif->addr, ETH_ALEN); - memcpy(&intf->mac, vif->addr, ETH_ALEN); - /* * The MAC adddress must be configured after the device * has been initialized. Otherwise the device can reset * the MAC registers. + * The BSSID address must only be configured in AP mode, + * however we should not send an empty BSSID address for + * STA interfaces at this time, since this can cause + * invalid behavior in the device. */ - rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL); + memcpy(&intf->mac, vif->addr, ETH_ALEN); + if (vif->type == NL80211_IFTYPE_AP) { + memcpy(&intf->bssid, vif->addr, ETH_ALEN); + rt2x00lib_config_intf(rt2x00dev, intf, vif->type, + intf->mac, intf->bssid); + } else { + rt2x00lib_config_intf(rt2x00dev, intf, vif->type, + intf->mac, NULL); + } /* * Some filters depend on the current working mode. We can force @@ -346,9 +354,11 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) /* * Some configuration parameters (e.g. channel and antenna values) can * only be set when the radio is enabled, but do require the RX to - * be off. + * be off. During this period we should keep link tuning enabled, + * if for any reason the link tuner must be reset, this will be + * handled by rt2x00lib_config(). 
*/ - rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); /* * When we've just turned on the radio, we want to reprogram @@ -366,7 +376,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); /* Turn RX back on */ - rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); return 0; } @@ -430,12 +440,36 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); +static void rt2x00mac_set_tim_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rt2x00_intf *intf = vif_to_intf(vif); + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC && + vif->type != NL80211_IFTYPE_MESH_POINT && + vif->type != NL80211_IFTYPE_WDS) + return; + + spin_lock(&intf->lock); + intf->delayed_flags |= DELAYED_UPDATE_BEACON; + spin_unlock(&intf->lock); +} + int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct rt2x00_dev *rt2x00dev = hw->priv; - rt2x00lib_beacondone(rt2x00dev); + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return 0; + + ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, + rt2x00mac_set_tim_iter, + rt2x00dev); + + /* queue work to upodate the beacon template */ + ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work); return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_set_tim); @@ -539,6 +573,22 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, EXPORT_SYMBOL_GPL(rt2x00mac_set_key); #endif /* CONFIG_RT2X00_LIB_CRYPTO */ +void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + __set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); + rt2x00link_stop_tuner(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start); + +void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + __clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); + rt2x00link_start_tuner(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete); + int rt2x00mac_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { @@ -562,7 +612,6 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); - int update_bssid = 0; /* * mac80211 might be calling this function while we are trying @@ -577,10 +626,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, * conf->bssid can be NULL if coming from the internal * beacon update routine. */ - if (changes & BSS_CHANGED_BSSID) { - update_bssid = 1; + if (changes & BSS_CHANGED_BSSID) memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN); - } spin_unlock(&intf->lock); @@ -592,7 +639,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, */ if (changes & BSS_CHANGED_BSSID) rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL, - update_bssid ? bss_conf->bssid : NULL); + bss_conf->bssid); /* * Update the beacon. diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index f71eee67f977de43e0c2aa61b0355ff29dc8d3ae..19b262e1ddbe284beb0845934444f071afcba531 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -60,34 +60,6 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, } EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); -/* - * TX data handlers. 
- */ -int rt2x00pci_write_tx_data(struct queue_entry *entry, - struct txentry_desc *txdesc) -{ - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - - /* - * This should not happen, we already checked the entry - * was ours. When the hardware disagrees there has been - * a queue corruption! - */ - if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) { - ERROR(rt2x00dev, - "Corrupt queue %d, accessing entry which is not ours.\n" - "Please file bug report to %s.\n", - entry->queue->qid, DRV_PROJECT); - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data); - -/* - * TX/RX data handlers. - */ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue = rt2x00dev->rx; @@ -181,8 +153,10 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) /* * Register interrupt handler. */ - status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler, - IRQF_SHARED, rt2x00dev->name, rt2x00dev); + status = request_threaded_irq(rt2x00dev->irq, + rt2x00dev->ops->lib->irq_handler, + rt2x00dev->ops->lib->irq_handler_thread, + IRQF_SHARED, rt2x00dev->name, rt2x00dev); if (status) { ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", rt2x00dev->irq, status); @@ -305,7 +279,10 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) rt2x00dev->irq = pci_dev->irq; rt2x00dev->name = pci_name(pci_dev); - rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); + if (pci_dev->is_pcie) + rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); + else + rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); retval = rt2x00pci_alloc_reg(rt2x00dev); if (retval) diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index 51bcef3839ce3ae52fe00974de87d0978f43ef4f..b854d62ff99bde9e3ada5bbd4811a6797ac1c677 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -85,16 +85,6 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, const struct rt2x00_field32 field, u32 *reg); -/** - * rt2x00pci_write_tx_data - Initialize data for TX operation - * @entry: The entry where the frame is located - * - * This function will initialize the DMA and skb descriptor - * to prepare the entry for the actual TX operation. - */ -int rt2x00pci_write_tx_data(struct queue_entry *entry, - struct txentry_desc *txdesc); - /** * struct queue_entry_priv_pci: Per entry PCI specific information * diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 20dbdd6fb9047e63ed98a64d084072f1642084e8..a3401d301058870745879c6ec0b19fa2d29ebfda 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -100,21 +100,8 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); - /* - * If device has requested headroom, we should make sure that - * is also mapped to the DMA so it can be used for transfering - * additional descriptor information to the hardware. - */ - skb_push(skb, rt2x00dev->ops->extra_tx_headroom); - skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE); - - /* - * Restore data pointer to original location again. 
- */ - skb_pull(skb, rt2x00dev->ops->extra_tx_headroom); - skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; } EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); @@ -130,16 +117,12 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) } if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { - /* - * Add headroom to the skb length, it has been removed - * by the driver, but it was actually mapped to DMA. - */ - dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, - skb->len + rt2x00dev->ops->extra_tx_headroom, + dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, DMA_TO_DEVICE); skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; } } +EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) { @@ -370,12 +353,17 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, /* * Check if more fragments are pending */ - if (ieee80211_has_morefrags(hdr->frame_control) || - (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) { + if (ieee80211_has_morefrags(hdr->frame_control)) { __set_bit(ENTRY_TXD_BURST, &txdesc->flags); __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags); } + /* + * Check if more frames (!= fragments) are pending + */ + if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES) + __set_bit(ENTRY_TXD_BURST, &txdesc->flags); + /* * Beacons and probe responses require the tsf timestamp * to be inserted into the frame, except for a frame that has been injected @@ -416,12 +404,51 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); } +static int rt2x00queue_write_tx_data(struct queue_entry *entry, + struct txentry_desc *txdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + + /* + * This should not happen, we already checked the entry + * was ours. When the hardware disagrees there has been + * a queue corruption! + */ + if (unlikely(rt2x00dev->ops->lib->get_entry_state && + rt2x00dev->ops->lib->get_entry_state(entry))) { + ERROR(rt2x00dev, + "Corrupt queue %d, accessing entry which is not ours.\n" + "Please file bug report to %s.\n", + entry->queue->qid, DRV_PROJECT); + return -EINVAL; + } + + /* + * Add the requested extra tx headroom in front of the skb. + */ + skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom); + memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom); + + /* + * Call the driver's write_tx_data function, if it exists. + */ + if (rt2x00dev->ops->lib->write_tx_data) + rt2x00dev->ops->lib->write_tx_data(entry, txdesc); + + /* + * Map the skb to DMA. + */ + if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) + rt2x00queue_map_txskb(rt2x00dev, entry->skb); + + return 0; +} + static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, struct txentry_desc *txdesc) { struct data_queue *queue = entry->queue; struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; - enum rt2x00_dump_type dump_type; rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); @@ -429,9 +456,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, * All processing on the frame has been completed, this means * it is now ready to be dumped to userspace through debugfs. */ - dump_type = (txdesc->queue == QID_BEACON) ? 
- DUMP_FRAME_BEACON : DUMP_FRAME_TX; - rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb); + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); } static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, @@ -530,16 +555,12 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, * call failed. Since we always return NETDEV_TX_OK to mac80211, * this frame will simply be dropped. */ - if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry, - &txdesc))) { + if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) { clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); entry->skb = NULL; return -EIO; } - if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) - rt2x00queue_map_txskb(queue->rt2x00dev, skb); - set_bit(ENTRY_DATA_PENDING, &entry->flags); rt2x00queue_index_inc(queue, Q_INDEX); @@ -594,11 +615,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, memset(skbdesc, 0, sizeof(*skbdesc)); skbdesc->entry = intf->beacon; - /* - * Write TX descriptor into reserved room in front of the beacon. - */ - rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc); - /* * Send beacon to hardware and enable beacon genaration.. */ @@ -672,9 +688,11 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index) if (index == Q_INDEX) { queue->length++; + queue->last_index = jiffies; } else if (index == Q_INDEX_DONE) { queue->length--; queue->count++; + queue->last_index_done = jiffies; } spin_unlock_irqrestore(&queue->lock, irqflags); @@ -688,6 +706,8 @@ static void rt2x00queue_reset(struct data_queue *queue) queue->count = 0; queue->length = 0; + queue->last_index = jiffies; + queue->last_index_done = jiffies; memset(queue->index, 0, sizeof(queue->index)); spin_unlock_irqrestore(&queue->lock, irqflags); diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h index f79170849addf1737318da037e7c84d9a5a557cd..191e7775a9c0b418f791a134ca13217b6f91b25d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/rt2x00/rt2x00queue.h @@ -213,9 +213,16 @@ struct rxdone_entry_desc { /** * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc * + * Every txdone report has to contain the basic result of the + * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or + * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in + * conjunction with all of these flags but should only be set + * if retires > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used + * in conjunction with &TXDONE_FAILURE. + * * @TXDONE_UNKNOWN: Hardware could not determine success of transmission. * @TXDONE_SUCCESS: Frame was successfully send - * @TXDONE_FALLBACK: Frame was successfully send using a fallback rate. + * @TXDONE_FALLBACK: Hardware used fallback rates for retries * @TXDONE_FAILURE: Frame was not successfully send * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the * frame transmission failed due to excessive retries. @@ -439,6 +446,8 @@ struct data_queue { enum data_queue_qid qid; spinlock_t lock; + unsigned long last_index; + unsigned long last_index_done; unsigned int count; unsigned short limit; unsigned short threshold; @@ -591,6 +600,15 @@ static inline int rt2x00queue_threshold(struct data_queue *queue) return rt2x00queue_available(queue) < queue->threshold; } +/** + * rt2x00queue_timeout - Check if a timeout occured for this queue + * @queue: Queue to check. 
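+ *
+ * A queue is considered to have timed out when the most recently queued
+ * entry (last_index) was stamped more than HZ / 10 jiffies (roughly
+ * 100 ms) after the most recent completion (last_index_done), i.e.
+ * frames keep being handed to the hardware while no tx status is
+ * reported back.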
+ */ +static inline int rt2x00queue_timeout(struct data_queue *queue) +{ + return time_after(queue->last_index, queue->last_index_done + (HZ / 10)); +} + /** * _rt2x00_desc_read - Read a word from the hardware descriptor. * @desc: Base descriptor address diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h index b9fe94873ee00ddee8389b9bbdb041104e43686b..cef94621cef791ade84a675c629c56ff16bebc72 100644 --- a/drivers/net/wireless/rt2x00/rt2x00reg.h +++ b/drivers/net/wireless/rt2x00/rt2x00reg.h @@ -63,7 +63,8 @@ enum led_mode { enum tsf_sync { TSF_SYNC_NONE = 0, TSF_SYNC_INFRA = 1, - TSF_SYNC_BEACON = 2, + TSF_SYNC_ADHOC = 2, + TSF_SYNC_AP_NONE = 3, }; /* @@ -88,6 +89,8 @@ enum dev_state { STATE_RADIO_RX_OFF_LINK, STATE_RADIO_IRQ_ON, STATE_RADIO_IRQ_OFF, + STATE_RADIO_IRQ_ON_ISR, + STATE_RADIO_IRQ_OFF_ISR, }; /* diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index bd1546ba7ad27206c1d17f00e012c2ca942c17cf..ff3a36622d1b88de75353a72f8abc35ee5d26c44 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -112,26 +112,6 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length, const int timeout) -{ - int status; - - mutex_lock(&rt2x00dev->csr_mutex); - - status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, - requesttype, offset, buffer, - buffer_length, timeout); - - mutex_unlock(&rt2x00dev->csr_mutex); - - return status; -} -EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); - -int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev, - const u8 request, const u8 requesttype, - const u16 offset, const void *buffer, - const u16 buffer_length, - const int timeout) { int status = 0; unsigned char *tb; @@ -157,7 +137,7 @@ int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev, return status; } -EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff); +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, @@ -216,48 +196,28 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) rt2x00lib_txdone(entry, &txdesc); } -int rt2x00usb_write_tx_data(struct queue_entry *entry, - struct txentry_desc *txdesc) +static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb *entry_priv = entry->priv_data; u32 length; - /* - * Add the descriptor in front of the skb. - */ - skb_push(entry->skb, entry->queue->desc_size); - memset(entry->skb->data, 0, entry->queue->desc_size); - - /* - * USB devices cannot blindly pass the skb->len as the - * length of the data to usb_fill_bulk_urb. Pass the skb - * to the driver to determine what the length should be. - */ - length = rt2x00dev->ops->lib->get_tx_data_len(entry); - - usb_fill_bulk_urb(entry_priv->urb, usb_dev, - usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), - entry->skb->data, length, - rt2x00usb_interrupt_txdone, entry); - - /* - * Make sure the skb->data pointer points to the frame, not the - * descriptor. - */ - skb_pull(entry->skb, entry->queue->desc_size); + if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) { + /* + * USB devices cannot blindly pass the skb->len as the + * length of the data to usb_fill_bulk_urb. 
Pass the skb + * to the driver to determine what the length should be. + */ + length = rt2x00dev->ops->lib->get_tx_data_len(entry); - return 0; -} -EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); + usb_fill_bulk_urb(entry_priv->urb, usb_dev, + usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), + entry->skb->data, length, + rt2x00usb_interrupt_txdone, entry); -static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry) -{ - struct queue_entry_priv_usb *entry_priv = entry->priv_data; - - if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) usb_submit_urb(entry_priv->urb, GFP_ATOMIC); + } } void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, @@ -332,6 +292,56 @@ void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, } EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue); +static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue) +{ + struct queue_entry_priv_usb *entry_priv; + unsigned short threshold = queue->threshold; + + WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid); + + /* + * Temporarily disable the TX queue, this will force mac80211 + * to use the other queues until this queue has been restored. + * + * Set the queue threshold to the queue limit. This prevents the + * queue from being enabled during the txdone handler. + */ + queue->threshold = queue->limit; + ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); + + /* + * Reset all currently uploaded TX frames. + */ + while (!rt2x00queue_empty(queue)) { + entry_priv = rt2x00queue_get_entry(queue, Q_INDEX_DONE)->priv_data; + usb_kill_urb(entry_priv->urb); + + /* + * We need a short delay here to wait for + * the URB to be canceled and invoked the tx_done handler. + */ + udelay(200); + } + + /* + * The queue has been reset, and mac80211 is allowed to use the + * queue again. + */ + queue->threshold = threshold; + ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); +} + +void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + tx_queue_for_each(rt2x00dev, queue) { + if (rt2x00queue_timeout(queue)) + rt2x00usb_watchdog_reset_tx(queue); + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); + /* * RX data handlers. */ diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h index 621d0f8292514cf93079dbb6a553d9524bbc8d76..d3d3ddc408759d755a8e55df993ee0c4853084c8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.h +++ b/drivers/net/wireless/rt2x00/rt2x00usb.h @@ -166,25 +166,6 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev, const u16 offset, void *buffer, const u16 buffer_length, const int timeout); -/** - * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered) - * @rt2x00dev: Pointer to &struct rt2x00_dev - * @request: USB vendor command (See &enum rt2x00usb_vendor_request) - * @requesttype: Request type &USB_VENDOR_REQUEST_* - * @offset: Register start offset to perform action on - * @buffer: Buffer where information will be read/written to by device - * @buffer_length: Size of &buffer - * @timeout: Operation timeout - * - * This function is used to transfer register data in blocks larger - * then CSR_CACHE_SIZE. Use for firmware upload, keys and beacons. 
- */ -int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev, - const u8 request, const u8 requesttype, - const u16 offset, const void *buffer, - const u16 buffer_length, - const int timeout); - /** * rt2x00usb_vendor_request_sw - Send single register command to device * @rt2x00dev: Pointer to &struct rt2x00_dev @@ -369,16 +350,6 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, */ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev); -/** - * rt2x00usb_write_tx_data - Initialize URB for TX operation - * @entry: The entry where the frame is located - * - * This function will initialize the URB and skb descriptor - * to prepare the entry for the actual TX operation. - */ -int rt2x00usb_write_tx_data(struct queue_entry *entry, - struct txentry_desc *txdesc); - /** * struct queue_entry_priv_usb: Per entry USB specific information * @@ -428,6 +399,16 @@ void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, const enum data_queue_qid qid); +/** + * rt2x00usb_watchdog - Watchdog for USB communication + * @rt2x00dev: Pointer to &struct rt2x00_dev + * + * Check the health of the USB communication and determine + * if timeouts have occured. If this is the case, this function + * will reset all communication to restore functionality again. + */ +void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev); + /* * Device initialization handlers. */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 6a74baf4e934b97e4ac3978341736790f18fb6e3..e539c6cb636fd5d429f30c50fba98e1a6512b70c 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -931,6 +931,9 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev, u32 reg; rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1); + rt2x00_set_field32(®, TXRX_CSR4_OFDM_TX_RATE_STEP, 0); + rt2x00_set_field32(®, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0); rt2x00_set_field32(®, TXRX_CSR4_LONG_RETRY_LIMIT, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(®, TXRX_CSR4_SHORT_RETRY_LIMIT, @@ -1619,7 +1622,8 @@ static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev, static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_OFF); + int mask = (state == STATE_RADIO_IRQ_OFF) || + (state == STATE_RADIO_IRQ_OFF_ISR); u32 reg; /* @@ -1736,7 +1740,9 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt61pci_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: rt61pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -1873,6 +1879,16 @@ static void rt61pci_write_beacon(struct queue_entry *entry, rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + /* + * Write the TX descriptor for the beacon. + */ + rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); + + /* + * Dump beacon to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); + /* * Write entire beacon with descriptor to register. 
*/ @@ -2039,29 +2055,24 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) struct txdone_entry_desc txdesc; u32 word; u32 reg; - u32 old_reg; int type; int index; + int i; /* - * During each loop we will compare the freshly read - * STA_CSR4 register value with the value read from - * the previous loop. If the 2 values are equal then - * we should stop processing because the chance is - * quite big that the device has been unplugged and - * we risk going into an endless loop. + * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO + * at most X times and also stop processing once the TX_STA_FIFO_VALID + * flag is not set anymore. + * + * The legacy drivers use X=TX_RING_SIZE but state in a comment + * that the TX_STA_FIFO stack has a size of 16. We stick to our + * tx ring size for now. */ - old_reg = 0; - - while (1) { + for (i = 0; i < TX_ENTRIES; i++) { rt2x00pci_register_read(rt2x00dev, STA_CSR4, ®); if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) break; - if (old_reg == reg) - break; - old_reg = reg; - /* * Skip this entry when it contains an invalid * queue identication number. @@ -2120,6 +2131,13 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) } txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); + /* + * the frame was retried at least once + * -> hw used fallback rates + */ + if (txdesc.retry) + __set_bit(TXDONE_FALLBACK, &txdesc.flags); + rt2x00lib_txdone(entry, &txdesc); } } @@ -2132,27 +2150,11 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } -static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) +static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg_mcu; - u32 reg; - - /* - * Get the interrupt sources & saved to local variable. - * Write register value back to clear pending interrupts. - */ - rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®_mcu); - rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); - - rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); - rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); - - if (!reg && !reg_mcu) - return IRQ_NONE; - - if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) - return IRQ_HANDLED; + u32 reg = rt2x00dev->irqvalue[0]; + u32 reg_mcu = rt2x00dev->irqvalue[1]; /* * Handle interrupts, walk through all bits @@ -2185,9 +2187,51 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) rt61pci_wakeup(rt2x00dev); + /* + * 5 - Beacon done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE)) + rt2x00lib_beacondone(rt2x00dev); + + /* Enable interrupts again. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_ON_ISR); return IRQ_HANDLED; } + +static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg_mcu; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. 
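+	 * The sources are then stored in rt2x00dev->irqvalue[], device
+	 * interrupts are masked via STATE_RADIO_IRQ_OFF_ISR, and
+	 * IRQ_WAKE_THREAD defers the actual processing to
+	 * rt61pci_interrupt_thread().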
+ */ + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®_mcu); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); + + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + if (!reg && !reg_mcu) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* Store irqvalues for use in the interrupt thread. */ + rt2x00dev->irqvalue[0] = reg; + rt2x00dev->irqvalue[1] = reg_mcu; + + /* Disable interrupts, will be enabled again in the interrupt thread. */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_IRQ_OFF_ISR); + return IRQ_WAKE_THREAD; +} + /* * Device probe functions. */ @@ -2576,6 +2620,18 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); + /* + * As rt61 has a global fallback table we cannot specify + * more then one tx rate per frame but since the hw will + * try several rates (based on the fallback table) we should + * still initialize max_rates to the maximum number of rates + * we are going to try. Otherwise mac80211 will truncate our + * reported tx rates and the rc algortihm will end up with + * incorrect data. + */ + rt2x00dev->hw->max_rates = 7; + rt2x00dev->hw->max_rate_tries = 1; + /* * Initialize hw_mode information. */ @@ -2657,6 +2713,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); if (!modparam_nohwcrypt) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); /* * Set the rssi offset. @@ -2748,8 +2805,9 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = { .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, - .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt61pci_conf_tx, @@ -2759,6 +2817,7 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = { static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { .irq_handler = rt61pci_interrupt, + .irq_handler_thread = rt61pci_interrupt_thread, .probe_hw = rt61pci_probe_hw, .get_firmware_name = rt61pci_get_firmware_name, .check_firmware = rt61pci_check_firmware, @@ -2773,7 +2832,6 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { .reset_tuner = rt61pci_reset_tuner, .link_tuner = rt61pci_link_tuner, .write_tx_desc = rt61pci_write_tx_desc, - .write_tx_data = rt2x00pci_write_tx_data, .write_beacon = rt61pci_write_beacon, .kick_tx_queue = rt61pci_kick_tx_queue, .kill_tx_queue = rt61pci_kill_tx_queue, diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h index df80f1af22a43208abbb4ee84ab6319dd3747186..e2e728ab0b2e747ca88486eceeaf73ce869a4ab7 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.h +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -153,13 +153,13 @@ struct hw_key_entry { u8 key[16]; u8 tx_mic[8]; u8 rx_mic[8]; -} __attribute__ ((packed)); +} __packed; struct hw_pairwise_ta_entry { u8 address[6]; u8 cipher; u8 reserved; -} __attribute__ ((packed)); +} __packed; /* * Other on-chip shared memory space. 
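As an aside, the split interrupt handling used above follows the standard request_threaded_irq() pattern: a minimal hard handler that acknowledges and saves the interrupt sources, masks the device and wakes a thread, and a threaded handler that does the actual work in process context. The sketch below only illustrates that pattern; the structure and names (demo_dev, demo_hardirq, ...) are illustrative and are not part of rt2x00.

#include <linux/interrupt.h>
#include <linux/io.h>

struct demo_dev {
	void __iomem *csr;	/* interrupt source/ack register (illustrative) */
	u32 irqvalue;		/* sources saved for the threaded handler */
};

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;
	u32 source = readl(dev->csr);

	if (!source)
		return IRQ_NONE;	/* shared line, not our interrupt */

	writel(source, dev->csr);	/* ack the pending interrupts */
	dev->irqvalue = source;		/* hand the sources to the thread */

	/* mask device interrupts here (cf. STATE_RADIO_IRQ_OFF_ISR) */

	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;

	/* process dev->irqvalue in sleepable context: txdone, rxdone, ... */

	/* unmask device interrupts again (cf. STATE_RADIO_IRQ_ON_ISR) */

	return IRQ_HANDLED;
}

static int demo_request_irq(struct demo_dev *dev, int irq)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread,
				    IRQF_SHARED, "demo", dev);
}

Keeping the hard handler this small is what allows the completion paths earlier in this patch to use the non-irqsafe mac80211 reporting calls on PCI and SOC devices.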
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 6e0d82efe9241802d8fdea54eacfe9f686437f0d..aa9de18fd410f016c452008e7852abcf5adc937c 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -270,7 +270,6 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev, { struct hw_key_entry key_entry; struct rt2x00_field32 field; - int timeout; u32 mask; u32 reg; @@ -306,12 +305,8 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev, sizeof(key_entry.rx_mic)); reg = SHARED_KEY_ENTRY(key->hw_key_idx); - timeout = REGISTER_TIMEOUT32(sizeof(key_entry)); - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, reg, - &key_entry, - sizeof(key_entry), - timeout); + rt2x00usb_register_multiwrite(rt2x00dev, reg, + &key_entry, sizeof(key_entry)); /* * The cipher types are stored over 2 registers. @@ -372,7 +367,6 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev, { struct hw_pairwise_ta_entry addr_entry; struct hw_key_entry key_entry; - int timeout; u32 mask; u32 reg; @@ -407,17 +401,11 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev, sizeof(key_entry.rx_mic)); reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); - timeout = REGISTER_TIMEOUT32(sizeof(key_entry)); - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, reg, - &key_entry, - sizeof(key_entry), - timeout); + rt2x00usb_register_multiwrite(rt2x00dev, reg, + &key_entry, sizeof(key_entry)); /* * Send the address and cipher type to the hardware register. - * This data fits within the CSR cache size, so we can use - * rt2x00usb_register_multiwrite() directly. */ memset(&addr_entry, 0, sizeof(addr_entry)); memcpy(&addr_entry, crypto->address, ETH_ALEN); @@ -828,6 +816,9 @@ static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev, u32 reg; rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1); + rt2x00_set_field32(®, TXRX_CSR4_OFDM_TX_RATE_STEP, 0); + rt2x00_set_field32(®, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0); rt2x00_set_field32(®, TXRX_CSR4_LONG_RETRY_LIMIT, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(®, TXRX_CSR4_SHORT_RETRY_LIMIT, @@ -1092,11 +1083,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, /* * Write firmware to device. */ - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, - FIRMWARE_IMAGE_BASE, - data, len, - REGISTER_TIMEOUT32(len)); + rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len); /* * Send firmware request to device to load firmware, @@ -1413,7 +1400,9 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, rt73usb_toggle_rx(rt2x00dev, state); break; case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: + case STATE_RADIO_IRQ_OFF_ISR: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: @@ -1442,7 +1431,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); - __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE); + __le32 *txd = (__le32 *) skb->data; u32 word; /* @@ -1505,6 +1494,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, /* * Register descriptor details in skb frame descriptor. 
*/ + skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } @@ -1528,18 +1518,27 @@ static void rt73usb_write_beacon(struct queue_entry *entry, rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* - * Take the descriptor in front of the skb into account. + * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); + memset(entry->skb->data, 0, TXD_DESC_SIZE); + + /* + * Write the TX descriptor for the beacon. + */ + rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc); + + /* + * Dump beacon to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); /* * Write entire beacon with descriptor to register. */ beacon_base = HW_BEACON_OFFSET(entry->entry_idx); - rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, - USB_VENDOR_REQUEST_OUT, beacon_base, - entry->skb->data, entry->skb->len, - REGISTER_TIMEOUT32(entry->skb->len)); + rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, + entry->skb->data, entry->skb->len); /* * Enable beaconing again. @@ -2138,6 +2137,8 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); if (!modparam_nohwcrypt) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); /* * Set the rssi offset. @@ -2231,6 +2232,8 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = { .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, + .sw_scan_start = rt2x00mac_sw_scan_start, + .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt73usb_conf_tx, @@ -2251,8 +2254,8 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { .link_stats = rt73usb_link_stats, .reset_tuner = rt73usb_reset_tuner, .link_tuner = rt73usb_link_tuner, + .watchdog = rt2x00usb_watchdog, .write_tx_desc = rt73usb_write_tx_desc, - .write_tx_data = rt2x00usb_write_tx_data, .write_beacon = rt73usb_write_beacon, .get_tx_data_len = rt73usb_get_tx_data_len, .kick_tx_queue = rt2x00usb_kick_tx_queue, diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h index 7abe7eb14555d7e88ed205ca2eb90c1547c90928..44d5b2bebd399c0645956157d25edefc8c444110 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.h +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -108,13 +108,13 @@ struct hw_key_entry { u8 key[16]; u8 tx_mic[8]; u8 rx_mic[8]; -} __attribute__ ((packed)); +} __packed; struct hw_pairwise_ta_entry { u8 address[6]; u8 cipher; u8 reserved; -} __attribute__ ((packed)); +} __packed; /* * Since NULL frame won't be that long (256 byte), diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h index 4baf0cf0826fef967a131807eacbd83446b57689..30523314da43882a09e2d5bb4511ca0e4c57af6f 100644 --- a/drivers/net/wireless/rtl818x/rtl8180.h +++ b/drivers/net/wireless/rtl818x/rtl8180.h @@ -36,7 +36,7 @@ struct rtl8180_tx_desc { u8 agc; u8 flags2; u32 reserved[2]; -} __attribute__ ((packed)); +} __packed; struct rtl8180_rx_desc { __le32 flags; @@ -45,7 +45,7 @@ struct rtl8180_rx_desc { __le32 rx_buf; __le64 tsft; }; -} __attribute__ ((packed)); +} __packed; struct rtl8180_tx_ring { struct rtl8180_tx_desc *desc; diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index 
515817de29058483c25d00d7537de67fd9b8776a..1d8178563d76205d063d0147bd837d2d6d7ab41a 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -103,6 +103,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) { struct rtl8180_priv *priv = dev->priv; unsigned int count = 32; + u8 signal, agc, sq; while (count--) { struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; @@ -130,10 +131,18 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) skb_put(skb, flags & 0xFFF); rx_status.antenna = (flags2 >> 15) & 1; - /* TODO: improve signal/rssi reporting */ - rx_status.signal = (flags2 >> 8) & 0x7F; - /* XXX: is this correct? */ rx_status.rate_idx = (flags >> 20) & 0xF; + agc = (flags2 >> 17) & 0x7F; + if (priv->r8185) { + if (rx_status.rate_idx > 3) + signal = 90 - clamp_t(u8, agc, 25, 90); + else + signal = 95 - clamp_t(u8, agc, 30, 95); + } else { + sq = flags2 & 0xff; + signal = priv->rf->calc_rssi(agc, sq); + } + rx_status.signal = signal; rx_status.freq = dev->conf.channel->center_freq; rx_status.band = dev->conf.channel->band; rx_status.mactime = le64_to_cpu(entry->tsft); @@ -352,7 +361,7 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev) /* check success of reset */ if (rtl818x_ioread8(priv, &priv->map->CMD) & RTL818X_CMD_RESET) { - printk(KERN_ERR "%s: reset timeout!\n", wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "reset timeout!\n"); return -ETIMEDOUT; } @@ -436,8 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) &priv->rx_ring_dma); if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { - printk(KERN_ERR "%s: Cannot allocate RX ring\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "cannot allocate rx ring\n"); return -ENOMEM; } @@ -494,8 +502,8 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev, ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); if (!ring || (unsigned long)ring & 0xFF) { - printk(KERN_ERR "%s: Cannot allocate TX ring (prio = %d)\n", - wiphy_name(dev->wiphy), prio); + wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n", + prio); return -ENOMEM; } @@ -560,8 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev) ret = request_irq(priv->pdev->irq, rtl8180_interrupt, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) { - printk(KERN_ERR "%s: failed to register IRQ handler\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "failed to register irq handler\n"); goto err_free_rings; } @@ -671,7 +678,7 @@ static u64 rtl8180_get_tsf(struct ieee80211_hw *dev) (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32; } -void rtl8180_beacon_work(struct work_struct *work) +static void rtl8180_beacon_work(struct work_struct *work) { struct rtl8180_vif *vif_priv = container_of(work, struct rtl8180_vif, beacon_work.work); @@ -1098,9 +1105,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev, goto err_iounmap; } - printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n", - wiphy_name(dev->wiphy), mac_addr, - chip_name, priv->rf->name); + wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n", + mac_addr, chip_name, priv->rf->name); return 0; diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c index 947ee55f18b2c098f2688d1b6a8886815d9f9314..5cab9dfa8c07c1d5d88be64fd812e6eb3d0a3100 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c +++ b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c @@ -69,6 +69,15 @@ static void grf5101_write_phy_antenna(struct ieee80211_hw *dev, short chan) 
rtl8180_write_phy(dev, 0x10, ant); } +static u8 grf5101_rf_calc_rssi(u8 agc, u8 sq) +{ + if (agc > 60) + return 65; + + /* TODO(?): just return agc (or agc + 5) to avoid mult / div */ + return 65 * agc / 60; +} + static void grf5101_rf_set_channel(struct ieee80211_hw *dev, struct ieee80211_conf *conf) { @@ -176,5 +185,6 @@ const struct rtl818x_rf_ops grf5101_rf_ops = { .name = "GCT", .init = grf5101_rf_init, .stop = grf5101_rf_stop, - .set_chan = grf5101_rf_set_channel + .set_chan = grf5101_rf_set_channel, + .calc_rssi = grf5101_rf_calc_rssi, }; diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.c b/drivers/net/wireless/rtl818x/rtl8180_max2820.c index 6c825fd7f3b6f86f0b677860980a852d5ff918e8..16c4655181c03fa680773beafce29d05ad819b87 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_max2820.c +++ b/drivers/net/wireless/rtl818x/rtl8180_max2820.c @@ -74,6 +74,22 @@ static void max2820_write_phy_antenna(struct ieee80211_hw *dev, short chan) rtl8180_write_phy(dev, 0x10, ant); } +static u8 max2820_rf_calc_rssi(u8 agc, u8 sq) +{ + bool odd; + + odd = !!(agc & 1); + + agc >>= 1; + if (odd) + agc += 76; + else + agc += 66; + + /* TODO: change addends above to avoid mult / div below */ + return 65 * agc / 100; +} + static void max2820_rf_set_channel(struct ieee80211_hw *dev, struct ieee80211_conf *conf) { @@ -148,5 +164,6 @@ const struct rtl818x_rf_ops max2820_rf_ops = { .name = "Maxim", .init = max2820_rf_init, .stop = max2820_rf_stop, - .set_chan = max2820_rf_set_channel + .set_chan = max2820_rf_set_channel, + .calc_rssi = max2820_rf_calc_rssi, }; diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c index 4d2be0d9672b58b529ab6570f5c940a6bb78c9ad..69e4d4745dae76c4c00d719a51d9a320e941b917 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c @@ -50,7 +50,10 @@ static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data) udelay(10); for (i = 15; i >= 0; i--) { - u16 reg = reg80 | !!(bangdata & (1 << i)); + u16 reg = reg80; + + if (bangdata & (1 << i)) + reg |= 1; if (i & 1) rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c index cea4e0ccb92dc17c509a90ce0da211112b0842cc..d064fcc5ec08e2353c68c12acbdd3f19b7459e08 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c +++ b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c @@ -76,6 +76,31 @@ static void sa2400_write_phy_antenna(struct ieee80211_hw *dev, short chan) } +static u8 sa2400_rf_rssi_map[] = { + 0x64, 0x64, 0x63, 0x62, 0x61, 0x60, 0x5f, 0x5e, + 0x5d, 0x5c, 0x5b, 0x5a, 0x57, 0x54, 0x52, 0x50, + 0x4e, 0x4c, 0x4a, 0x48, 0x46, 0x44, 0x41, 0x3f, + 0x3c, 0x3a, 0x37, 0x36, 0x36, 0x1c, 0x1c, 0x1b, + 0x1b, 0x1a, 0x1a, 0x19, 0x19, 0x18, 0x18, 0x17, + 0x17, 0x16, 0x16, 0x15, 0x15, 0x14, 0x14, 0x13, + 0x13, 0x12, 0x12, 0x11, 0x11, 0x10, 0x10, 0x0f, + 0x0f, 0x0e, 0x0e, 0x0d, 0x0d, 0x0c, 0x0c, 0x0b, + 0x0b, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x07, + 0x07, 0x06, 0x06, 0x05, 0x04, 0x03, 0x02, +}; + +static u8 sa2400_rf_calc_rssi(u8 agc, u8 sq) +{ + if (sq == 0x80) + return 1; + + if (sq > 78) + return 32; + + /* TODO: recalc sa2400_rf_rssi_map to avoid mult / div */ + return 65 * sa2400_rf_rssi_map[sq] / 100; +} + static void sa2400_rf_set_channel(struct ieee80211_hw *dev, struct ieee80211_conf *conf) { @@ -198,5 +223,6 @@ const struct rtl818x_rf_ops sa2400_rf_ops = { .name = "Philips", .init = sa2400_rf_init, .stop = 
sa2400_rf_stop, - .set_chan = sa2400_rf_set_channel + .set_chan = sa2400_rf_set_channel, + .calc_rssi = sa2400_rf_calc_rssi, }; diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h index 6bb32112e65c16b8b3255b4cd45eb531ea0f4672..98878160a65ae056cfc4db48fbc0ee4ab9f71774 100644 --- a/drivers/net/wireless/rtl818x/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187.h @@ -47,7 +47,7 @@ struct rtl8187_rx_hdr { u8 agc; u8 reserved; __le64 mac_time; -} __attribute__((packed)); +} __packed; struct rtl8187b_rx_hdr { __le32 flags; @@ -59,7 +59,7 @@ struct rtl8187b_rx_hdr { __le16 snr_long2end; s8 pwdb_g12; u8 fot; -} __attribute__((packed)); +} __packed; /* {rtl8187,rtl8187b}_tx_info is in skb */ @@ -68,7 +68,7 @@ struct rtl8187_tx_hdr { __le16 rts_duration; __le16 len; __le32 retry; -} __attribute__((packed)); +} __packed; struct rtl8187b_tx_hdr { __le32 flags; @@ -80,7 +80,7 @@ struct rtl8187b_tx_hdr { __le32 unused_3; __le32 retry; __le32 unused_4[2]; -} __attribute__((packed)); +} __packed; enum { DEVICE_RTL8187, diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 891b8490e34978b0d0bbb71b777a9da617f35cb4..5738a55c1b06b4c9bd7a8c30323266b0af2151b4 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev) } while (--i); if (!i) { - printk(KERN_ERR "%s: Reset timeout!\n", wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "reset timeout!\n"); return -ETIMEDOUT; } @@ -589,8 +589,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev) } while (--i); if (!i) { - printk(KERN_ERR "%s: eeprom reset timeout!\n", - wiphy_name(dev->wiphy)); + wiphy_err(dev->wiphy, "eeprom reset timeout!\n"); return -ETIMEDOUT; } @@ -1527,9 +1526,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); - printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n", - wiphy_name(dev->wiphy), mac_addr, - chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); + wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n", + mac_addr, chip_name, priv->asic_rev, priv->rf->name, + priv->rfkill_mask); #ifdef CONFIG_RTL8187_LEDS eeprom_93cx6_read(&eeprom, 0x3F, ®); diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index a09819386a1e503d3a600c25941bfec360804668..fd96f9112322439f7bf3f72716f943459a9c347c 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -366,8 +366,8 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev) rtl8225_write(dev, 0x02, 0x044d); msleep(100); if (!(rtl8225_read(dev, 6) & (1 << 7))) - printk(KERN_WARNING "%s: RF Calibration Failed! %x\n", - wiphy_name(dev->wiphy), rtl8225_read(dev, 6)); + wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", + rtl8225_read(dev, 6)); } rtl8225_write(dev, 0x0, 0x127); @@ -735,8 +735,8 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev) rtl8225_write(dev, 0x02, 0x044D); msleep(100); if (!(rtl8225_read(dev, 6) & (1 << 7))) - printk(KERN_WARNING "%s: RF Calibration Failed! %x\n", - wiphy_name(dev->wiphy), rtl8225_read(dev, 6)); + wiphy_warn(dev->wiphy, "rf calibration failed! 
%x\n", + rtl8225_read(dev, 6)); } msleep(200); diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h index 8522490d2e298af114e020c59fad8388853830c3..1615f63b02f6128f781364f86a8780b7bdaf9c21 100644 --- a/drivers/net/wireless/rtl818x/rtl818x.h +++ b/drivers/net/wireless/rtl818x/rtl818x.h @@ -185,7 +185,7 @@ struct rtl818x_csr { u8 reserved_22[4]; __le16 TALLY_CNT; u8 TALLY_SEL; -} __attribute__((packed)); +} __packed; struct rtl818x_rf_ops { char *name; @@ -193,6 +193,7 @@ struct rtl818x_rf_ops { void (*stop)(struct ieee80211_hw *); void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *); + u8 (*calc_rssi)(u8 agc, u8 sq); }; /** diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig index 337fc7bec5a5056ee9bbb73ba6ff16417d704b75..2f98058be4510f217f17c5f09a0f832d5c5e8198 100644 --- a/drivers/net/wireless/wl12xx/Kconfig +++ b/drivers/net/wireless/wl12xx/Kconfig @@ -41,7 +41,7 @@ config WL1251_SDIO config WL1271 tristate "TI wl1271 support" - depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS + depends on WL12XX && GENERIC_HARDIRQS depends on INET select FW_LOADER select CRC7 @@ -65,7 +65,7 @@ config WL1271_SPI config WL1271_SDIO tristate "TI wl1271 SDIO support" - depends on WL1271 && MMC && ARM + depends on WL1271 && MMC ---help--- This module adds support for the SDIO interface of adapters using TI wl1271 chipset. Select this if your platform is using diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile index 27ddd2be0a9168da0bf63f3787e4ac30432fe8cb..078b4398ac1f6e46f6a65158c2a45a1f4c50aedf 100644 --- a/drivers/net/wireless/wl12xx/Makefile +++ b/drivers/net/wireless/wl12xx/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \ wl1271_event.o wl1271_tx.o wl1271_rx.o \ wl1271_ps.o wl1271_acx.o wl1271_boot.o \ - wl1271_init.o wl1271_debugfs.o + wl1271_init.o wl1271_debugfs.o wl1271_scan.o wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o obj-$(CONFIG_WL1271) += wl1271.o diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h index 4f5f02a26e6272887fabf4f896915f8b916c782f..6b942a28e6a5dd02e87cebd8f20273e234cdea10 100644 --- a/drivers/net/wireless/wl12xx/wl1251.h +++ b/drivers/net/wireless/wl12xx/wl1251.h @@ -381,6 +381,9 @@ struct wl1251 { u32 chip_id; char fw_ver[21]; + + /* Most recently reported noise in dBm */ + s8 noise; }; int wl1251_plt_start(struct wl1251 *wl); diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h index 26160c45784cbdae95eb253c1776b2dd796ca75c..842df310d92a61554f44184273b3470608b9a165 100644 --- a/drivers/net/wireless/wl12xx/wl1251_acx.h +++ b/drivers/net/wireless/wl12xx/wl1251_acx.h @@ -60,7 +60,7 @@ struct acx_error_counter { /* the number of missed sequence numbers in the squentially */ /* values of frames seq numbers */ u32 seq_num_miss; -} __attribute__ ((packed)); +} __packed; struct acx_revision { struct acx_header header; @@ -89,7 +89,7 @@ struct acx_revision { * bits 24 - 31: Chip ID - The WiLink chip ID. 
*/ u32 hw_version; -} __attribute__ ((packed)); +} __packed; enum wl1251_psm_mode { /* Active mode */ @@ -111,7 +111,7 @@ struct acx_sleep_auth { /* 2 - ELP mode: Deep / Max sleep*/ u8 sleep_auth; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; enum { HOSTIF_PCI_MASTER_HOST_INDIRECT, @@ -159,7 +159,7 @@ struct acx_data_path_params { * complete ring until an interrupt is generated. */ u32 tx_complete_timeout; -} __attribute__ ((packed)); +} __packed; struct acx_data_path_params_resp { @@ -180,7 +180,7 @@ struct acx_data_path_params_resp { u32 tx_control_addr; u32 tx_complete_addr; -} __attribute__ ((packed)); +} __packed; #define TX_MSDU_LIFETIME_MIN 0 #define TX_MSDU_LIFETIME_MAX 3000 @@ -197,7 +197,7 @@ struct acx_rx_msdu_lifetime { * firmware discards the MSDU. */ u32 lifetime; -} __attribute__ ((packed)); +} __packed; /* * RX Config Options Table @@ -285,7 +285,7 @@ struct acx_rx_config { u32 config_options; u32 filter_options; -} __attribute__ ((packed)); +} __packed; enum { QOS_AC_BE = 0, @@ -325,13 +325,13 @@ struct acx_tx_queue_qos_config { /* Lowest memory blocks guaranteed for this queue */ u16 low_threshold; -} __attribute__ ((packed)); +} __packed; struct acx_packet_detection { struct acx_header header; u32 threshold; -} __attribute__ ((packed)); +} __packed; enum acx_slot_type { @@ -349,7 +349,7 @@ struct acx_slot { u8 wone_index; /* Reserved */ u8 slot_time; u8 reserved[6]; -} __attribute__ ((packed)); +} __packed; #define ADDRESS_GROUP_MAX (8) @@ -362,7 +362,7 @@ struct acx_dot11_grp_addr_tbl { u8 num_groups; u8 pad[2]; u8 mac_table[ADDRESS_GROUP_MAX_LEN]; -} __attribute__ ((packed)); +} __packed; #define RX_TIMEOUT_PS_POLL_MIN 0 @@ -388,7 +388,7 @@ struct acx_rx_timeout { * from an UPSD enabled queue. */ u16 upsd_timeout; -} __attribute__ ((packed)); +} __packed; #define RTS_THRESHOLD_MIN 0 #define RTS_THRESHOLD_MAX 4096 @@ -399,7 +399,7 @@ struct acx_rts_threshold { u16 threshold; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_beacon_filter_option { struct acx_header header; @@ -415,7 +415,7 @@ struct acx_beacon_filter_option { */ u8 max_num_beacons; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; /* * ACXBeaconFilterEntry (not 221) @@ -461,7 +461,7 @@ struct acx_beacon_filter_ie_table { u8 num_ie; u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; u8 pad[3]; -} __attribute__ ((packed)); +} __packed; #define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ #define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ @@ -494,7 +494,7 @@ struct acx_bt_wlan_coex { */ u8 enable; u8 pad[3]; -} __attribute__ ((packed)); +} __packed; #define PTA_ANTENNA_TYPE_DEF (0) #define PTA_BT_HP_MAXTIME_DEF (2000) @@ -648,7 +648,7 @@ struct acx_bt_wlan_coex_param { /* range: 0 - 20 default: 1 */ u8 bt_hp_respected_num; -} __attribute__ ((packed)); +} __packed; #define CCA_THRSH_ENABLE_ENERGY_D 0x140A #define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF @@ -660,7 +660,7 @@ struct acx_energy_detection { u16 rx_cca_threshold; u8 tx_energy_detection; u8 pad; -} __attribute__ ((packed)); +} __packed; #define BCN_RX_TIMEOUT_DEF_VALUE 10000 #define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 @@ -679,14 +679,14 @@ struct acx_beacon_broadcast { /* Consecutive PS Poll failures before updating the host */ u8 ps_poll_threshold; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_event_mask { struct acx_header header; u32 event_mask; u32 high_event_mask; /* Unused */ -} __attribute__ ((packed)); +} __packed; #define CFG_RX_FCS BIT(2) #define CFG_RX_ALL_GOOD BIT(3) @@ -729,7 +729,7 @@ 
struct acx_fw_gen_frame_rates { u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ u8 tx_mgt_frame_rate; u8 tx_mgt_frame_mod; -} __attribute__ ((packed)); +} __packed; /* STA MAC */ struct acx_dot11_station_id { @@ -737,28 +737,28 @@ struct acx_dot11_station_id { u8 mac[ETH_ALEN]; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_feature_config { struct acx_header header; u32 options; u32 data_flow_options; -} __attribute__ ((packed)); +} __packed; struct acx_current_tx_power { struct acx_header header; u8 current_tx_power; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct acx_dot11_default_key { struct acx_header header; u8 id; u8 pad[3]; -} __attribute__ ((packed)); +} __packed; struct acx_tsf_info { struct acx_header header; @@ -769,7 +769,7 @@ struct acx_tsf_info { u32 last_TBTT_lsb; u8 last_dtim_count; u8 pad[3]; -} __attribute__ ((packed)); +} __packed; enum acx_wake_up_event { WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ @@ -785,7 +785,7 @@ struct acx_wake_up_condition { u8 wake_up_event; /* Only one bit can be set */ u8 listen_interval; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_aid { struct acx_header header; @@ -795,7 +795,7 @@ struct acx_aid { */ u16 aid; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; enum acx_preamble_type { ACX_PREAMBLE_LONG = 0, @@ -811,7 +811,7 @@ struct acx_preamble { */ u8 preamble; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; enum acx_ctsprotect_type { CTSPROTECT_DISABLE = 0, @@ -822,11 +822,11 @@ struct acx_ctsprotect { struct acx_header header; u8 ctsprotect; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct acx_tx_statistics { u32 internal_desc_overflow; -} __attribute__ ((packed)); +} __packed; struct acx_rx_statistics { u32 out_of_mem; @@ -837,14 +837,14 @@ struct acx_rx_statistics { u32 xfr_hint_trig; u32 path_reset; u32 reset_counter; -} __attribute__ ((packed)); +} __packed; struct acx_dma_statistics { u32 rx_requested; u32 rx_errors; u32 tx_requested; u32 tx_errors; -} __attribute__ ((packed)); +} __packed; struct acx_isr_statistics { /* host command complete */ @@ -903,7 +903,7 @@ struct acx_isr_statistics { /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ u32 low_rssi; -} __attribute__ ((packed)); +} __packed; struct acx_wep_statistics { /* WEP address keys configured */ @@ -925,7 +925,7 @@ struct acx_wep_statistics { /* WEP decrypt interrupts */ u32 interrupt; -} __attribute__ ((packed)); +} __packed; #define ACX_MISSED_BEACONS_SPREAD 10 @@ -985,12 +985,12 @@ struct acx_pwr_statistics { /* the number of beacons in awake mode */ u32 rcvd_awake_beacons; -} __attribute__ ((packed)); +} __packed; struct acx_mic_statistics { u32 rx_pkts; u32 calc_failure; -} __attribute__ ((packed)); +} __packed; struct acx_aes_statistics { u32 encrypt_fail; @@ -999,7 +999,7 @@ struct acx_aes_statistics { u32 decrypt_packets; u32 encrypt_interrupt; u32 decrypt_interrupt; -} __attribute__ ((packed)); +} __packed; struct acx_event_statistics { u32 heart_beat; @@ -1010,7 +1010,7 @@ struct acx_event_statistics { u32 oom_late; u32 phy_transmit_error; u32 tx_stuck; -} __attribute__ ((packed)); +} __packed; struct acx_ps_statistics { u32 pspoll_timeouts; @@ -1020,7 +1020,7 @@ struct acx_ps_statistics { u32 pspoll_max_apturn; u32 pspoll_utilization; u32 upsd_utilization; -} __attribute__ ((packed)); +} __packed; struct acx_rxpipe_statistics { u32 rx_prep_beacon_drop; @@ -1028,7 +1028,7 @@ struct acx_rxpipe_statistics { u32 beacon_buffer_thres_host_int_trig_rx_data; u32 
missed_beacon_host_int_trig_rx_data; u32 tx_xfr_host_int_trig_rx_data; -} __attribute__ ((packed)); +} __packed; struct acx_statistics { struct acx_header header; @@ -1044,7 +1044,7 @@ struct acx_statistics { struct acx_event_statistics event; struct acx_ps_statistics ps; struct acx_rxpipe_statistics rxpipe; -} __attribute__ ((packed)); +} __packed; #define ACX_MAX_RATE_CLASSES 8 #define ACX_RATE_MASK_UNSPECIFIED 0 @@ -1063,7 +1063,7 @@ struct acx_rate_policy { u32 rate_class_cnt; struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; -} __attribute__ ((packed)); +} __packed; struct wl1251_acx_memory { __le16 num_stations; /* number of STAs to be supported. */ @@ -1082,7 +1082,7 @@ struct wl1251_acx_memory { u8 tx_min_mem_block_num; u8 num_ssid_profiles; __le16 debug_buffer_size; -} __attribute__ ((packed)); +} __packed; #define ACX_RX_DESC_MIN 1 @@ -1094,7 +1094,7 @@ struct wl1251_acx_rx_queue_config { u8 type; u8 priority; __le32 dma_address; -} __attribute__ ((packed)); +} __packed; #define ACX_TX_DESC_MIN 1 #define ACX_TX_DESC_MAX 127 @@ -1103,7 +1103,7 @@ struct wl1251_acx_tx_queue_config { u8 num_descs; u8 pad[2]; u8 attributes; -} __attribute__ ((packed)); +} __packed; #define MAX_TX_QUEUE_CONFIGS 5 #define MAX_TX_QUEUES 4 @@ -1113,7 +1113,7 @@ struct wl1251_acx_config_memory { struct wl1251_acx_memory mem_config; struct wl1251_acx_rx_queue_config rx_queue_config; struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; -} __attribute__ ((packed)); +} __packed; struct wl1251_acx_mem_map { struct acx_header header; @@ -1147,7 +1147,7 @@ struct wl1251_acx_mem_map { /* Number of blocks FW allocated for RX packets */ u32 num_rx_mem_blocks; -} __attribute__ ((packed)); +} __packed; struct wl1251_acx_wr_tbtt_and_dtim { @@ -1164,7 +1164,7 @@ struct wl1251_acx_wr_tbtt_and_dtim { */ u8 dtim; u8 padding; -} __attribute__ ((packed)); +} __packed; struct wl1251_acx_ac_cfg { struct acx_header header; @@ -1194,7 +1194,7 @@ struct wl1251_acx_ac_cfg { /* The TX Op Limit (in microseconds) for the access class. 
*/ u16 txop_limit; -} __attribute__ ((packed)); +} __packed; enum wl1251_acx_channel_type { @@ -1245,7 +1245,7 @@ struct wl1251_acx_tid_cfg { /* not supported */ u32 apsdconf[2]; -} __attribute__ ((packed)); +} __packed; /************************************************************************* diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c index 2545123931e85357e152f9ffc6dfc35a8834e873..65e0416be5b6975642ea589a105b39bda95b5e32 100644 --- a/drivers/net/wireless/wl12xx/wl1251_boot.c +++ b/drivers/net/wireless/wl12xx/wl1251_boot.c @@ -225,7 +225,7 @@ static void wl1251_boot_set_ecpu_ctrl(struct wl1251 *wl, u32 flag) int wl1251_boot_run_firmware(struct wl1251 *wl) { int loop, ret; - u32 chip_id, interrupt; + u32 chip_id, acx_intr; wl1251_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); @@ -242,15 +242,15 @@ int wl1251_boot_run_firmware(struct wl1251 *wl) loop = 0; while (loop++ < INIT_LOOP) { udelay(INIT_LOOP_DELAY); - interrupt = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + acx_intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); - if (interrupt == 0xffffffff) { + if (acx_intr == 0xffffffff) { wl1251_error("error reading hardware complete " "init indication"); return -EIO; } /* check that ACX_INTR_INIT_COMPLETE is enabled */ - else if (interrupt & WL1251_ACX_INTR_INIT_COMPLETE) { + else if (acx_intr & WL1251_ACX_INTR_INIT_COMPLETE) { wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK, WL1251_ACX_INTR_INIT_COMPLETE); break; diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h index 4ad67cae94d2a6fedf57bb6c94d01276ab06ee84..a9e4991369be5d47d3a1de2f9029649476b324f0 100644 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.h +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h @@ -106,7 +106,7 @@ struct wl1251_cmd_header { u16 status; /* payload */ u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct wl1251_command { struct wl1251_cmd_header header; @@ -175,8 +175,8 @@ struct cmd_read_write_memory { #define WL1251_SCAN_NUM_PROBES 3 struct wl1251_scan_parameters { - u32 rx_config_options; - u32 rx_filter_options; + __le32 rx_config_options; + __le32 rx_filter_options; /* * Scan options: @@ -186,7 +186,7 @@ struct wl1251_scan_parameters { * bit 2: voice mode, 0 for normal scan. * bit 3: scan priority, 1 for high priority. 
*/ - u16 scan_options; + __le16 scan_options; /* Number of channels to scan */ u8 num_channels; @@ -195,17 +195,17 @@ struct wl1251_scan_parameters { u8 num_probe_requests; /* Rate and modulation for probe requests */ - u16 tx_rate; + __le16 tx_rate; u8 tid_trigger; u8 ssid_len; u8 ssid[32]; -} __attribute__ ((packed)); +} __packed; struct wl1251_scan_ch_parameters { - u32 min_duration; /* in TU */ - u32 max_duration; /* in TU */ + __le32 min_duration; /* in TU */ + __le32 max_duration; /* in TU */ u32 bssid_lsb; u16 bssid_msb; @@ -218,7 +218,7 @@ struct wl1251_scan_ch_parameters { u8 tx_power_att; u8 channel; u8 pad[3]; -} __attribute__ ((packed)); +} __packed; /* SCAN parameters */ #define SCAN_MAX_NUM_OF_CHANNELS 16 @@ -228,7 +228,7 @@ struct wl1251_cmd_scan { struct wl1251_scan_parameters params; struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; -} __attribute__ ((packed)); +} __packed; enum { BSS_TYPE_IBSS = 0, @@ -276,14 +276,14 @@ struct cmd_join { u8 tx_mgt_frame_rate; /* OBSOLETE */ u8 tx_mgt_frame_mod; /* OBSOLETE */ u8 reserved; -} __attribute__ ((packed)); +} __packed; struct cmd_enabledisable_path { struct wl1251_cmd_header header; u8 channel; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; #define WL1251_MAX_TEMPLATE_SIZE 300 @@ -292,7 +292,7 @@ struct wl1251_cmd_packet_template { __le16 size; u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define TIM_ELE_ID 5 #define PARTIAL_VBM_MAX 251 @@ -304,7 +304,7 @@ struct wl1251_tim { u8 dtim_period; u8 bitmap_ctrl; u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ -} __attribute__ ((packed)); +} __packed; /* Virtual Bit Map update */ struct wl1251_cmd_vbm_update { @@ -312,7 +312,7 @@ struct wl1251_cmd_vbm_update { __le16 len; u8 padding[2]; struct wl1251_tim tim; -} __attribute__ ((packed)); +} __packed; enum wl1251_cmd_ps_mode { STATION_ACTIVE_MODE, @@ -333,7 +333,7 @@ struct wl1251_cmd_ps_params { u8 hang_over_period; u16 null_data_rate; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct wl1251_cmd_trigger_scan_to { struct wl1251_cmd_header header; @@ -411,7 +411,7 @@ struct wl1251_cmd_set_keys { u8 key[MAX_KEY_SIZE]; u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; -} __attribute__ ((packed)); +} __packed; #endif /* __WL1251_CMD_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h index be0ac54d62468a9931a45a9179ccff94bdbe3a94..f48a2b66bc5a670b673b45d19a35c5b77b14de9b 100644 --- a/drivers/net/wireless/wl12xx/wl1251_event.h +++ b/drivers/net/wireless/wl12xx/wl1251_event.h @@ -82,7 +82,7 @@ struct event_debug_report { u32 report_1; u32 report_2; u32 report_3; -} __attribute__ ((packed)); +} __packed; struct event_mailbox { u32 events_vector; @@ -112,7 +112,7 @@ struct event_mailbox { struct event_debug_report report; u8 average_snr_level; u8 padding[19]; -} __attribute__ ((packed)); +} __packed; int wl1251_event_unmask(struct wl1251 *wl); void wl1251_event_mbox_config(struct wl1251 *wl); diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c index 00b24282fc738cad290466902ec12735e131cb50..861a5f33761e9ea94d15d355a360835a0766ade0 100644 --- a/drivers/net/wireless/wl12xx/wl1251_main.c +++ b/drivers/net/wireless/wl12xx/wl1251_main.c @@ -124,7 +124,7 @@ static int wl1251_fetch_nvs(struct wl1251 *wl) } wl->nvs_len = fw->size; - wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL); + wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL); if (!wl->nvs) { 
wl1251_error("could not allocate memory for the nvs file"); @@ -132,8 +132,6 @@ static int wl1251_fetch_nvs(struct wl1251 *wl) goto out; } - memcpy(wl->nvs, fw->data, wl->nvs_len); - ret = 0; out: @@ -413,6 +411,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) static int wl1251_op_start(struct ieee80211_hw *hw) { struct wl1251 *wl = hw->priv; + struct wiphy *wiphy = hw->wiphy; int ret = 0; wl1251_debug(DEBUG_MAC80211, "mac80211 start"); @@ -446,6 +445,10 @@ static int wl1251_op_start(struct ieee80211_hw *hw) wl1251_info("firmware booted (%s)", wl->fw_ver); + /* update hw/fw version info in wiphy struct */ + wiphy->hw_version = wl->chip_id; + strncpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version)); + out: if (ret < 0) wl1251_power_off(wl); @@ -1174,6 +1177,22 @@ static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue, return ret; } +static int wl1251_op_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct wl1251 *wl = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + + if (idx != 0) + return -ENOENT; + + survey->channel = conf->channel; + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = wl->noise; + + return 0; +} + /* can't be const, mac80211 writes to this */ static struct ieee80211_supported_band wl1251_band_2ghz = { .channels = wl1251_channels, @@ -1195,6 +1214,7 @@ static const struct ieee80211_ops wl1251_ops = { .bss_info_changed = wl1251_op_bss_info_changed, .set_rts_threshold = wl1251_op_set_rts_threshold, .conf_tx = wl1251_op_conf_tx, + .get_survey = wl1251_op_get_survey, }; static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data) @@ -1419,5 +1439,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw); MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo "); -MODULE_ALIAS("spi:wl1251"); MODULE_FIRMWARE(WL1251_FW_NAME); diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c index 851515836a7f6217085ccbd5f32bf38798ab63ac..1b6294b3b996fd9f660d17c02bc1543b9bd428dd 100644 --- a/drivers/net/wireless/wl12xx/wl1251_rx.c +++ b/drivers/net/wireless/wl12xx/wl1251_rx.c @@ -74,6 +74,12 @@ static void wl1251_rx_status(struct wl1251 *wl, status->signal = desc->rssi; + /* + * FIXME: guessing that snr needs to be divided by two, otherwise + * the values don't make any sense + */ + wl->noise = desc->rssi - desc->snr / 2; + status->freq = ieee80211_channel_to_frequency(desc->channel); status->flag |= RX_FLAG_TSFT; diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h index 563a3fde40fb8ded986c4dc1f0d0f72917f6b125..da4e53406a0ead9810568785a1f8d5fd3a3c3b1f 100644 --- a/drivers/net/wireless/wl12xx/wl1251_rx.h +++ b/drivers/net/wireless/wl12xx/wl1251_rx.h @@ -117,7 +117,7 @@ struct wl1251_rx_descriptor { s8 rssi; /* in dB */ u8 rcpi; /* in dB */ u8 snr; /* in dB */ -} __attribute__ ((packed)); +} __packed; void wl1251_rx(struct wl1251 *wl); diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c index c561332e70092b18280fff1843f948b13603289a..b901b6135654f0ce00974411ad4f69628963f596 100644 --- a/drivers/net/wireless/wl12xx/wl1251_sdio.c +++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c @@ -37,11 +37,17 @@ #define SDIO_DEVICE_ID_TI_WL1251 0x9066 #endif +struct wl1251_sdio { + struct sdio_func *func; + u32 elp_val; +}; + static struct wl12xx_platform_data *wl12xx_board_data; static struct sdio_func *wl_to_func(struct wl1251 *wl) { - return 
wl->if_priv; + struct wl1251_sdio *wl_sdio = wl->if_priv; + return wl_sdio->func; } static void wl1251_sdio_interrupt(struct sdio_func *func) @@ -90,10 +96,17 @@ static void wl1251_sdio_write(struct wl1251 *wl, int addr, static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val) { int ret = 0; - struct sdio_func *func = wl_to_func(wl); - + struct wl1251_sdio *wl_sdio = wl->if_priv; + struct sdio_func *func = wl_sdio->func; + + /* + * The hardware only supports RAW (read after write) access for + * reading, regular sdio_readb won't work here (it interprets + * the unused bits of CMD52 as write data even if we send read + * request). + */ sdio_claim_host(func); - *val = sdio_readb(func, addr, &ret); + *val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret); sdio_release_host(func); if (ret) @@ -103,7 +116,8 @@ static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val) static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val) { int ret = 0; - struct sdio_func *func = wl_to_func(wl); + struct wl1251_sdio *wl_sdio = wl->if_priv; + struct sdio_func *func = wl_sdio->func; sdio_claim_host(func); sdio_writeb(func, val, addr, &ret); @@ -111,6 +125,8 @@ static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val) if (ret) wl1251_error("sdio_writeb failed (%d)", ret); + else + wl_sdio->elp_val = val; } static void wl1251_sdio_reset(struct wl1251 *wl) @@ -197,6 +213,7 @@ static int wl1251_sdio_probe(struct sdio_func *func, int ret; struct wl1251 *wl; struct ieee80211_hw *hw; + struct wl1251_sdio *wl_sdio; hw = wl1251_alloc_hw(); if (IS_ERR(hw)) @@ -204,6 +221,12 @@ static int wl1251_sdio_probe(struct sdio_func *func, wl = hw->priv; + wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL); + if (wl_sdio == NULL) { + ret = -ENOMEM; + goto out_free_hw; + } + sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) @@ -213,7 +236,8 @@ static int wl1251_sdio_probe(struct sdio_func *func, sdio_release_host(func); SET_IEEE80211_DEV(hw, &func->dev); - wl->if_priv = func; + wl_sdio->func = func; + wl->if_priv = wl_sdio; wl->if_ops = &wl1251_sdio_ops; wl->set_power = wl1251_sdio_set_power; @@ -259,6 +283,8 @@ static int wl1251_sdio_probe(struct sdio_func *func, sdio_disable_func(func); release: sdio_release_host(func); + kfree(wl_sdio); +out_free_hw: wl1251_free_hw(wl); return ret; } @@ -266,9 +292,11 @@ static int wl1251_sdio_probe(struct sdio_func *func, static void __devexit wl1251_sdio_remove(struct sdio_func *func) { struct wl1251 *wl = sdio_get_drvdata(func); + struct wl1251_sdio *wl_sdio = wl->if_priv; if (wl->irq) free_irq(wl->irq, wl); + kfree(wl_sdio); wl1251_free_hw(wl); sdio_claim_host(func); diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c index e81474203a23a9df145f9e27178d0fc26aadc225..27fdfaaeb0742c780d82ccaaf2138ed485675cec 100644 --- a/drivers/net/wireless/wl12xx/wl1251_spi.c +++ b/drivers/net/wireless/wl12xx/wl1251_spi.c @@ -345,3 +345,4 @@ module_exit(wl1251_spi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo "); +MODULE_ALIAS("spi:wl1251"); diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c index c8223185efd28129325dc46d52760dba7d5d6732..a38ec199187a99ee7ebb475d547b218b43b8f9fc 100644 --- a/drivers/net/wireless/wl12xx/wl1251_tx.c +++ b/drivers/net/wireless/wl12xx/wl1251_tx.c @@ -117,7 +117,7 @@ static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr) frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; tx_hdr->frag_threshold = 
cpu_to_le16(frag_threshold); - payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH; + payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH; if (payload_len > frag_threshold) { mem_blocks_per_frag = @@ -191,11 +191,13 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, if (control->control.hw_key && control->control.hw_key->alg == ALG_TKIP) { int hdrlen; - u16 fc; + __le16 fc; + u16 length; u8 *pos; - fc = *(u16 *)(skb->data + sizeof(*tx_hdr)); - tx_hdr->length += WL1251_TKIP_IV_SPACE; + fc = *(__le16 *)(skb->data + sizeof(*tx_hdr)); + length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE; + tx_hdr->length = cpu_to_le16(length); hdrlen = ieee80211_hdrlen(fc); diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h index 55856c6bb97a85b18f8a65810288f88a61f08ccf..f40eeb37f5aaf590a84a989a3bfdd93636fe5205 100644 --- a/drivers/net/wireless/wl12xx/wl1251_tx.h +++ b/drivers/net/wireless/wl12xx/wl1251_tx.h @@ -109,12 +109,12 @@ struct tx_control { unsigned xfer_pad:1; unsigned reserved:7; -} __attribute__ ((packed)); +} __packed; struct tx_double_buffer_desc { /* Length of payload, including headers. */ - u16 length; + __le16 length; /* * A bit mask that specifies the initial rate to be used @@ -133,10 +133,10 @@ struct tx_double_buffer_desc { * 0x0800 - 48Mbits * 0x1000 - 54Mbits */ - u16 rate; + __le16 rate; /* Time in us that a packet can spend in the target */ - u32 expiry_time; + __le32 expiry_time; /* index of the TX queue used for this packet */ u8 xmit_queue; @@ -150,13 +150,13 @@ struct tx_double_buffer_desc { * The FW should cut the packet into fragments * of this size. */ - u16 frag_threshold; + __le16 frag_threshold; /* Numbers of HW queue blocks to be allocated */ u8 num_mem_blocks; u8 reserved; -} __attribute__ ((packed)); +} __packed; enum { TX_SUCCESS = 0, @@ -208,7 +208,7 @@ struct tx_result { /* See done_1 */ u8 done_2; -} __attribute__ ((packed)); +} __packed; static inline int wl1251_tx_get_queue(int queue) { diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h index 6f1b6b5640c00182f4f178d950aa270111ea4fdb..dd3cee6ea5bb6d486b27973d95f9fa5a35e842f1 100644 --- a/drivers/net/wireless/wl12xx/wl1271.h +++ b/drivers/net/wireless/wl12xx/wl1271.h @@ -33,6 +33,7 @@ #include #include "wl1271_conf.h" +#include "wl1271_ini.h" #define DRIVER_NAME "wl1271" #define DRIVER_PREFIX DRIVER_NAME ": " @@ -116,33 +117,6 @@ enum { #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) -/* NVS data structure */ -#define WL1271_NVS_SECTION_SIZE 468 - -#define WL1271_NVS_GENERAL_PARAMS_SIZE 57 -#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \ - (WL1271_NVS_GENERAL_PARAMS_SIZE + 1) -#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17 -#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \ - (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1) -#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65 -#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \ - (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1) -#define WL1271_NVS_FEM_COUNT 2 -#define WL1271_NVS_INI_SPARE_SIZE 124 - -struct wl1271_nvs_file { - /* NVS section */ - u8 nvs[WL1271_NVS_SECTION_SIZE]; - - /* INI section */ - u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED]; - u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED]; - u8 dyn_radio_params[WL1271_NVS_FEM_COUNT] - [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED]; - u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE]; -} __attribute__ ((packed)); - /* 
* Enable/disable 802.11a support for WL1273 */ @@ -317,7 +291,7 @@ struct wl1271_fw_status { __le32 tx_released_blks[NUM_TX_QUEUES]; __le32 fw_localtime; __le32 padding[2]; -} __attribute__ ((packed)); +} __packed; struct wl1271_rx_mem_pool_addr { u32 addr; @@ -325,12 +299,11 @@ struct wl1271_rx_mem_pool_addr { }; struct wl1271_scan { + struct cfg80211_scan_request *req; + bool *scanned_ch; u8 state; u8 ssid[IW_ESSID_MAX_SIZE+1]; size_t ssid_len; - u8 active; - u8 high_prio; - u8 probe_requests; }; struct wl1271_if_operations { @@ -368,13 +341,14 @@ struct wl1271 { #define WL1271_FLAG_JOINED (2) #define WL1271_FLAG_GPIO_POWER (3) #define WL1271_FLAG_TX_QUEUE_STOPPED (4) -#define WL1271_FLAG_SCANNING (5) -#define WL1271_FLAG_IN_ELP (6) -#define WL1271_FLAG_PSM (7) -#define WL1271_FLAG_PSM_REQUESTED (8) -#define WL1271_FLAG_IRQ_PENDING (9) -#define WL1271_FLAG_IRQ_RUNNING (10) -#define WL1271_FLAG_IDLE (11) +#define WL1271_FLAG_IN_ELP (5) +#define WL1271_FLAG_PSM (6) +#define WL1271_FLAG_PSM_REQUESTED (7) +#define WL1271_FLAG_IRQ_PENDING (8) +#define WL1271_FLAG_IRQ_RUNNING (9) +#define WL1271_FLAG_IDLE (10) +#define WL1271_FLAG_IDLE_REQUESTED (11) +#define WL1271_FLAG_PSPOLL_FAILURE (12) unsigned long flags; struct wl1271_partition_set part; @@ -421,6 +395,7 @@ struct wl1271 { /* Pending TX frames */ struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; + int tx_frames_cnt; /* Security sequence number counters */ u8 tx_security_last_seq; @@ -468,6 +443,10 @@ struct wl1271 { struct completion *elp_compl; struct delayed_work elp_work; + struct delayed_work pspoll_work; + + /* counter for ps-poll delivery failures */ + int ps_poll_failures; /* retry counter for PSM entries */ u8 psm_entry_retry; @@ -496,6 +475,9 @@ struct wl1271 { bool sg_enabled; struct list_head list; + + /* Most recently reported noise in dBm */ + s8 noise; }; int wl1271_plt_start(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c index e19e2f8f1e522d7806fcde6224a35bfa90a4b65a..bb245f05af496a2535ea5324b4113f4d499a4628 100644 --- a/drivers/net/wireless/wl12xx/wl1271_acx.c +++ b/drivers/net/wireless/wl12xx/wl1271_acx.c @@ -1075,8 +1075,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) return ret; } -int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, - u8 version) +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address) { struct wl1271_acx_arp_filter *acx; int ret; @@ -1089,17 +1088,11 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, goto out; } - acx->version = version; + acx->version = ACX_IPV4_VERSION; acx->enable = enable; - if (enable == true) { - if (version == ACX_IPV4_VERSION) - memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE); - else if (version == ACX_IPV6_VERSION) - memcpy(acx->address, address, sizeof(acx->address)); - else - wl1271_error("Invalid IP version"); - } + if (enable == true) + memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE); ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER, acx, sizeof(*acx)); @@ -1266,3 +1259,29 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl) kfree(acx); return ret; } + +int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime) +{ + struct wl1271_acx_fw_tsf_information *tsf_info; + int ret; + + tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL); + if (!tsf_info) { + ret = -ENOMEM; + goto out; + } + + ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, + tsf_info, sizeof(*tsf_info)); + if (ret < 0) { + wl1271_warning("acx tsf info 
interrogate failed"); + goto out; + } + + *mactime = le32_to_cpu(tsf_info->current_tsf_low) | + ((u64) le32_to_cpu(tsf_info->current_tsf_high) << 32); + +out: + kfree(tsf_info); + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h index 420e7e2fc021b357032a861b67acd2bb82007eff..4235bc56f750eb4b40b036952cd246dc2e67db6c 100644 --- a/drivers/net/wireless/wl12xx/wl1271_acx.h +++ b/drivers/net/wireless/wl12xx/wl1271_acx.h @@ -75,7 +75,7 @@ struct acx_header { /* payload length (not including headers */ __le16 len; -} __attribute__ ((packed)); +} __packed; struct acx_error_counter { struct acx_header header; @@ -98,7 +98,7 @@ struct acx_error_counter { /* the number of missed sequence numbers in the squentially */ /* values of frames seq numbers */ __le32 seq_num_miss; -} __attribute__ ((packed)); +} __packed; struct acx_revision { struct acx_header header; @@ -127,7 +127,7 @@ struct acx_revision { * bits 24 - 31: Chip ID - The WiLink chip ID. */ __le32 hw_version; -} __attribute__ ((packed)); +} __packed; enum wl1271_psm_mode { /* Active mode */ @@ -149,7 +149,7 @@ struct acx_sleep_auth { /* 2 - ELP mode: Deep / Max sleep*/ u8 sleep_auth; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; enum { HOSTIF_PCI_MASTER_HOST_INDIRECT, @@ -187,7 +187,7 @@ struct acx_rx_msdu_lifetime { * firmware discards the MSDU. */ __le32 lifetime; -} __attribute__ ((packed)); +} __packed; /* * RX Config Options Table @@ -275,13 +275,13 @@ struct acx_rx_config { __le32 config_options; __le32 filter_options; -} __attribute__ ((packed)); +} __packed; struct acx_packet_detection { struct acx_header header; __le32 threshold; -} __attribute__ ((packed)); +} __packed; enum acx_slot_type { @@ -299,7 +299,7 @@ struct acx_slot { u8 wone_index; /* Reserved */ u8 slot_time; u8 reserved[6]; -} __attribute__ ((packed)); +} __packed; #define ACX_MC_ADDRESS_GROUP_MAX (8) @@ -312,21 +312,21 @@ struct acx_dot11_grp_addr_tbl { u8 num_groups; u8 pad[2]; u8 mac_table[ADDRESS_GROUP_MAX_LEN]; -} __attribute__ ((packed)); +} __packed; struct acx_rx_timeout { struct acx_header header; __le16 ps_poll_timeout; __le16 upsd_timeout; -} __attribute__ ((packed)); +} __packed; struct acx_rts_threshold { struct acx_header header; __le16 threshold; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_beacon_filter_option { struct acx_header header; @@ -342,7 +342,7 @@ struct acx_beacon_filter_option { */ u8 max_num_beacons; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; /* * ACXBeaconFilterEntry (not 221) @@ -383,21 +383,21 @@ struct acx_beacon_filter_ie_table { u8 num_ie; u8 pad[3]; u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; -} __attribute__ ((packed)); +} __packed; struct acx_conn_monit_params { struct acx_header header; __le32 synch_fail_thold; /* number of beacons missed */ __le32 bss_lose_timeout; /* number of TU's from synch fail */ -} __attribute__ ((packed)); +} __packed; struct acx_bt_wlan_coex { struct acx_header header; u8 enable; u8 pad[3]; -} __attribute__ ((packed)); +} __packed; struct acx_bt_wlan_coex_param { struct acx_header header; @@ -405,7 +405,7 @@ struct acx_bt_wlan_coex_param { __le32 params[CONF_SG_PARAMS_MAX]; u8 param_idx; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct acx_dco_itrim_params { struct acx_header header; @@ -413,7 +413,7 @@ struct acx_dco_itrim_params { u8 enable; u8 padding[3]; __le32 timeout; -} __attribute__ ((packed)); +} __packed; struct acx_energy_detection { struct acx_header header; @@ -422,7 +422,7 @@ 
struct acx_energy_detection { __le16 rx_cca_threshold; u8 tx_energy_detection; u8 pad; -} __attribute__ ((packed)); +} __packed; struct acx_beacon_broadcast { struct acx_header header; @@ -436,14 +436,14 @@ struct acx_beacon_broadcast { /* Consecutive PS Poll failures before updating the host */ u8 ps_poll_threshold; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_event_mask { struct acx_header header; __le32 event_mask; __le32 high_event_mask; /* Unused */ -} __attribute__ ((packed)); +} __packed; #define CFG_RX_FCS BIT(2) #define CFG_RX_ALL_GOOD BIT(3) @@ -488,14 +488,14 @@ struct acx_feature_config { __le32 options; __le32 data_flow_options; -} __attribute__ ((packed)); +} __packed; struct acx_current_tx_power { struct acx_header header; u8 current_tx_power; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct acx_wake_up_condition { struct acx_header header; @@ -503,7 +503,7 @@ struct acx_wake_up_condition { u8 wake_up_event; /* Only one bit can be set */ u8 listen_interval; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; struct acx_aid { struct acx_header header; @@ -513,7 +513,7 @@ struct acx_aid { */ __le16 aid; u8 pad[2]; -} __attribute__ ((packed)); +} __packed; enum acx_preamble_type { ACX_PREAMBLE_LONG = 0, @@ -529,7 +529,7 @@ struct acx_preamble { */ u8 preamble; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; enum acx_ctsprotect_type { CTSPROTECT_DISABLE = 0, @@ -540,11 +540,11 @@ struct acx_ctsprotect { struct acx_header header; u8 ctsprotect; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct acx_tx_statistics { __le32 internal_desc_overflow; -} __attribute__ ((packed)); +} __packed; struct acx_rx_statistics { __le32 out_of_mem; @@ -555,14 +555,14 @@ struct acx_rx_statistics { __le32 xfr_hint_trig; __le32 path_reset; __le32 reset_counter; -} __attribute__ ((packed)); +} __packed; struct acx_dma_statistics { __le32 rx_requested; __le32 rx_errors; __le32 tx_requested; __le32 tx_errors; -} __attribute__ ((packed)); +} __packed; struct acx_isr_statistics { /* host command complete */ @@ -621,7 +621,7 @@ struct acx_isr_statistics { /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ __le32 low_rssi; -} __attribute__ ((packed)); +} __packed; struct acx_wep_statistics { /* WEP address keys configured */ @@ -643,7 +643,7 @@ struct acx_wep_statistics { /* WEP decrypt interrupts */ __le32 interrupt; -} __attribute__ ((packed)); +} __packed; #define ACX_MISSED_BEACONS_SPREAD 10 @@ -703,12 +703,12 @@ struct acx_pwr_statistics { /* the number of beacons in awake mode */ __le32 rcvd_awake_beacons; -} __attribute__ ((packed)); +} __packed; struct acx_mic_statistics { __le32 rx_pkts; __le32 calc_failure; -} __attribute__ ((packed)); +} __packed; struct acx_aes_statistics { __le32 encrypt_fail; @@ -717,7 +717,7 @@ struct acx_aes_statistics { __le32 decrypt_packets; __le32 encrypt_interrupt; __le32 decrypt_interrupt; -} __attribute__ ((packed)); +} __packed; struct acx_event_statistics { __le32 heart_beat; @@ -728,7 +728,7 @@ struct acx_event_statistics { __le32 oom_late; __le32 phy_transmit_error; __le32 tx_stuck; -} __attribute__ ((packed)); +} __packed; struct acx_ps_statistics { __le32 pspoll_timeouts; @@ -738,7 +738,7 @@ struct acx_ps_statistics { __le32 pspoll_max_apturn; __le32 pspoll_utilization; __le32 upsd_utilization; -} __attribute__ ((packed)); +} __packed; struct acx_rxpipe_statistics { __le32 rx_prep_beacon_drop; @@ -746,7 +746,7 @@ struct acx_rxpipe_statistics { __le32 beacon_buffer_thres_host_int_trig_rx_data; __le32 
missed_beacon_host_int_trig_rx_data; __le32 tx_xfr_host_int_trig_rx_data; -} __attribute__ ((packed)); +} __packed; struct acx_statistics { struct acx_header header; @@ -762,7 +762,7 @@ struct acx_statistics { struct acx_event_statistics event; struct acx_ps_statistics ps; struct acx_rxpipe_statistics rxpipe; -} __attribute__ ((packed)); +} __packed; struct acx_rate_class { __le32 enabled_rates; @@ -780,7 +780,7 @@ struct acx_rate_policy { __le32 rate_class_cnt; struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; -} __attribute__ ((packed)); +} __packed; struct acx_ac_cfg { struct acx_header header; @@ -790,7 +790,7 @@ struct acx_ac_cfg { u8 aifsn; u8 reserved; __le16 tx_op_limit; -} __attribute__ ((packed)); +} __packed; struct acx_tid_config { struct acx_header header; @@ -801,19 +801,19 @@ struct acx_tid_config { u8 ack_policy; u8 padding[3]; __le32 apsd_conf[2]; -} __attribute__ ((packed)); +} __packed; struct acx_frag_threshold { struct acx_header header; __le16 frag_threshold; u8 padding[2]; -} __attribute__ ((packed)); +} __packed; struct acx_tx_config_options { struct acx_header header; __le16 tx_compl_timeout; /* msec */ __le16 tx_compl_threshold; /* number of packets */ -} __attribute__ ((packed)); +} __packed; #define ACX_RX_MEM_BLOCKS 70 #define ACX_TX_MIN_MEM_BLOCKS 40 @@ -828,7 +828,7 @@ struct wl1271_acx_config_memory { u8 num_stations; u8 num_ssid_profiles; __le32 total_tx_descriptors; -} __attribute__ ((packed)); +} __packed; struct wl1271_acx_mem_map { struct acx_header header; @@ -872,7 +872,7 @@ struct wl1271_acx_mem_map { u8 *rx_cbuf; __le32 rx_ctrl; __le32 tx_ctrl; -} __attribute__ ((packed)); +} __packed; struct wl1271_acx_rx_config_opt { struct acx_header header; @@ -882,7 +882,7 @@ struct wl1271_acx_rx_config_opt { __le16 timeout; u8 queue_type; u8 reserved; -} __attribute__ ((packed)); +} __packed; struct wl1271_acx_bet_enable { @@ -891,7 +891,7 @@ struct wl1271_acx_bet_enable { u8 enable; u8 max_consecutive; u8 padding[2]; -} __attribute__ ((packed)); +} __packed; #define ACX_IPV4_VERSION 4 #define ACX_IPV6_VERSION 6 @@ -905,7 +905,7 @@ struct wl1271_acx_arp_filter { requests directed to this IP address will pass through. For IPv4, the first four bytes are used. 
*/ -} __attribute__((packed)); +} __packed; struct wl1271_acx_pm_config { struct acx_header header; @@ -913,14 +913,14 @@ struct wl1271_acx_pm_config { __le32 host_clk_settling_time; u8 host_fast_wakeup_support; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; struct wl1271_acx_keep_alive_mode { struct acx_header header; u8 enabled; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; enum { ACX_KEEP_ALIVE_NO_TX = 0, @@ -940,7 +940,7 @@ struct wl1271_acx_keep_alive_config { u8 tpl_validation; u8 trigger; u8 padding; -} __attribute__ ((packed)); +} __packed; enum { WL1271_ACX_TRIG_TYPE_LEVEL = 0, @@ -993,6 +993,17 @@ struct wl1271_acx_rssi_snr_avg_weights { u8 snr_data; }; +struct wl1271_acx_fw_tsf_information { + struct acx_header header; + + __le32 current_tsf_high; + __le32 current_tsf_low; + __le32 last_bttt_high; + __le32 last_tbtt_low; + u8 last_dtim_count; + u8 padding[3]; +} __packed; + enum { ACX_WAKE_UP_CONDITIONS = 0x0002, ACX_MEM_CFG = 0x0003, @@ -1106,13 +1117,13 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl); int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); int wl1271_acx_smart_reflex(struct wl1271 *wl); int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); -int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, - u8 version); +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address); int wl1271_acx_pm_config(struct wl1271 *wl); int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable); int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid); int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, s16 thold, u8 hyst); int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); +int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); #endif /* __WL1271_ACX_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c index 1a36d8a2196e7c7c19f8b81dd079c9eaa9140461..f36430b0336d87d3c118962f0e573132ef7afa07 100644 --- a/drivers/net/wireless/wl12xx/wl1271_boot.c +++ b/drivers/net/wireless/wl12xx/wl1271_boot.c @@ -414,7 +414,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl) PS_REPORT_EVENT_ID | JOIN_EVENT_COMPLETE_ID | DISCONNECT_EVENT_COMPLETE_ID | - RSSI_SNR_TRIGGER_0_EVENT_ID; + RSSI_SNR_TRIGGER_0_EVENT_ID | + PSPOLL_DELIVERY_FAILURE_EVENT_ID | + SOFT_GEMINI_SENSE_EVENT_ID; ret = wl1271_event_unmask(wl); if (ret < 0) { diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c index 19393e236e2c8426fb4002d4e77038a9b13055c4..ce503ddd5a41e8504d894cd5516973d078af8769 100644 --- a/drivers/net/wireless/wl12xx/wl1271_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c @@ -104,100 +104,6 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, return ret; } -static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl) -{ - struct wl1271_cmd_cal_channel_tune *cmd; - int ret = 0; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->test.id = TEST_CMD_CHANNEL_TUNE; - - cmd->band = WL1271_CHANNEL_TUNE_BAND_2_4; - /* set up any channel, 7 is in the middle of the range */ - cmd->channel = 7; - - ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); - if (ret < 0) - wl1271_warning("TEST_CMD_CHANNEL_TUNE failed"); - - kfree(cmd); - return ret; -} - -static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl) -{ - struct wl1271_cmd_cal_update_ref_point *cmd; - int ret = 0; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->test.id = 
TEST_CMD_UPDATE_PD_REFERENCE_POINT; - - /* FIXME: still waiting for the correct values */ - cmd->ref_power = 0; - cmd->ref_detector = 0; - - cmd->sub_band = WL1271_PD_REFERENCE_POINT_BAND_B_G; - - ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); - if (ret < 0) - wl1271_warning("TEST_CMD_UPDATE_PD_REFERENCE_POINT failed"); - - kfree(cmd); - return ret; -} - -static int wl1271_cmd_cal_p2g(struct wl1271 *wl) -{ - struct wl1271_cmd_cal_p2g *cmd; - int ret = 0; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->test.id = TEST_CMD_P2G_CAL; - - cmd->sub_band_mask = WL1271_CAL_P2G_BAND_B_G; - - ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); - if (ret < 0) - wl1271_warning("TEST_CMD_P2G_CAL failed"); - - kfree(cmd); - return ret; -} - -static int wl1271_cmd_cal(struct wl1271 *wl) -{ - /* - * FIXME: we must make sure that we're not sleeping when calibration - * is done - */ - int ret; - - wl1271_notice("performing tx calibration"); - - ret = wl1271_cmd_cal_channel_tune(wl); - if (ret < 0) - return ret; - - ret = wl1271_cmd_cal_update_ref_point(wl); - if (ret < 0) - return ret; - - ret = wl1271_cmd_cal_p2g(wl); - if (ret < 0) - return ret; - - return ret; -} - int wl1271_cmd_general_parms(struct wl1271 *wl) { struct wl1271_general_parms_cmd *gen_parms; @@ -212,8 +118,8 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; - memcpy(gen_parms->params, wl->nvs->general_params, - WL1271_NVS_GENERAL_PARAMS_SIZE); + memcpy(&gen_parms->general_params, &wl->nvs->general_params, + sizeof(struct wl1271_ini_general_params)); ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); if (ret < 0) @@ -226,7 +132,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) int wl1271_cmd_radio_parms(struct wl1271 *wl) { struct wl1271_radio_parms_cmd *radio_parms; - struct conf_radio_parms *rparam = &wl->conf.init.radioparam; + struct wl1271_ini_general_params *gp = &wl->nvs->general_params; int ret; if (!wl->nvs) @@ -238,13 +144,20 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl) radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; - memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params, - WL1271_NVS_STAT_RADIO_PARAMS_SIZE); - memcpy(radio_parms->dyn_radio_params, - wl->nvs->dyn_radio_params[rparam->fem], - WL1271_NVS_DYN_RADIO_PARAMS_SIZE); - - /* FIXME: current NVS is missing 5GHz parameters */ + /* 2.4GHz parameters */ + memcpy(&radio_parms->static_params_2, &wl->nvs->stat_radio_params_2, + sizeof(struct wl1271_ini_band_params_2)); + memcpy(&radio_parms->dyn_params_2, + &wl->nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, + sizeof(struct wl1271_ini_fem_params_2)); + + /* 5GHz parameters */ + memcpy(&radio_parms->static_params_5, + &wl->nvs->stat_radio_params_5, + sizeof(struct wl1271_ini_band_params_5)); + memcpy(&radio_parms->dyn_params_5, + &wl->nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, + sizeof(struct wl1271_ini_fem_params_5)); wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", radio_parms, sizeof(*radio_parms)); @@ -288,20 +201,10 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) { - static bool do_cal = true; struct wl1271_cmd_join *join; int ret, i; u8 *bssid; - /* FIXME: remove when we get calibration from the factory */ - if (do_cal) { - ret = wl1271_cmd_cal(wl); - if (ret < 0) - wl1271_warning("couldn't calibrate"); - else - do_cal = false; - } - join = kzalloc(sizeof(*join), GFP_KERNEL); if (!join) { 
ret = -ENOMEM; @@ -329,12 +232,6 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) join->channel = wl->channel; join->ssid_len = wl->ssid_len; memcpy(join->ssid, wl->ssid, wl->ssid_len); - join->ctrl = WL1271_JOIN_CMD_CTRL_TX_FLUSH; - - /* increment the session counter */ - wl->session_counter++; - if (wl->session_counter >= SESSION_COUNTER_MAX) - wl->session_counter = 0; join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; @@ -517,7 +414,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send) ps_params->send_null_data = send; ps_params->retries = 5; ps_params->hang_over_period = 1; - ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */ + ps_params->null_data_rate = cpu_to_le32(wl->basic_rate_set); ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, sizeof(*ps_params), 0); @@ -566,140 +463,6 @@ int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, return ret; } -int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, - const u8 *ie, size_t ie_len, u8 active_scan, - u8 high_prio, u8 band, u8 probe_requests) -{ - - struct wl1271_cmd_trigger_scan_to *trigger = NULL; - struct wl1271_cmd_scan *params = NULL; - struct ieee80211_channel *channels; - u32 rate; - int i, j, n_ch, ret; - u16 scan_options = 0; - u8 ieee_band; - - if (band == WL1271_SCAN_BAND_2_4_GHZ) { - ieee_band = IEEE80211_BAND_2GHZ; - rate = wl->conf.tx.basic_rate; - } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) { - ieee_band = IEEE80211_BAND_2GHZ; - rate = wl->conf.tx.basic_rate; - } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) { - ieee_band = IEEE80211_BAND_5GHZ; - rate = wl->conf.tx.basic_rate_5; - } else - return -EINVAL; - - if (wl->hw->wiphy->bands[ieee_band]->channels == NULL) - return -EINVAL; - - channels = wl->hw->wiphy->bands[ieee_band]->channels; - n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels; - - if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) - return -EINVAL; - - params = kzalloc(sizeof(*params), GFP_KERNEL); - if (!params) - return -ENOMEM; - - params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD); - params->params.rx_filter_options = - cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN); - - if (!active_scan) - scan_options |= WL1271_SCAN_OPT_PASSIVE; - if (high_prio) - scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH; - params->params.scan_options = cpu_to_le16(scan_options); - - params->params.num_probe_requests = probe_requests; - params->params.tx_rate = cpu_to_le32(rate); - params->params.tid_trigger = 0; - params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; - - if (band == WL1271_SCAN_BAND_DUAL) - params->params.band = WL1271_SCAN_BAND_2_4_GHZ; - else - params->params.band = band; - - for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) { - if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) { - params->channels[j].min_duration = - cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); - params->channels[j].max_duration = - cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); - memset(¶ms->channels[j].bssid_lsb, 0xff, 4); - memset(¶ms->channels[j].bssid_msb, 0xff, 2); - params->channels[j].early_termination = 0; - params->channels[j].tx_power_att = - WL1271_SCAN_CURRENT_TX_PWR; - params->channels[j].channel = channels[i].hw_value; - j++; - } - } - - params->params.num_channels = j; - - if (ssid_len && ssid) { - params->params.ssid_len = ssid_len; - memcpy(params->params.ssid, ssid, ssid_len); - } - - ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len, - ie, ie_len, ieee_band); - 
if (ret < 0) { - wl1271_error("PROBE request template failed"); - goto out; - } - - trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); - if (!trigger) { - ret = -ENOMEM; - goto out; - } - - /* disable the timeout */ - trigger->timeout = 0; - - ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, - sizeof(*trigger), 0); - if (ret < 0) { - wl1271_error("trigger scan to failed for hw scan"); - goto out; - } - - wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); - - set_bit(WL1271_FLAG_SCANNING, &wl->flags); - if (wl1271_11a_enabled()) { - wl->scan.state = band; - if (band == WL1271_SCAN_BAND_DUAL) { - wl->scan.active = active_scan; - wl->scan.high_prio = high_prio; - wl->scan.probe_requests = probe_requests; - if (ssid_len && ssid) { - wl->scan.ssid_len = ssid_len; - memcpy(wl->scan.ssid, ssid, ssid_len); - } else - wl->scan.ssid_len = 0; - } - } - - ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0); - if (ret < 0) { - wl1271_error("SCAN failed"); - clear_bit(WL1271_FLAG_SCANNING, &wl->flags); - goto out; - } - -out: - kfree(params); - kfree(trigger); - return ret; -} - int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, void *buf, size_t buf_len, int index, u32 rates) { @@ -804,7 +567,7 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) goto out; ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data, - skb->len, 0, wl->basic_rate); + skb->len, 0, wl->basic_rate_set); out: dev_kfree_skb(skb); diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h index f2820b42a943f8e952a54410d1adc566e65484ae..af577ee8eb0257c4585c86518f776b98d380e387 100644 --- a/drivers/net/wireless/wl12xx/wl1271_cmd.h +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h @@ -41,9 +41,6 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send); int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, size_t len); -int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, - const u8 *ie, size_t ie_len, u8 active_scan, - u8 high_prio, u8 band, u8 probe_requests); int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, void *buf, size_t buf_len, int index, u32 rates); int wl1271_cmd_build_null_data(struct wl1271 *wl); @@ -136,14 +133,14 @@ struct wl1271_cmd_header { __le16 status; /* payload */ u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define WL1271_CMD_MAX_PARAMS 572 struct wl1271_command { struct wl1271_cmd_header header; u8 parameters[WL1271_CMD_MAX_PARAMS]; -} __attribute__ ((packed)); +} __packed; enum { CMD_MAILBOX_IDLE = 0, @@ -196,7 +193,7 @@ struct cmd_read_write_memory { of this field is the Host in WRITE command or the Wilink in READ command. 
*/ u8 value[MAX_READ_SIZE]; -} __attribute__ ((packed)); +} __packed; #define CMDMBOX_HEADER_LEN 4 #define CMDMBOX_INFO_ELEM_HEADER_LEN 4 @@ -243,14 +240,14 @@ struct wl1271_cmd_join { u8 ssid[IW_ESSID_MAX_SIZE]; u8 ctrl; /* JOIN_CMD_CTRL_* */ u8 reserved[3]; -} __attribute__ ((packed)); +} __packed; struct cmd_enabledisable_path { struct wl1271_cmd_header header; u8 channel; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; #define WL1271_RATE_AUTOMATIC 0 @@ -266,7 +263,7 @@ struct wl1271_cmd_template_set { u8 aflags; u8 reserved; u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; -} __attribute__ ((packed)); +} __packed; #define TIM_ELE_ID 5 #define PARTIAL_VBM_MAX 251 @@ -278,7 +275,7 @@ struct wl1271_tim { u8 dtim_period; u8 bitmap_ctrl; u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ -} __attribute__ ((packed)); +} __packed; enum wl1271_cmd_ps_mode { STATION_ACTIVE_MODE, @@ -298,7 +295,7 @@ struct wl1271_cmd_ps_params { */ u8 hang_over_period; __le32 null_data_rate; -} __attribute__ ((packed)); +} __packed; /* HW encryption keys */ #define NUM_ACCESS_CATEGORIES_COPY 4 @@ -348,77 +345,12 @@ struct wl1271_cmd_set_keys { u8 key[MAX_KEY_SIZE]; __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; -} __attribute__ ((packed)); - - -#define WL1271_SCAN_MAX_CHANNELS 24 -#define WL1271_SCAN_DEFAULT_TAG 1 -#define WL1271_SCAN_CURRENT_TX_PWR 0 -#define WL1271_SCAN_OPT_ACTIVE 0 -#define WL1271_SCAN_OPT_PASSIVE 1 -#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 -#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ -#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ -#define WL1271_SCAN_BAND_2_4_GHZ 0 -#define WL1271_SCAN_BAND_5_GHZ 1 -#define WL1271_SCAN_BAND_DUAL 2 - -struct basic_scan_params { - __le32 rx_config_options; - __le32 rx_filter_options; - /* Scan option flags (WL1271_SCAN_OPT_*) */ - __le16 scan_options; - /* Number of scan channels in the list (maximum 30) */ - u8 num_channels; - /* This field indicates the number of probe requests to send - per channel for an active scan */ - u8 num_probe_requests; - /* Rate bit field for sending the probes */ - __le32 tx_rate; - u8 tid_trigger; - u8 ssid_len; - /* in order to align */ - u8 padding1[2]; - u8 ssid[IW_ESSID_MAX_SIZE]; - /* Band to scan */ - u8 band; - u8 use_ssid_list; - u8 scan_tag; - u8 padding2; -} __attribute__ ((packed)); - -struct basic_scan_channel_params { - /* Duration in TU to wait for frames on a channel for active scan */ - __le32 min_duration; - __le32 max_duration; - __le32 bssid_lsb; - __le16 bssid_msb; - u8 early_termination; - u8 tx_power_att; - u8 channel; - /* FW internal use only! 
*/ - u8 dfs_candidate; - u8 activity_detected; - u8 pad; -} __attribute__ ((packed)); - -struct wl1271_cmd_scan { - struct wl1271_cmd_header header; - - struct basic_scan_params params; - struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; -} __attribute__ ((packed)); - -struct wl1271_cmd_trigger_scan_to { - struct wl1271_cmd_header header; - - __le32 timeout; -} __attribute__ ((packed)); +} __packed; struct wl1271_cmd_test_header { u8 id; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; enum wl1271_channel_tune_bands { WL1271_CHANNEL_TUNE_BAND_2_4, @@ -439,25 +371,31 @@ struct wl1271_general_parms_cmd { struct wl1271_cmd_test_header test; - u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE]; - s8 reserved[23]; -} __attribute__ ((packed)); + struct wl1271_ini_general_params general_params; -#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29 -#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104 + u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 sr_sen_n_p; + u8 sr_sen_n_p_gain; + u8 sr_sen_nrn; + u8 sr_sen_prn; + u8 padding[3]; +} __packed; struct wl1271_radio_parms_cmd { struct wl1271_cmd_header header; struct wl1271_cmd_test_header test; - u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE]; - u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE]; + /* Static radio parameters */ + struct wl1271_ini_band_params_2 static_params_2; + struct wl1271_ini_band_params_5 static_params_5; - u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE]; - u8 reserved; - u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE]; -} __attribute__ ((packed)); + /* Dynamic radio parameters */ + struct wl1271_ini_fem_params_2 dyn_params_2; + u8 padding2; + struct wl1271_ini_fem_params_5 dyn_params_5; + u8 padding3[2]; +} __packed; struct wl1271_cmd_cal_channel_tune { struct wl1271_cmd_header header; @@ -468,7 +406,7 @@ struct wl1271_cmd_cal_channel_tune { u8 channel; __le16 radio_status; -} __attribute__ ((packed)); +} __packed; struct wl1271_cmd_cal_update_ref_point { struct wl1271_cmd_header header; @@ -479,7 +417,7 @@ struct wl1271_cmd_cal_update_ref_point { __le32 ref_detector; u8 sub_band; u8 padding[3]; -} __attribute__ ((packed)); +} __packed; #define MAX_TLV_LENGTH 400 #define MAX_NVS_VERSION_LENGTH 12 @@ -501,7 +439,7 @@ struct wl1271_cmd_cal_p2g { u8 sub_band_mask; u8 padding2; -} __attribute__ ((packed)); +} __packed; /* @@ -529,6 +467,6 @@ struct wl1271_cmd_disconnect { u8 type; u8 padding; -} __attribute__ ((packed)); +} __packed; #endif /* __WL1271_CMD_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h index d046d044b5bd8a640acb99bfcc08259e2654b1f6..0435ffda8f7340b14cbe9dfcee73f492a80c6b3e 100644 --- a/drivers/net/wireless/wl12xx/wl1271_conf.h +++ b/drivers/net/wireless/wl12xx/wl1271_conf.h @@ -873,6 +873,13 @@ struct conf_conn_settings { */ u8 ps_poll_threshold; + /* + * PS Poll failure recovery ACTIVE period length + * + * Range: u32 (ms) + */ + u32 ps_poll_recovery_period; + /* * Configuration of signal average weights. */ @@ -948,14 +955,6 @@ struct conf_radio_parms { u8 fem; }; -struct conf_init_settings { - /* - * Configure radio parameters. 
- */ - struct conf_radio_parms radioparam; - -}; - struct conf_itrim_settings { /* enable dco itrim */ u8 enable; @@ -1022,7 +1021,6 @@ struct conf_drv_settings { struct conf_rx_settings rx; struct conf_tx_settings tx; struct conf_conn_settings conn; - struct conf_init_settings init; struct conf_itrim_settings itrim; struct conf_pm_config_settings pm_config; struct conf_roam_trigger_settings roam_trigger; diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c index cf37aa6eb13725cd1f0ef2cb1dd5d70bd1ed69f2..25ce2cd5e3f3fa406f1d7fb3aa574da9d5113631 100644 --- a/drivers/net/wireless/wl12xx/wl1271_event.c +++ b/drivers/net/wireless/wl12xx/wl1271_event.c @@ -26,36 +26,64 @@ #include "wl1271_io.h" #include "wl1271_event.h" #include "wl1271_ps.h" +#include "wl1271_scan.h" #include "wl12xx_80211.h" -static int wl1271_event_scan_complete(struct wl1271 *wl, - struct event_mailbox *mbox) +void wl1271_pspoll_work(struct work_struct *work) { - wl1271_debug(DEBUG_EVENT, "status: 0x%x", - mbox->scheduled_scan_status); - - if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) { - if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { - /* 2.4 GHz band scanned, scan 5 GHz band, pretend - * to the wl1271_cmd_scan function that we are not - * scanning as it checks that. - */ - clear_bit(WL1271_FLAG_SCANNING, &wl->flags); - /* FIXME: ie missing! */ - wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, - NULL, 0, - wl->scan.active, - wl->scan.high_prio, - WL1271_SCAN_BAND_5_GHZ, - wl->scan.probe_requests); - } else { - mutex_unlock(&wl->mutex); - ieee80211_scan_completed(wl->hw, false); - mutex_lock(&wl->mutex); - clear_bit(WL1271_FLAG_SCANNING, &wl->flags); - } + struct delayed_work *dwork; + struct wl1271 *wl; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, pspoll_work); + + wl1271_debug(DEBUG_EVENT, "pspoll work"); + + mutex_lock(&wl->mutex); + + if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags)) + goto out; + + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + goto out; + + /* + * if we end up here, then we were in powersave when the pspoll + * delivery failure occurred, and no-one changed state since, so + * we should go back to powersave. + */ + wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, true); + +out: + mutex_unlock(&wl->mutex); +}; + +static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl) +{ + int delay = wl->conf.conn.ps_poll_recovery_period; + int ret; + + wl->ps_poll_failures++; + if (wl->ps_poll_failures == 1) + wl1271_info("AP with dysfunctional ps-poll, " + "trying to work around it."); + + /* force active mode receive data from the AP */ + if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { + ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, true); + if (ret < 0) + return; + set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); + ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work, + msecs_to_jiffies(delay)); } - return 0; + + /* + * If already in active mode, lets we should be getting data from + * the AP right away. If we enter PSM too fast after this, and data + * remains on the AP, we will get another event like this, and we'll + * go into active once more. 
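/*
 * [Editorial sketch, not part of the patch] Stand-alone model of the
 * recovery window implemented above: a ps-poll delivery failure forces
 * the station out of power save, and pspoll_work, scheduled
 * ps_poll_recovery_period ms later (700 ms in the default config),
 * re-enters power save unless mac80211 changed the PS state in the
 * meantime, which clears the flag. Names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool powersave = true;
static bool pspoll_failure;		/* models WL1271_FLAG_PSPOLL_FAILURE */

static void on_pspoll_delivery_fail(void)
{
	if (powersave) {
		powersave = false;	/* go active so the AP can drain its queue */
		pspoll_failure = true;	/* remember to return to power save */
	}
}

static void on_mac80211_ps_change(bool enable)
{
	pspoll_failure = false;		/* cancels the pending recovery */
	powersave = enable;
}

static void pspoll_work(void)		/* runs after the recovery period */
{
	if (pspoll_failure) {
		pspoll_failure = false;
		powersave = true;	/* nothing changed state, back to PSM */
	}
}

int main(void)
{
	on_pspoll_delivery_fail();
	pspoll_work();
	printf("powersave=%d\n", powersave);	/* 1: back in power save */

	/* a PS change from mac80211 cancels a pending recovery */
	on_pspoll_delivery_fail();
	on_mac80211_ps_change(false);
	pspoll_work();
	printf("powersave=%d\n", powersave);	/* 0: recovery was cancelled */
	return 0;
}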
+ */ } static int wl1271_event_ps_report(struct wl1271 *wl, @@ -163,9 +191,19 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); if (vector & SCAN_COMPLETE_EVENT_ID) { - ret = wl1271_event_scan_complete(wl, mbox); - if (ret < 0) - return ret; + wl1271_debug(DEBUG_EVENT, "status: 0x%x", + mbox->scheduled_scan_status); + + wl1271_scan_stm(wl); + } + + /* disable dynamic PS when requested by the firmware */ + if (vector & SOFT_GEMINI_SENSE_EVENT_ID && + wl->bss_type == BSS_TYPE_STA_BSS) { + if (mbox->soft_gemini_sense_info) + ieee80211_disable_dyn_ps(wl->vif); + else + ieee80211_enable_dyn_ps(wl->vif); } /* @@ -191,6 +229,9 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) return ret; } + if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) + wl1271_event_pspoll_delivery_fail(wl); + if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT"); if (wl->vif) diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h index 58371008f270f29b375151570d8dd597e02a350a..e4751667cf5ef905b8c842883beb3a037b392c90 100644 --- a/drivers/net/wireless/wl12xx/wl1271_event.h +++ b/drivers/net/wireless/wl12xx/wl1271_event.h @@ -85,7 +85,7 @@ struct event_debug_report { __le32 report_1; __le32 report_2; __le32 report_3; -} __attribute__ ((packed)); +} __packed; #define NUM_OF_RSSI_SNR_TRIGGERS 8 @@ -116,10 +116,11 @@ struct event_mailbox { u8 ps_status; u8 reserved_5[29]; -} __attribute__ ((packed)); +} __packed; int wl1271_event_unmask(struct wl1271 *wl); void wl1271_event_mbox_config(struct wl1271 *wl); int wl1271_event_handle(struct wl1271 *wl, u8 mbox); +void wl1271_pspoll_work(struct work_struct *work); #endif diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/wl1271_ini.h new file mode 100644 index 0000000000000000000000000000000000000000..2313047d401524ad62b4b396f3462cafe94e1060 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_ini.h @@ -0,0 +1,123 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
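/*
 * [Editorial sketch, not part of the patch] The event path above is a
 * plain bitmask dispatch on the mailbox event vector; each set bit
 * selects one handler. Condensed, illustrative view of the handlers
 * touched by this hunk (names as in the patch):
 */
static void example_event_dispatch(struct wl1271 *wl,
				   struct event_mailbox *mbox, u32 vector)
{
	if (vector & SCAN_COMPLETE_EVENT_ID)
		wl1271_scan_stm(wl);	/* advance the new scan state machine */

	if ((vector & SOFT_GEMINI_SENSE_EVENT_ID) &&
	    wl->bss_type == BSS_TYPE_STA_BSS) {
		/* firmware asks to own PS timing while BT coexistence is active */
		if (mbox->soft_gemini_sense_info)
			ieee80211_disable_dyn_ps(wl->vif);
		else
			ieee80211_enable_dyn_ps(wl->vif);
	}

	if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
		wl1271_event_pspoll_delivery_fail(wl);
}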
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_INI_H__ +#define __WL1271_INI_H__ + +#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16 + +struct wl1271_ini_general_params { + u8 ref_clock; + u8 settling_time; + u8 clk_valid_on_wakeup; + u8 dc2dc_mode; + u8 dual_mode_select; + u8 tx_bip_fem_auto_detect; + u8 tx_bip_fem_manufacturer; + u8 general_settings; + u8 sr_state; + u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM]; +} __packed; + +#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15 + +struct wl1271_ini_band_params_2 { + u8 rx_trace_insertion_loss; + u8 tx_trace_loss; + u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; +} __packed; + +#define WL1271_INI_RATE_GROUP_COUNT 6 +#define WL1271_INI_CHANNEL_COUNT_2 14 + +struct wl1271_ini_fem_params_2 { + __le16 tx_bip_ref_pd_voltage; + u8 tx_bip_ref_power; + u8 tx_bip_ref_offset; + u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT]; + u8 rx_fem_insertion_loss; + u8 degraded_low_to_normal_thr; + u8 normal_to_degraded_high_thr; +} __packed; + +#define WL1271_INI_CHANNEL_COUNT_5 35 +#define WL1271_INI_SUB_BAND_COUNT_5 7 + +struct wl1271_ini_band_params_5 { + u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; +} __packed; + +struct wl1271_ini_fem_params_5 { + __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5]; + u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT]; + u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 degraded_low_to_normal_thr; + u8 normal_to_degraded_high_thr; +} __packed; + + +/* NVS data structure */ +#define WL1271_INI_NVS_SECTION_SIZE 468 +#define WL1271_INI_FEM_MODULE_COUNT 2 + +#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800 + +struct wl1271_nvs_file { + /* NVS section */ + u8 nvs[WL1271_INI_NVS_SECTION_SIZE]; + + /* INI section */ + struct wl1271_ini_general_params general_params; + u8 padding1; + struct wl1271_ini_band_params_2 stat_radio_params_2; + u8 padding2; + struct { + struct wl1271_ini_fem_params_2 params; + u8 padding; + } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT]; + struct wl1271_ini_band_params_5 stat_radio_params_5; + u8 padding3; + struct { + struct wl1271_ini_fem_params_5 params; + u8 padding; + } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; +} __packed; + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c index 
b7d9137851acd1ec71af9fdcb3271a9a773082a0..9d68f0012f05af67317ff6cfc9668b833e1c9d00 100644 --- a/drivers/net/wireless/wl12xx/wl1271_main.c +++ b/drivers/net/wireless/wl12xx/wl1271_main.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -45,6 +44,7 @@ #include "wl1271_cmd.h" #include "wl1271_boot.h" #include "wl1271_testmode.h" +#include "wl1271_scan.h" #define WL1271_BOOT_RETRIES 3 @@ -55,7 +55,7 @@ static struct conf_drv_settings default_conf = { [CONF_SG_HV3_MAX_OVERRIDE] = 0, [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400, [CONF_SG_BT_LOAD_RATIO] = 50, - [CONF_SG_AUTO_PS_MODE] = 0, + [CONF_SG_AUTO_PS_MODE] = 1, [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, [CONF_SG_ANTENNA_CONFIGURATION] = 0, @@ -234,18 +234,14 @@ static struct conf_drv_settings default_conf = { .beacon_rx_timeout = 10000, .broadcast_timeout = 20000, .rx_broadcast_in_ps = 1, - .ps_poll_threshold = 20, + .ps_poll_threshold = 10, + .ps_poll_recovery_period = 700, .bet_enable = CONF_BET_MODE_ENABLE, .bet_max_consecutive = 10, .psm_entry_retries = 3, .keep_alive_interval = 55000, .max_listen_interval = 20, }, - .init = { - .radioparam = { - .fem = 1, - } - }, .itrim = { .enable = false, .timeout = 50000, @@ -566,14 +562,21 @@ static int wl1271_fetch_nvs(struct wl1271 *wl) return ret; } - if (fw->size != sizeof(struct wl1271_nvs_file)) { + /* + * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band + * configurations) can be removed when those NVS files stop floating + * around. + */ + if (fw->size != sizeof(struct wl1271_nvs_file) && + (fw->size != WL1271_INI_LEGACY_NVS_FILE_SIZE || + wl1271_11a_enabled())) { wl1271_error("nvs size is not as expected: %zu != %zu", fw->size, sizeof(struct wl1271_nvs_file)); ret = -EILSEQ; goto out; } - wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); + wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL); if (!wl->nvs) { wl1271_error("could not allocate memory for the nvs file"); @@ -581,8 +584,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl) goto out; } - memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file)); - out: release_firmware(fw); @@ -811,93 +812,6 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) return NETDEV_TX_OK; } -static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, - void *arg) -{ - struct net_device *dev; - struct wireless_dev *wdev; - struct wiphy *wiphy; - struct ieee80211_hw *hw; - struct wl1271 *wl; - struct wl1271 *wl_temp; - struct in_device *idev; - struct in_ifaddr *ifa = arg; - int ret = 0; - - /* FIXME: this ugly function should probably be implemented in the - * mac80211, and here should only be a simple callback handling actual - * setting of the filters. Now we need to dig up references to - * various structures to gain access to what we need. - * Also, because of this, there is no "initial" setting of the filter - * in "op_start", because we don't want to dig up struct net_device - * there - the filter will be set upon first change of the interface - * IP address. */ - - dev = ifa->ifa_dev->dev; - - wdev = dev->ieee80211_ptr; - if (wdev == NULL) - return NOTIFY_DONE; - - wiphy = wdev->wiphy; - if (wiphy == NULL) - return NOTIFY_DONE; - - hw = wiphy_priv(wiphy); - if (hw == NULL) - return NOTIFY_DONE; - - /* Check that the interface is one supported by this driver. 
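/*
 * [Editorial note with a hedged sketch, not part of the patch] The
 * relaxed size check above also accepts the 800-byte legacy NVS image,
 * while kmemdup() still copies a full sizeof(struct wl1271_nvs_file).
 * If a legacy image really is shorter than the structure, a bounded
 * copy would be safer; one possible variant (wl1271 symbols as defined
 * in this patch):
 */
static int example_copy_nvs(struct wl1271 *wl, const struct firmware *fw)
{
	size_t len = min(fw->size, sizeof(struct wl1271_nvs_file));

	wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
	if (!wl->nvs)
		return -ENOMEM;

	/* copy only what the firmware blob actually provides */
	memcpy(wl->nvs, fw->data, len);
	return 0;
}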
*/ - wl_temp = hw->priv; - list_for_each_entry(wl, &wl_list, list) { - if (wl == wl_temp) - break; - } - if (wl != wl_temp) - return NOTIFY_DONE; - - /* Get the interface IP address for the device. "ifa" will become - NULL if: - - there is no IPV4 protocol address configured - - there are multiple (virtual) IPV4 addresses configured - When "ifa" is NULL, filtering will be disabled. - */ - ifa = NULL; - idev = dev->ip_ptr; - if (idev) - ifa = idev->ifa_list; - - if (ifa && ifa->ifa_next) - ifa = NULL; - - mutex_lock(&wl->mutex); - - if (wl->state == WL1271_STATE_OFF) - goto out; - - ret = wl1271_ps_elp_wakeup(wl, false); - if (ret < 0) - goto out; - if (ifa) - ret = wl1271_acx_arp_ip_filter(wl, true, - (u8 *)&ifa->ifa_address, - ACX_IPV4_VERSION); - else - ret = wl1271_acx_arp_ip_filter(wl, false, NULL, - ACX_IPV4_VERSION); - wl1271_ps_elp_sleep(wl); - -out: - mutex_unlock(&wl->mutex); - - return NOTIFY_OK; -} - -static struct notifier_block wl1271_dev_notifier = { - .notifier_call = wl1271_dev_notify, -}; - - static int wl1271_op_start(struct ieee80211_hw *hw) { wl1271_debug(DEBUG_MAC80211, "mac80211 start"); @@ -925,6 +839,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; + struct wiphy *wiphy = hw->wiphy; int retries = WL1271_BOOT_RETRIES; int ret = 0; @@ -978,6 +893,12 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, wl->state = WL1271_STATE_ON; wl1271_info("firmware booted (%s)", wl->chip.fw_ver); + + /* update hw/fw version info in wiphy struct */ + wiphy->hw_version = wl->chip.id; + strncpy(wiphy->fw_version, wl->chip.fw_ver, + sizeof(wiphy->fw_version)); + goto out; irq_disable: @@ -1001,10 +922,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, out: mutex_unlock(&wl->mutex); - if (!ret) { + if (!ret) list_add(&wl->list, &wl_list); - register_inetaddr_notifier(&wl1271_dev_notifier); - } return ret; } @@ -1015,8 +934,6 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw, struct wl1271 *wl = hw->priv; int i; - unregister_inetaddr_notifier(&wl1271_dev_notifier); - mutex_lock(&wl->mutex); wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); @@ -1026,10 +943,17 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw, WARN_ON(wl->state != WL1271_STATE_ON); - if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) { + /* enable dyn ps just in case (if left on due to fw crash etc) */ + if (wl->bss_type == BSS_TYPE_STA_BSS) + ieee80211_enable_dyn_ps(wl->vif); + + if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { mutex_unlock(&wl->mutex); ieee80211_scan_completed(wl->hw, true); mutex_lock(&wl->mutex); + wl->scan.state = WL1271_SCAN_STATE_IDLE; + kfree(wl->scan.scanned_ch); + wl->scan.scanned_ch = NULL; } wl->state = WL1271_STATE_OFF; @@ -1040,11 +964,12 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw, cancel_work_sync(&wl->irq_work); cancel_work_sync(&wl->tx_work); + cancel_delayed_work_sync(&wl->pspoll_work); mutex_lock(&wl->mutex); /* let's notify MAC80211 about the remaining pending TX frames */ - wl1271_tx_flush(wl); + wl1271_tx_reset(wl); wl1271_power_off(wl); memset(wl->bssid, 0, ETH_ALEN); @@ -1241,6 +1166,42 @@ static u32 wl1271_min_rate_get(struct wl1271 *wl) return rate; } +static int wl1271_handle_idle(struct wl1271 *wl, bool idle) +{ + int ret; + + if (idle) { + if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { + ret = wl1271_unjoin(wl); + if (ret < 0) + goto out; + } + wl->rate_set = wl1271_min_rate_get(wl); + wl->sta_rate_set = 0; + 
ret = wl1271_acx_rate_policies(wl); + if (ret < 0) + goto out; + ret = wl1271_acx_keep_alive_config( + wl, CMD_TEMPL_KLV_IDX_NULL_DATA, + ACX_KEEP_ALIVE_TPL_INVALID); + if (ret < 0) + goto out; + set_bit(WL1271_FLAG_IDLE, &wl->flags); + } else { + /* increment the session counter */ + wl->session_counter++; + if (wl->session_counter >= SESSION_COUNTER_MAX) + wl->session_counter = 0; + ret = wl1271_dummy_join(wl); + if (ret < 0) + goto out; + clear_bit(WL1271_FLAG_IDLE, &wl->flags); + } + +out: + return ret; +} + static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) { struct wl1271 *wl = hw->priv; @@ -1255,6 +1216,15 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) conf->power_level, conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use"); + /* + * mac80211 will go to idle nearly immediately after transmitting some + * frames, such as the deauth. To make sure those frames reach the air, + * wait here until the TX queue is fully flushed. + */ + if ((changed & IEEE80211_CONF_CHANGE_IDLE) && + (conf->flags & IEEE80211_CONF_IDLE)) + wl1271_tx_flush(wl); + mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) @@ -1295,24 +1265,18 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_IDLE) { - if (conf->flags & IEEE80211_CONF_IDLE && - test_bit(WL1271_FLAG_JOINED, &wl->flags)) - wl1271_unjoin(wl); - else if (!(conf->flags & IEEE80211_CONF_IDLE)) - wl1271_dummy_join(wl); - - if (conf->flags & IEEE80211_CONF_IDLE) { - wl->rate_set = wl1271_min_rate_get(wl); - wl->sta_rate_set = 0; - wl1271_acx_rate_policies(wl); - wl1271_acx_keep_alive_config( - wl, CMD_TEMPL_KLV_IDX_NULL_DATA, - ACX_KEEP_ALIVE_TPL_INVALID); - set_bit(WL1271_FLAG_IDLE, &wl->flags); - } else - clear_bit(WL1271_FLAG_IDLE, &wl->flags); + ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE); + if (ret < 0) + wl1271_warning("idle mode change failed %d", ret); } + /* + * if mac80211 changes the PSM mode, make sure the mode is not + * incorrectly changed after the pspoll failure active window. + */ + if (changed & IEEE80211_CONF_CHANGE_PS) + clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); + if (conf->flags & IEEE80211_CONF_PS && !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); @@ -1595,13 +1559,9 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, goto out; if (wl1271_11a_enabled()) - ret = wl1271_cmd_scan(hw->priv, ssid, len, - req->ie, req->ie_len, 1, 0, - WL1271_SCAN_BAND_DUAL, 3); + ret = wl1271_scan(hw->priv, ssid, len, req); else - ret = wl1271_cmd_scan(hw->priv, ssid, len, - req->ie, req->ie_len, 1, 0, - WL1271_SCAN_BAND_2_4_GHZ, 3); + ret = wl1271_scan(hw->priv, ssid, len, req); wl1271_ps_elp_sleep(wl); @@ -1774,6 +1734,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, wl->aid = bss_conf->aid; set_assoc = true; + wl->ps_poll_failures = 0; + /* * use basic rates from AP, and determine lowest rate * to use with control frames. 
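/*
 * [Editorial sketch, not part of the patch] Ordering note for the idle
 * hunk above: the reworked wl1271_tx_flush() (see wl1271_tx.c further
 * down) takes wl->mutex itself, so the flush has to run before
 * wl1271_op_config() acquires the mutex. Condensed:
 */
static void example_idle_transition(struct wl1271 *wl,
				    struct ieee80211_conf *conf, u32 changed)
{
	if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
	    (conf->flags & IEEE80211_CONF_IDLE))
		wl1271_tx_flush(wl);	/* must not hold wl->mutex here */

	mutex_lock(&wl->mutex);
	/* ... the rest of wl1271_op_config() runs under the mutex ... */
	mutex_unlock(&wl->mutex);
}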
@@ -1823,6 +1785,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); wl->aid = 0; + /* re-enable dynamic ps - just in case */ + ieee80211_enable_dyn_ps(wl->vif); + /* revert back to minimum rates for the current band */ wl1271_set_band_rate(wl); wl->basic_rate = wl1271_min_rate_get(wl); @@ -1871,6 +1836,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, } } + if (changed & BSS_CHANGED_ARP_FILTER) { + __be32 addr = bss_conf->arp_addr_list[0]; + WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); + + if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled) + ret = wl1271_acx_arp_ip_filter(wl, true, addr); + else + ret = wl1271_acx_arp_ip_filter(wl, false, addr); + + if (ret < 0) + goto out_sleep; + } + if (do_join) { ret = wl1271_join(wl, set_assoc); if (ret < 0) { @@ -1929,6 +1907,48 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue, return ret; } +static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw) +{ + + struct wl1271 *wl = hw->priv; + u64 mactime = ULLONG_MAX; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf"); + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + ret = wl1271_acx_tsf_info(wl, &mactime); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + return mactime; +} + +static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct wl1271 *wl = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + + if (idx != 0) + return -ENOENT; + + survey->channel = conf->channel; + survey->filled = SURVEY_INFO_NOISE_DBM; + survey->noise = wl->noise; + + return 0; +} /* can't be const, mac80211 writes to this */ static struct ieee80211_rate wl1271_rates[] = { @@ -1991,7 +2011,7 @@ static struct ieee80211_channel wl1271_channels[] = { }; /* mapping to indexes for wl1271_rates */ -const static u8 wl1271_rate_to_idx_2ghz[] = { +static const u8 wl1271_rate_to_idx_2ghz[] = { /* MCS rates are used only with 11n */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ @@ -2103,7 +2123,7 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = { }; /* mapping to indexes for wl1271_rates_5ghz */ -const static u8 wl1271_rate_to_idx_5ghz[] = { +static const u8 wl1271_rate_to_idx_5ghz[] = { /* MCS rates are used only with 11n */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ @@ -2139,7 +2159,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = { .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), }; -const static u8 *wl1271_band_rate_to_idx[] = { +static const u8 *wl1271_band_rate_to_idx[] = { [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz, [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz }; @@ -2158,6 +2178,8 @@ static const struct ieee80211_ops wl1271_ops = { .bss_info_changed = wl1271_op_bss_info_changed, .set_rts_threshold = wl1271_op_set_rts_threshold, .conf_tx = wl1271_op_conf_tx, + .get_tsf = wl1271_op_get_tsf, + .get_survey = wl1271_op_get_survey, CFG80211_TESTMODE_CMD(wl1271_tm_cmd) }; @@ -2350,15 +2372,13 @@ struct ieee80211_hw *wl1271_alloc_hw(void) goto err_hw_alloc; } - plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL); + plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL); if (!plat_dev) { wl1271_error("could not allocate platform_device"); ret = -ENOMEM; goto 
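/*
 * [Editorial sketch, not part of the patch] The BSS_CHANGED_ARP_FILTER
 * hunk above replaces the inetaddr notifier removed earlier in this
 * patch: mac80211 now hands the interface's IPv4 address list straight
 * to the driver, so there is no more digging through struct in_device.
 * Condensed, illustrative view (names as in the patch):
 */
static int example_arp_filter(struct wl1271 *wl,
			      struct ieee80211_bss_conf *bss_conf)
{
	__be32 addr = bss_conf->arp_addr_list[0];

	/* offload only the single-address case, otherwise disable the filter */
	if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled)
		return wl1271_acx_arp_ip_filter(wl, true, addr);

	return wl1271_acx_arp_ip_filter(wl, false, addr);
}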
err_plat_alloc; } - memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device)); - wl = hw->priv; memset(wl, 0, sizeof(*wl)); @@ -2370,6 +2390,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void) skb_queue_head_init(&wl->tx_queue); INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); + INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); wl->channel = WL1271_DEFAULT_CHANNEL; wl->beacon_int = WL1271_DEFAULT_BEACON_INT; wl->default_key = 0; diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c index b98fb643fab0cd674fd33e9b3969e95cab4ea0ef..019aa79cd9dfc8cdc25c49973c93d5ecf6592d0f 100644 --- a/drivers/net/wireless/wl12xx/wl1271_rx.c +++ b/drivers/net/wireless/wl12xx/wl1271_rx.c @@ -53,13 +53,14 @@ static void wl1271_rx_status(struct wl1271 *wl, status->band = wl->band; status->rate_idx = wl1271_rate_to_idx(wl, desc->rate); + status->signal = desc->rssi; + /* - * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the - * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we - * only need the mactime for monitor mode. For now the mactime is - * not valid, so RX_FLAG_TSFT should not be set + * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we + * need to divide by two for now, but TI has been discussing about + * changing it. This needs to be rechecked. */ - status->signal = desc->rssi; + wl->noise = desc->rssi - (desc->snr >> 1); status->freq = ieee80211_channel_to_frequency(desc->channel); diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h index b89be4758e7884b78718295de7554906d9b9518e..13a232333b13fd5cb6e5fbbd260b7e5d9195d6c4 100644 --- a/drivers/net/wireless/wl12xx/wl1271_rx.h +++ b/drivers/net/wireless/wl12xx/wl1271_rx.h @@ -113,7 +113,7 @@ struct wl1271_rx_descriptor { u8 process_id; u8 pad_len; u8 reserved; -} __attribute__ ((packed)); +} __packed; void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..fec43eed8c553424231c6b990132b35bf8b7167b --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_scan.c @@ -0,0 +1,257 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
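/*
 * [Editorial sketch, not part of the patch] The rx hunk above derives
 * the noise value reported by the new get_survey() callback from the
 * per-frame descriptor: noise = rssi - snr/2, with the divide-by-two
 * caveat spelled out in the FIXME. Stand-alone form of the arithmetic:
 */
static int example_noise_dbm(int rssi_dbm, int snr)
{
	/* noise = signal - SNR; the descriptor SNR is halved per the FIXME */
	return rssi_dbm - (snr >> 1);
}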
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include + +#include "wl1271.h" +#include "wl1271_cmd.h" +#include "wl1271_scan.h" +#include "wl1271_acx.h" + +static int wl1271_get_scan_channels(struct wl1271 *wl, + struct cfg80211_scan_request *req, + struct basic_scan_channel_params *channels, + enum ieee80211_band band, bool passive) +{ + int i, j; + u32 flags; + + for (i = 0, j = 0; + i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS; + i++) { + + flags = req->channels[i]->flags; + + if (!wl->scan.scanned_ch[i] && + !(flags & IEEE80211_CHAN_DISABLED) && + ((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) && + (req->channels[i]->band == band)) { + + wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", + req->channels[i]->band, + req->channels[i]->center_freq); + wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X", + req->channels[i]->hw_value, + req->channels[i]->flags); + wl1271_debug(DEBUG_SCAN, + "max_antenna_gain %d, max_power %d", + req->channels[i]->max_antenna_gain, + req->channels[i]->max_power); + wl1271_debug(DEBUG_SCAN, "beacon_found %d", + req->channels[i]->beacon_found); + + channels[j].min_duration = + cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); + channels[j].max_duration = + cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); + channels[j].early_termination = 0; + channels[j].tx_power_att = req->channels[i]->max_power; + channels[j].channel = req->channels[i]->hw_value; + + memset(&channels[j].bssid_lsb, 0xff, 4); + memset(&channels[j].bssid_msb, 0xff, 2); + + /* Mark the channels we already used */ + wl->scan.scanned_ch[i] = true; + + j++; + } + } + + return j; +} + +#define WL1271_NOTHING_TO_SCAN 1 + +static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, + bool passive, u32 basic_rate) +{ + struct wl1271_cmd_scan *cmd; + struct wl1271_cmd_trigger_scan_to *trigger; + int ret; + u16 scan_options = 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + if (!cmd || !trigger) { + ret = -ENOMEM; + goto out; + } + + /* We always use high priority scans */ + scan_options = WL1271_SCAN_OPT_PRIORITY_HIGH; + if(passive) + scan_options |= WL1271_SCAN_OPT_PASSIVE; + cmd->params.scan_options = cpu_to_le16(scan_options); + + cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, + cmd->channels, + band, passive); + if (cmd->params.n_ch == 0) { + ret = WL1271_NOTHING_TO_SCAN; + goto out; + } + + cmd->params.tx_rate = cpu_to_le32(basic_rate); + cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD); + cmd->params.rx_filter_options = + cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN); + + cmd->params.n_probe_reqs = WL1271_SCAN_PROBE_REQS; + cmd->params.tx_rate = cpu_to_le32(basic_rate); + cmd->params.tid_trigger = 0; + cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; + + if (band == IEEE80211_BAND_2GHZ) + cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ; + else + cmd->params.band = WL1271_SCAN_BAND_5_GHZ; + + if (wl->scan.ssid_len && wl->scan.ssid) { + cmd->params.ssid_len = wl->scan.ssid_len; + memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); + } + + ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len, + wl->scan.req->ie, wl->scan.req->ie_len, + band); + if (ret < 0) { + wl1271_error("PROBE request template failed"); + goto out; + } + + /* disable the timeout */ + trigger->timeout = 0; + ret = 
wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, + sizeof(*trigger), 0); + if (ret < 0) { + wl1271_error("trigger scan to failed for hw scan"); + goto out; + } + + wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("SCAN failed"); + goto out; + } + +out: + kfree(cmd); + kfree(trigger); + return ret; +} + +void wl1271_scan_stm(struct wl1271 *wl) +{ + int ret; + + switch (wl->scan.state) { + case WL1271_SCAN_STATE_IDLE: + break; + + case WL1271_SCAN_STATE_2GHZ_ACTIVE: + ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, false, + wl->conf.tx.basic_rate); + if (ret == WL1271_NOTHING_TO_SCAN) { + wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; + wl1271_scan_stm(wl); + } + + break; + + case WL1271_SCAN_STATE_2GHZ_PASSIVE: + ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, true, + wl->conf.tx.basic_rate); + if (ret == WL1271_NOTHING_TO_SCAN) { + if (wl1271_11a_enabled()) + wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; + else + wl->scan.state = WL1271_SCAN_STATE_DONE; + wl1271_scan_stm(wl); + } + + break; + + case WL1271_SCAN_STATE_5GHZ_ACTIVE: + ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, false, + wl->conf.tx.basic_rate_5); + if (ret == WL1271_NOTHING_TO_SCAN) { + wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; + wl1271_scan_stm(wl); + } + + break; + + case WL1271_SCAN_STATE_5GHZ_PASSIVE: + ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, true, + wl->conf.tx.basic_rate_5); + if (ret == WL1271_NOTHING_TO_SCAN) { + wl->scan.state = WL1271_SCAN_STATE_DONE; + wl1271_scan_stm(wl); + } + + break; + + case WL1271_SCAN_STATE_DONE: + mutex_unlock(&wl->mutex); + ieee80211_scan_completed(wl->hw, false); + mutex_lock(&wl->mutex); + + kfree(wl->scan.scanned_ch); + wl->scan.scanned_ch = NULL; + + wl->scan.state = WL1271_SCAN_STATE_IDLE; + break; + + default: + wl1271_error("invalid scan state"); + break; + } +} + +int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, + struct cfg80211_scan_request *req) +{ + if (wl->scan.state != WL1271_SCAN_STATE_IDLE) + return -EBUSY; + + wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE; + + if (ssid_len && ssid) { + wl->scan.ssid_len = ssid_len; + memcpy(wl->scan.ssid, ssid, ssid_len); + } else { + wl->scan.ssid_len = 0; + } + + wl->scan.req = req; + + wl->scan.scanned_ch = kzalloc(req->n_channels * + sizeof(*wl->scan.scanned_ch), + GFP_KERNEL); + wl1271_scan_stm(wl); + + return 0; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.h b/drivers/net/wireless/wl12xx/wl1271_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..f1815700f5f9da97464cba2f727920fd45d8f219 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_scan.h @@ -0,0 +1,109 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
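/*
 * [Editorial sketch, not part of the patch] The hardware scan is now a
 * small state machine: 2.4 GHz active -> 2.4 GHz passive -> 5 GHz
 * active -> 5 GHz passive -> done. In each state the firmware is asked
 * to scan up to 24 still-unvisited channels matching that band/passive
 * combination; the state advances only when nothing is left to scan
 * for that step, otherwise the same step repeats on the next
 * SCAN_COMPLETE event. Simplified stand-alone model of the advancing
 * logic, assuming a host C compiler:
 */
#include <stdbool.h>
#include <stdio.h>

enum scan_state { S_IDLE, S_2G_ACTIVE, S_2G_PASSIVE,
		  S_5G_ACTIVE, S_5G_PASSIVE, S_DONE };

/* pretend firmware call: true when at least one channel was scanned */
static bool scan_send(enum scan_state s) { return s == S_2G_ACTIVE; }

static enum scan_state next(enum scan_state s, bool have_5ghz)
{
	switch (s) {
	case S_2G_ACTIVE:  return S_2G_PASSIVE;
	case S_2G_PASSIVE: return have_5ghz ? S_5G_ACTIVE : S_DONE;
	case S_5G_ACTIVE:  return S_5G_PASSIVE;
	case S_5G_PASSIVE: return S_DONE;
	default:           return S_IDLE;
	}
}

int main(void)
{
	enum scan_state s = S_2G_ACTIVE;

	/* skip over empty steps, as wl1271_scan_stm() does on NOTHING_TO_SCAN */
	while (s != S_DONE && !scan_send(s))
		s = next(s, true);
	printf("scanning in state %d\n", s);
	return 0;
}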
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_SCAN_H__ +#define __WL1271_SCAN_H__ + +#include "wl1271.h" + +int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, + struct cfg80211_scan_request *req); +int wl1271_scan_build_probe_req(struct wl1271 *wl, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, u8 band); +void wl1271_scan_stm(struct wl1271 *wl); + +#define WL1271_SCAN_MAX_CHANNELS 24 +#define WL1271_SCAN_DEFAULT_TAG 1 +#define WL1271_SCAN_CURRENT_TX_PWR 0 +#define WL1271_SCAN_OPT_ACTIVE 0 +#define WL1271_SCAN_OPT_PASSIVE 1 +#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 +#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ +#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ +#define WL1271_SCAN_BAND_2_4_GHZ 0 +#define WL1271_SCAN_BAND_5_GHZ 1 +#define WL1271_SCAN_PROBE_REQS 3 + +enum { + WL1271_SCAN_STATE_IDLE, + WL1271_SCAN_STATE_2GHZ_ACTIVE, + WL1271_SCAN_STATE_2GHZ_PASSIVE, + WL1271_SCAN_STATE_5GHZ_ACTIVE, + WL1271_SCAN_STATE_5GHZ_PASSIVE, + WL1271_SCAN_STATE_DONE +}; + +struct basic_scan_params { + __le32 rx_config_options; + __le32 rx_filter_options; + /* Scan option flags (WL1271_SCAN_OPT_*) */ + __le16 scan_options; + /* Number of scan channels in the list (maximum 30) */ + u8 n_ch; + /* This field indicates the number of probe requests to send + per channel for an active scan */ + u8 n_probe_reqs; + /* Rate bit field for sending the probes */ + __le32 tx_rate; + u8 tid_trigger; + u8 ssid_len; + /* in order to align */ + u8 padding1[2]; + u8 ssid[IW_ESSID_MAX_SIZE]; + /* Band to scan */ + u8 band; + u8 use_ssid_list; + u8 scan_tag; + u8 padding2; +} __packed; + +struct basic_scan_channel_params { + /* Duration in TU to wait for frames on a channel for active scan */ + __le32 min_duration; + __le32 max_duration; + __le32 bssid_lsb; + __le16 bssid_msb; + u8 early_termination; + u8 tx_power_att; + u8 channel; + /* FW internal use only! 
*/ + u8 dfs_candidate; + u8 activity_detected; + u8 pad; +} __packed; + +struct wl1271_cmd_scan { + struct wl1271_cmd_header header; + + struct basic_scan_params params; + struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; +} __packed; + +struct wl1271_cmd_trigger_scan_to { + struct wl1271_cmd_header header; + + __le32 timeout; +} __packed; + +#endif /* __WL1271_SCAN_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c index d3d6f302f7058854178ca87dfa0713d8a4004e67..7059b5cccf0f8fff9651017607e4d18b75d78b30 100644 --- a/drivers/net/wireless/wl12xx/wl1271_sdio.c +++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include "wl1271.h" #include "wl12xx_80211.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c index 5189b812f939e4744b60b46ad637bd7332d1c11e..96d25fb50495eac40cc160619ab7676d1a933091 100644 --- a/drivers/net/wireless/wl12xx/wl1271_spi.c +++ b/drivers/net/wireless/wl12xx/wl1271_spi.c @@ -461,3 +461,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho "); MODULE_AUTHOR("Juuso Oikarinen "); MODULE_FIRMWARE(WL1271_FW_NAME); +MODULE_ALIAS("spi:wl1271"); diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c index 554deb4d024eae483d6895a5da9b86868d26c158..6e0952f79e9a79e942a4ed3182ab6f419cba07c6 100644 --- a/drivers/net/wireless/wl12xx/wl1271_testmode.c +++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c @@ -199,7 +199,14 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) buf = nla_data(tb[WL1271_TM_ATTR_DATA]); len = nla_len(tb[WL1271_TM_ATTR_DATA]); - if (len != sizeof(struct wl1271_nvs_file)) { + /* + * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band + * configurations) can be removed when those NVS files stop floating + * around. 
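/*
 * [Editorial note with a hedged sketch, not part of the patch] The
 * MODULE_ALIAS("spi:wl1271") added to wl1271_spi.c above lets the SPI
 * driver be autoloaded when a board registers a matching device. A
 * typical, purely hypothetical board-file entry that would trigger it:
 */
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "wl1271",	/* matches the new alias */
		.bus_num	= 1,		/* board-specific assumption */
		.chip_select	= 0,
		.max_speed_hz	= 48000000,
		.mode		= SPI_MODE_0,
	},
};
/* registered at board init: spi_register_board_info(example_board_info, 1); */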
+ */ + if (len != sizeof(struct wl1271_nvs_file) && + (len != WL1271_INI_LEGACY_NVS_FILE_SIZE || + wl1271_11a_enabled())) { wl1271_error("nvs size is not as expected: %zu != %zu", len, sizeof(struct wl1271_nvs_file)); return -EMSGSIZE; @@ -209,7 +216,7 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) kfree(wl->nvs); - wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); + wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); if (!wl->nvs) { wl1271_error("could not allocate memory for the nvs file"); ret = -ENOMEM; diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c index 62db79508ddfa812b7b96f01cd355a0928b9d97c..c592cc2e9fe88690bcfb97014a423489fbbedb6d 100644 --- a/drivers/net/wireless/wl12xx/wl1271_tx.c +++ b/drivers/net/wireless/wl12xx/wl1271_tx.c @@ -36,6 +36,7 @@ static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) for (i = 0; i < ACX_TX_DESCRIPTORS; i++) if (wl->tx_frames[i] == NULL) { wl->tx_frames[i] = skb; + wl->tx_frames_cnt++; return i; } @@ -73,8 +74,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra) wl1271_debug(DEBUG_TX, "tx_allocate: size: %d, blocks: %d, id: %d", total_len, total_blocks, id); - } else + } else { wl->tx_frames[id] = NULL; + wl->tx_frames_cnt--; + } return ret; } @@ -358,6 +361,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, /* return the packet to the stack */ ieee80211_tx_status(wl->hw, skb); wl->tx_frames[result->id] = NULL; + wl->tx_frames_cnt--; } /* Called upon reception of a TX complete interrupt */ @@ -412,7 +416,7 @@ void wl1271_tx_complete(struct wl1271 *wl) } /* caller must hold wl->mutex */ -void wl1271_tx_flush(struct wl1271 *wl) +void wl1271_tx_reset(struct wl1271 *wl) { int i; struct sk_buff *skb; @@ -421,7 +425,7 @@ void wl1271_tx_flush(struct wl1271 *wl) /* control->flags = 0; FIXME */ while ((skb = skb_dequeue(&wl->tx_queue))) { - wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb); + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); ieee80211_tx_status(wl->hw, skb); } @@ -429,6 +433,32 @@ void wl1271_tx_flush(struct wl1271 *wl) if (wl->tx_frames[i] != NULL) { skb = wl->tx_frames[i]; wl->tx_frames[i] = NULL; + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); ieee80211_tx_status(wl->hw, skb); } + wl->tx_frames_cnt = 0; +} + +#define WL1271_TX_FLUSH_TIMEOUT 500000 + +/* caller must *NOT* hold wl->mutex */ +void wl1271_tx_flush(struct wl1271 *wl) +{ + unsigned long timeout; + timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); + + while (!time_after(jiffies, timeout)) { + mutex_lock(&wl->mutex); + wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", + wl->tx_frames_cnt); + if ((wl->tx_frames_cnt == 0) && + skb_queue_empty(&wl->tx_queue)) { + mutex_unlock(&wl->mutex); + return; + } + mutex_unlock(&wl->mutex); + msleep(1); + } + + wl1271_warning("Unable to flush all TX buffers, timed out."); } diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h index 3b8b7ac253fd544ed594d45f9ea7e74f51e59486..48bf92621c03390f9d6e40efa51f79674c766108 100644 --- a/drivers/net/wireless/wl12xx/wl1271_tx.h +++ b/drivers/net/wireless/wl12xx/wl1271_tx.h @@ -80,7 +80,7 @@ struct wl1271_tx_hw_descr { /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ u8 aid; u8 reserved; -} __attribute__ ((packed)); +} __packed; enum wl1271_tx_hw_res_status { TX_SUCCESS = 0, @@ -115,13 +115,13 @@ struct wl1271_tx_hw_res_descr { u8 rate_class_index; /* for 4-byte alignment. 
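/*
 * [Editorial sketch, not part of the patch] The reworked
 * wl1271_tx_flush() above is a bounded wait: it polls tx_frames_cnt
 * and the software queue under the mutex, sleeps 1 ms between checks,
 * and gives up after 500 ms. The tx_frames_cnt bookkeeping added in
 * this file is what makes the check cheap. Condensed form of the loop:
 */
static void example_bounded_flush(struct wl1271 *wl)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(500000);

	while (!time_after(jiffies, timeout)) {
		bool empty;

		mutex_lock(&wl->mutex);
		empty = (wl->tx_frames_cnt == 0) &&
			skb_queue_empty(&wl->tx_queue);
		mutex_unlock(&wl->mutex);

		if (empty)
			return;
		msleep(1);
	}
	/* fall through: some frames could not be flushed before the timeout */
}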
*/ u8 spare; -} __attribute__ ((packed)); +} __packed; struct wl1271_tx_hw_res_if { __le32 tx_result_fw_counter; __le32 tx_result_host_counter; struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; -} __attribute__ ((packed)); +} __packed; static inline int wl1271_tx_get_queue(int queue) { @@ -158,6 +158,7 @@ static inline int wl1271_tx_ac_to_tid(int ac) void wl1271_tx_work(struct work_struct *work); void wl1271_tx_complete(struct wl1271 *wl); +void wl1271_tx_reset(struct wl1271 *wl); void wl1271_tx_flush(struct wl1271 *wl); u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h index 055d7bc6f59269e9b42dff9c15a61feee6ea03c8..1846280272139ec110e5089d84f70a95b60ffd8b 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_80211.h +++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h @@ -66,41 +66,41 @@ struct ieee80211_header { u8 bssid[ETH_ALEN]; __le16 seq_ctl; u8 payload[0]; -} __attribute__ ((packed)); +} __packed; struct wl12xx_ie_header { u8 id; u8 len; -} __attribute__ ((packed)); +} __packed; /* IEs */ struct wl12xx_ie_ssid { struct wl12xx_ie_header header; char ssid[IW_ESSID_MAX_SIZE]; -} __attribute__ ((packed)); +} __packed; struct wl12xx_ie_rates { struct wl12xx_ie_header header; u8 rates[MAX_SUPPORTED_RATES]; -} __attribute__ ((packed)); +} __packed; struct wl12xx_ie_ds_params { struct wl12xx_ie_header header; u8 channel; -} __attribute__ ((packed)); +} __packed; struct country_triplet { u8 channel; u8 num_channels; u8 max_tx_power; -} __attribute__ ((packed)); +} __packed; struct wl12xx_ie_country { struct wl12xx_ie_header header; u8 country_string[COUNTRY_STRING_LEN]; struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; -} __attribute__ ((packed)); +} __packed; /* Templates */ @@ -115,30 +115,30 @@ struct wl12xx_beacon_template { struct wl12xx_ie_rates ext_rates; struct wl12xx_ie_ds_params ds_params; struct wl12xx_ie_country country; -} __attribute__ ((packed)); +} __packed; struct wl12xx_null_data_template { struct ieee80211_header header; -} __attribute__ ((packed)); +} __packed; struct wl12xx_ps_poll_template { __le16 fc; __le16 aid; u8 bssid[ETH_ALEN]; u8 ta[ETH_ALEN]; -} __attribute__ ((packed)); +} __packed; struct wl12xx_qos_null_data_template { struct ieee80211_header header; __le16 qos_ctl; -} __attribute__ ((packed)); +} __packed; struct wl12xx_probe_req_template { struct ieee80211_header header; struct wl12xx_ie_ssid ssid; struct wl12xx_ie_rates rates; struct wl12xx_ie_rates ext_rates; -} __attribute__ ((packed)); +} __packed; struct wl12xx_probe_resp_template { @@ -151,6 +151,6 @@ struct wl12xx_probe_resp_template { struct wl12xx_ie_rates ext_rates; struct wl12xx_ie_ds_params ds_params; struct wl12xx_ie_country country; -} __attribute__ ((packed)); +} __packed; #endif diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h index 8816e371fd0eda8843a693bd081ac0737b28f0d2..3fbfd19818f1a9eaee1e85b26aed04e224b0980b 100644 --- a/drivers/net/wireless/wl3501.h +++ b/drivers/net/wireless/wl3501.h @@ -231,12 +231,12 @@ struct iw_mgmt_info_element { but sizeof(enum) > sizeof(u8) :-( */ u8 len; u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct iw_mgmt_essid_pset { struct iw_mgmt_info_element el; u8 essid[IW_ESSID_MAX_SIZE]; -} __attribute__ ((packed)); +} __packed; /* * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly @@ -247,12 +247,12 @@ struct 
iw_mgmt_essid_pset { struct iw_mgmt_data_rset { struct iw_mgmt_info_element el; u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS]; -} __attribute__ ((packed)); +} __packed; struct iw_mgmt_ds_pset { struct iw_mgmt_info_element el; u8 chan; -} __attribute__ ((packed)); +} __packed; struct iw_mgmt_cf_pset { struct iw_mgmt_info_element el; @@ -260,12 +260,12 @@ struct iw_mgmt_cf_pset { u8 cfp_period; u16 cfp_max_duration; u16 cfp_dur_remaining; -} __attribute__ ((packed)); +} __packed; struct iw_mgmt_ibss_pset { struct iw_mgmt_info_element el; u16 atim_window; -} __attribute__ ((packed)); +} __packed; struct wl3501_tx_hdr { u16 tx_cnt; @@ -544,12 +544,12 @@ struct wl3501_80211_tx_plcp_hdr { u8 service; u16 len; u16 crc16; -} __attribute__ ((packed)); +} __packed; struct wl3501_80211_tx_hdr { struct wl3501_80211_tx_plcp_hdr pclp_hdr; struct ieee80211_hdr mac_hdr; -} __attribute__ ((packed)); +} __packed; /* Reserve the beginning Tx space for descriptor use. diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index b0b666019a9321752bb8f69ff4fa8ecca567b01f..43307bd42a697c3bad30ee69a89e573a07146f48 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -42,7 +42,8 @@ static struct zd_reg_alpha2_map reg_alpha2_map[] = { { ZD_REGDOMAIN_IC, "CA" }, { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */ { ZD_REGDOMAIN_JAPAN, "JP" }, - { ZD_REGDOMAIN_JAPAN_ADD, "JP" }, + { ZD_REGDOMAIN_JAPAN_2, "JP" }, + { ZD_REGDOMAIN_JAPAN_3, "JP" }, { ZD_REGDOMAIN_SPAIN, "ES" }, { ZD_REGDOMAIN_FRANCE, "FR" }, }; @@ -855,7 +856,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) if (skb == NULL) return -ENOMEM; if (need_padding) { - /* Make sure the the payload data is 4 byte aligned. */ + /* Make sure the payload data is 4 byte aligned. 
*/ skb_reserve(skb, 2); } diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index 630c298a730ec468e43601900c1c4119b54aa106..a6d86b996c7984a3994ec2c8a69eb7fed26a88e9 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h @@ -35,7 +35,7 @@ struct zd_ctrlset { __le16 current_length; u8 service; __le16 next_frame_length; -} __attribute__((packed)); +} __packed; #define ZD_CS_RESERVED_SIZE 25 @@ -106,7 +106,7 @@ struct zd_ctrlset { struct rx_length_info { __le16 length[3]; __le16 tag; -} __attribute__((packed)); +} __packed; #define RX_LENGTH_INFO_TAG 0x697e @@ -117,7 +117,7 @@ struct rx_status { u8 signal_quality_ofdm; u8 decryption_type; u8 frame_status; -} __attribute__((packed)); +} __packed; /* rx_status field decryption_type */ #define ZD_RX_NO_WEP 0 @@ -153,7 +153,7 @@ struct tx_status { u8 mac[ETH_ALEN]; u8 retry; u8 failure; -} __attribute__((packed)); +} __packed; enum mac_flags { MAC_FIXED_CHANNEL = 0x01, @@ -212,8 +212,9 @@ struct zd_mac { #define ZD_REGDOMAIN_ETSI 0x30 #define ZD_REGDOMAIN_SPAIN 0x31 #define ZD_REGDOMAIN_FRANCE 0x32 -#define ZD_REGDOMAIN_JAPAN_ADD 0x40 +#define ZD_REGDOMAIN_JAPAN_2 0x40 #define ZD_REGDOMAIN_JAPAN 0x41 +#define ZD_REGDOMAIN_JAPAN_3 0x49 enum { MIN_CHANNEL24 = 1, @@ -225,7 +226,7 @@ enum { struct ofdm_plcp_header { u8 prefix[3]; __le16 service; -} __attribute__((packed)); +} __packed; static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) { @@ -252,7 +253,7 @@ struct cck_plcp_header { u8 service; __le16 length; __le16 crc16; -} __attribute__((packed)); +} __packed; static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) { diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index c257940b71b689012c653cb359a589489300ada3..818e1480ca93f1a5e34d13b0e04163b135e68b29 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -844,7 +844,7 @@ static struct urb *alloc_tx_urb(struct zd_usb *usb) * @usb: a &struct zd_usb pointer * @urb: URB to be freed * - * Frees the the transmission URB, which means to put it on the free URB + * Frees the transmission URB, which means to put it on the free URB * list. */ static void free_tx_urb(struct zd_usb *usb, struct urb *urb) diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h index 049f8b91f0204f91782819aadd00f991ef422c3a..1b1655cb7cb4baf6f04d4ae8525a0066e0f117bf 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.h +++ b/drivers/net/wireless/zd1211rw/zd_usb.h @@ -79,17 +79,17 @@ enum control_requests { struct usb_req_read_regs { __le16 id; __le16 addr[0]; -} __attribute__((packed)); +} __packed; struct reg_data { __le16 addr; __le16 value; -} __attribute__((packed)); +} __packed; struct usb_req_write_regs { __le16 id; struct reg_data reg_writes[0]; -} __attribute__((packed)); +} __packed; enum { RF_IF_LE = 0x02, @@ -106,7 +106,7 @@ struct usb_req_rfwrite { /* RF2595: 24 */ __le16 bit_values[0]; /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? 
RF_DATA : 0) */ -} __attribute__((packed)); +} __packed; /* USB interrupt */ @@ -123,12 +123,12 @@ enum usb_int_flags { struct usb_int_header { u8 type; /* must always be 1 */ u8 id; -} __attribute__((packed)); +} __packed; struct usb_int_regs { struct usb_int_header hdr; struct reg_data regs[0]; -} __attribute__((packed)); +} __packed; struct usb_int_retry_fail { struct usb_int_header hdr; @@ -136,7 +136,7 @@ struct usb_int_retry_fail { u8 _dummy; u8 addr[ETH_ALEN]; u8 ibss_wakeup_dest; -} __attribute__((packed)); +} __packed; struct read_regs_int { struct completion completion; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d504e2b6025705ded7ee1ac4524791818d8c5ec5..b50fedcef8ac9dd9872a51ed63cb0b6c3135c778 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev, if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); + netif_notify_peers(netdev); break; case XenbusStateClosing: diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index fdba9cb3a599e65b77522008930f98226722f4c4..9f12026d98e7f9a94a8ac5f42e662de9ce5010f9 100644 --- a/drivers/net/xtsonic.c +++ b/drivers/net/xtsonic.c @@ -93,12 +93,20 @@ static unsigned short known_revisions[] = static int xtsonic_open(struct net_device *dev) { - if (request_irq(dev->irq,sonic_interrupt,IRQF_DISABLED,"sonic",dev)) { + int retval; + + retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, + "sonic", dev); + if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } - return sonic_open(dev); + + retval = sonic_open(dev); + if (retval) + free_irq(dev->irq, dev); + return retval; } static int xtsonic_close(struct net_device *dev) diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index d02be78a41386b2ecb94c249cff8da11776ecb4a..c5c14dd3734f6a022f102417ed510e2f73ca5267 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -357,12 +357,13 @@ static __inline__ int led_get_net_activity(void) rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { const struct net_device_stats *stats; + struct rtnl_link_stats64 temp; struct in_device *in_dev = __in_dev_get_rcu(dev); if (!in_dev || !in_dev->ifa_list) continue; if (ipv4_is_loopback(in_dev->ifa_list->ifa_local)) continue; - stats = dev_get_stats(dev); + stats = dev_get_stats(dev, &temp); rx_total += stats->rx_packets; tx_total += stats->tx_packets; } diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 147bb1a69aba6bb42ac76cb4ccb935fa9645ef19..a75ed3083a6ae266d66b9630140b42db8d07266e 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -295,7 +295,7 @@ claw_driver_group_store(struct device_driver *ddrv, const char *buf, int err; err = ccwgroup_create_from_string(claw_root_dev, claw_group_driver.driver_id, - &claw_ccw_driver, 3, buf); + &claw_ccw_driver, 2, buf); return err ? 
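/*
 * [Editorial sketch, not part of the patch] The xtsonic_open() change
 * above closes a leak on the error path: the IRQ taken with
 * request_irq() is now released again when sonic_open() fails. The
 * same acquire/undo ordering written as the usual goto-unwind idiom
 * (illustrative only; the driver itself returns -EAGAIN on IRQ
 * failure):
 */
static int example_open(struct net_device *dev)
{
	int err;

	err = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
			  "sonic", dev);
	if (err)
		return err;

	err = sonic_open(dev);
	if (err)
		goto err_irq;

	return 0;

err_irq:
	free_irq(dev->irq, dev);	/* undo in reverse order of acquisition */
	return err;
}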
err : count; } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 7a44c38aaf65bdbd7fcc8c69c2712a21c053deeb..d1257768be90053f1a6d93c4c048b2de0bb89bd4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -40,11 +40,7 @@ */ enum qeth_dbf_names { QETH_DBF_SETUP, - QETH_DBF_QERR, - QETH_DBF_TRACE, QETH_DBF_MSG, - QETH_DBF_SENSE, - QETH_DBF_MISC, QETH_DBF_CTRL, QETH_DBF_INFOS /* must be last element */ }; @@ -71,7 +67,19 @@ struct qeth_dbf_info { debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) #define QETH_DBF_TEXT_(name, level, text...) \ - qeth_dbf_longtext(QETH_DBF_##name, level, text) + qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text) + +#define QETH_CARD_TEXT(card, level, text) \ + debug_text_event(card->debug, level, text) + +#define QETH_CARD_HEX(card, level, addr, len) \ + debug_event(card->debug, level, (void *)(addr), len) + +#define QETH_CARD_MESSAGE(card, text...) \ + debug_sprintf_event(card->debug, level, text) + +#define QETH_CARD_TEXT_(card, level, text...) \ + qeth_dbf_longtext(card->debug, level, text) #define SENSE_COMMAND_REJECT_BYTE 0 #define SENSE_COMMAND_REJECT_FLAG 0x80 @@ -180,8 +188,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) #define QETH_IDX_FUNC_LEVEL_OSD 0x0101 -#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108 -#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 +#define QETH_IDX_FUNC_LEVEL_IQD 0x4108 #define QETH_MODELLIST_ARRAY \ {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \ @@ -733,12 +740,15 @@ struct qeth_card { struct qeth_qdio_info qdio; struct qeth_perf_stats perf_stats; int use_hard_stop; + int read_or_write_problem; struct qeth_osn_info osn_info; struct qeth_discipline discipline; atomic_t force_alloc_skb; struct service_level qeth_service_level; struct qdio_ssqd_desc ssqd; + debug_info_t *debug; struct mutex conf_mutex; + struct mutex discipline_mutex; }; struct qeth_card_list_struct { @@ -857,9 +867,10 @@ void qeth_core_get_ethtool_stats(struct net_device *, struct ethtool_stats *, u64 *); void qeth_core_get_strings(struct net_device *, u32, u8 *); void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); -void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); +void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); int qeth_set_access_ctrl_online(struct qeth_card *card); +int qeth_hdr_chk_and_bounce(struct sk_buff *, int); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 13ef46b9d388714abb3c72efaa1dcffc6f259a43..3a5a18a0fc283de5a987365e80dead90a9b6a9ea 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -32,16 +32,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { /* N P A M L V H */ [QETH_DBF_SETUP] = {"qeth_setup", 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, - [QETH_DBF_QERR] = {"qeth_qerr", - 2, 1, 8, 2, &debug_hex_ascii_view, NULL}, - [QETH_DBF_TRACE] = {"qeth_trace", - 4, 1, 8, 3, &debug_hex_ascii_view, NULL}, [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 128, 3, &debug_sprintf_view, NULL}, - [QETH_DBF_SENSE] = {"qeth_sense", - 2, 1, 64, 2, &debug_hex_ascii_view, NULL}, - [QETH_DBF_MISC] = {"qeth_misc", - 2, 1, 256, 2, &debug_hex_ascii_view, NULL}, [QETH_DBF_CTRL] = {"qeth_control", 8, 1, 
QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, }; @@ -65,48 +57,6 @@ static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); -static inline void __qeth_fill_buffer_frag(struct sk_buff *skb, - struct qdio_buffer *buffer, int is_tso, - int *next_element_to_fill) -{ - struct skb_frag_struct *frag; - int fragno; - unsigned long addr; - int element, cnt, dlen; - - fragno = skb_shinfo(skb)->nr_frags; - element = *next_element_to_fill; - dlen = 0; - - if (is_tso) - buffer->element[element].flags = - SBAL_FLAGS_MIDDLE_FRAG; - else - buffer->element[element].flags = - SBAL_FLAGS_FIRST_FRAG; - dlen = skb->len - skb->data_len; - if (dlen) { - buffer->element[element].addr = skb->data; - buffer->element[element].length = dlen; - element++; - } - for (cnt = 0; cnt < fragno; cnt++) { - frag = &skb_shinfo(skb)->frags[cnt]; - addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + - frag->page_offset; - buffer->element[element].addr = (char *)addr; - buffer->element[element].length = frag->size; - if (cnt < (fragno - 1)) - buffer->element[element].flags = - SBAL_FLAGS_MIDDLE_FRAG; - else - buffer->element[element].flags = - SBAL_FLAGS_LAST_FRAG; - element++; - } - *next_element_to_fill = element; -} - static inline const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { @@ -232,7 +182,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; - QETH_DBF_TEXT(TRACE, 5, "clwrklst"); + QETH_CARD_TEXT(card, 5, "clwrklst"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.in_buf_pool.entry_list, list){ list_del(&pool_entry->list); @@ -246,7 +196,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) void *ptr; int i, j; - QETH_DBF_TEXT(TRACE, 5, "alocpool"); + QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); if (!pool_entry) { @@ -273,7 +223,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { - QETH_DBF_TEXT(TRACE, 2, "realcbp"); + QETH_CARD_TEXT(card, 2, "realcbp"); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) @@ -293,7 +243,7 @@ static int qeth_issue_next_read(struct qeth_card *card) int rc; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 5, "issnxrd"); + QETH_CARD_TEXT(card, 5, "issnxrd"); if (card->read.state != CH_STATE_UP) return -EIO; iob = qeth_get_buffer(&card->read); @@ -305,13 +255,14 @@ static int qeth_issue_next_read(struct qeth_card *card) return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); - QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); + QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
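/*
 * [Editorial sketch, not part of the patch] The QETH_CARD_TEXT* macros
 * introduced above log into a per-card s390 debug area (card->debug)
 * instead of the removed global qeth_trace/qeth_qerr/qeth_sense/
 * qeth_misc areas, so traces of different cards no longer interleave.
 * A hedged sketch of how such a per-card area is typically created
 * with the s390 debug facility; the exact parameters are assumptions,
 * not taken from this patch:
 */
static int example_setup_card_debug(struct qeth_card *card, const char *name)
{
	/* 4 pages, 1 area, 8-byte entries, hex/ascii view for dumps */
	card->debug = debug_register(name, 4, 1, 8);
	if (!card->debug)
		return -ENOMEM;
	debug_register_view(card->debug, &debug_hex_ascii_view);
	debug_set_level(card->debug, 2);
	return 0;
}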
" "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&card->read.irq_pending, 0); + card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); } @@ -364,7 +315,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = NULL; - QETH_DBF_TEXT(TRACE, 5, "chkipad"); + QETH_CARD_TEXT(card, 5, "chkipad"); if (IS_IPA(iob->data)) { cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); if (IS_IPA_REPLY(cmd)) { @@ -400,10 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, case IPA_CMD_MODCCID: return cmd; case IPA_CMD_REGISTER_LOCAL_ADDR: - QETH_DBF_TEXT(TRACE, 3, "irla"); + QETH_CARD_TEXT(card, 3, "irla"); break; case IPA_CMD_UNREGISTER_LOCAL_ADDR: - QETH_DBF_TEXT(TRACE, 3, "urla"); + QETH_CARD_TEXT(card, 3, "urla"); break; default: QETH_DBF_MESSAGE(2, "Received data is IPA " @@ -420,7 +371,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) struct qeth_reply *reply, *r; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "clipalst"); + QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { @@ -432,6 +383,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); + atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -448,9 +400,9 @@ static int qeth_check_idx_response(struct qeth_card *card, buffer[4], ((buffer[4] == 0x22) ? " -- try another portname" : "")); - QETH_DBF_TEXT(TRACE, 2, "ckidxres"); - QETH_DBF_TEXT(TRACE, 2, " idxterm"); - QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); + QETH_CARD_TEXT(card, 2, "ckidxres"); + QETH_CARD_TEXT(card, 2, " idxterm"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); if (buffer[4] == 0xf6) { dev_err(&card->gdev->dev, "The qeth device is not configured " @@ -467,8 +419,8 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 4, "setupccw"); card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 4, "setupccw"); if (channel == &card->read) memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); else @@ -481,7 +433,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) { __u8 index; - QETH_DBF_TEXT(TRACE, 6, "getbuff"); + QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); index = channel->io_buf_no; do { if (channel->iob[index].state == BUF_STATE_FREE) { @@ -502,7 +454,7 @@ void qeth_release_buffer(struct qeth_channel *channel, { unsigned long flags; - QETH_DBF_TEXT(TRACE, 6, "relbuff"); + QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); spin_lock_irqsave(&channel->iob_lock, flags); memset(iob->data, 0, QETH_BUFSIZE); iob->state = BUF_STATE_FREE; @@ -553,9 +505,8 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, int keep_reply; int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "sndctlcb"); - card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 4, "sndctlcb"); rc = qeth_check_idx_response(card, iob->data); switch (rc) { case 0: @@ -563,6 +514,7 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, case -EIO: qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); + /* fall through */ default: goto out; } @@ -722,7 +674,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread); void qeth_schedule_recovery(struct qeth_card *card) { - QETH_DBF_TEXT(TRACE, 2, "startrec"); + QETH_CARD_TEXT(card, 2, "startrec"); if 
(qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) schedule_work(&card->kernel_thread_starter); } @@ -732,15 +684,17 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) { int dstat, cstat; char *sense; + struct qeth_card *card; sense = (char *) irb->ecw; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; + card = CARD_FROM_CDEV(cdev); if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { - QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); + QETH_CARD_TEXT(card, 2, "CGENCHK"); dev_warn(&cdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", @@ -753,23 +707,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) if (dstat & DEV_STAT_UNIT_CHECK) { if (sense[SENSE_RESETTING_EVENT_BYTE] & SENSE_RESETTING_EVENT_FLAG) { - QETH_DBF_TEXT(TRACE, 2, "REVIND"); + QETH_CARD_TEXT(card, 2, "REVIND"); return 1; } if (sense[SENSE_COMMAND_REJECT_BYTE] & SENSE_COMMAND_REJECT_FLAG) { - QETH_DBF_TEXT(TRACE, 2, "CMDREJi"); + QETH_CARD_TEXT(card, 2, "CMDREJi"); return 1; } if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { - QETH_DBF_TEXT(TRACE, 2, "AFFE"); + QETH_CARD_TEXT(card, 2, "AFFE"); return 1; } if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { - QETH_DBF_TEXT(TRACE, 2, "ZEROSEN"); + QETH_CARD_TEXT(card, 2, "ZEROSEN"); return 0; } - QETH_DBF_TEXT(TRACE, 2, "DGENCHK"); + QETH_CARD_TEXT(card, 2, "DGENCHK"); return 1; } return 0; @@ -778,6 +732,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) static long __qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { + struct qeth_card *card; + + card = CARD_FROM_CDEV(cdev); + if (!IS_ERR(irb)) return 0; @@ -785,17 +743,15 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, case -EIO: QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", dev_name(&cdev->dev)); - QETH_DBF_TEXT(TRACE, 2, "ckirberr"); - QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); break; case -ETIMEDOUT: dev_warn(&cdev->dev, "A hardware operation timed out" " on the device\n"); - QETH_DBF_TEXT(TRACE, 2, "ckirberr"); - QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { - struct qeth_card *card = CARD_FROM_CDEV(cdev); - if (card && (card->data.ccwdev == cdev)) { card->data.state = CH_STATE_DOWN; wake_up(&card->wait_q); @@ -805,8 +761,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, default: QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", dev_name(&cdev->dev), PTR_ERR(irb)); - QETH_DBF_TEXT(TRACE, 2, "ckirberr"); - QETH_DBF_TEXT(TRACE, 2, " rc???"); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT(card, 2, " rc???"); } return PTR_ERR(irb); } @@ -822,8 +778,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct qeth_cmd_buffer *iob; __u8 index; - QETH_DBF_TEXT(TRACE, 5, "irq"); - if (__qeth_check_irb_error(cdev, intparm, irb)) return; cstat = irb->scsw.cmd.cstat; @@ -833,15 +787,17 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (!card) return; + QETH_CARD_TEXT(card, 5, "irq"); + if (card->read.ccwdev == cdev) { channel = &card->read; - QETH_DBF_TEXT(TRACE, 5, "read"); + QETH_CARD_TEXT(card, 5, "read"); } else if 
(card->write.ccwdev == cdev) { channel = &card->write; - QETH_DBF_TEXT(TRACE, 5, "write"); + QETH_CARD_TEXT(card, 5, "write"); } else { channel = &card->data; - QETH_DBF_TEXT(TRACE, 5, "data"); + QETH_CARD_TEXT(card, 5, "data"); } atomic_set(&channel->irq_pending, 0); @@ -857,12 +813,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, goto out; if (intparm == QETH_CLEAR_CHANNEL_PARM) { - QETH_DBF_TEXT(TRACE, 6, "clrchpar"); + QETH_CARD_TEXT(card, 6, "clrchpar"); /* we don't have to handle this further */ intparm = 0; } if (intparm == QETH_HALT_CHANNEL_PARM) { - QETH_DBF_TEXT(TRACE, 6, "hltchpar"); + QETH_CARD_TEXT(card, 6, "hltchpar"); /* we don't have to handle this further */ intparm = 0; } @@ -963,7 +919,7 @@ void qeth_clear_qdio_buffers(struct qeth_card *card) { int i, j; - QETH_DBF_TEXT(TRACE, 2, "clearqdbf"); + QETH_CARD_TEXT(card, 2, "clearqdbf"); /* clear outbound buffers to free skbs */ for (i = 0; i < card->qdio.no_out_queues; ++i) if (card->qdio.out_qs[i]) { @@ -978,7 +934,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; int i = 0; - QETH_DBF_TEXT(TRACE, 5, "freepool"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.init_pool.entry_list, init_list){ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) @@ -992,7 +947,6 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) { int i, j; - QETH_DBF_TEXT(TRACE, 2, "freeqdbf"); if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == QETH_QDIO_UNINITIALIZED) return; @@ -1089,7 +1043,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); - QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x", + QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", (u8) card->thread_start_mask, (u8) card->thread_allowed_mask, (u8) card->thread_running_mask); @@ -1102,7 +1056,7 @@ static void qeth_start_kernel_thread(struct work_struct *work) { struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); - QETH_DBF_TEXT(TRACE , 2, "strthrd"); + QETH_CARD_TEXT(card , 2, "strthrd"); if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) @@ -1124,6 +1078,7 @@ static int qeth_setup_card(struct qeth_card *card) card->state = CARD_STATE_DOWN; card->lan_online = 0; card->use_hard_stop = 0; + card->read_or_write_problem = 0; card->dev = NULL; spin_lock_init(&card->vlanlock); spin_lock_init(&card->mclock); @@ -1132,6 +1087,7 @@ static int qeth_setup_card(struct qeth_card *card) spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); + mutex_init(&card->discipline_mutex); card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; @@ -1229,8 +1185,8 @@ static int qeth_clear_channel(struct qeth_channel *channel) struct qeth_card *card; int rc; - QETH_DBF_TEXT(TRACE, 3, "clearch"); card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 3, "clearch"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -1253,8 +1209,8 @@ static int qeth_halt_channel(struct qeth_channel *channel) struct qeth_card *card; int rc; - QETH_DBF_TEXT(TRACE, 3, "haltch"); card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 3, "haltch"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_halt(channel->ccwdev, 
QETH_HALT_CHANNEL_PARM); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -1274,7 +1230,7 @@ static int qeth_halt_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; - QETH_DBF_TEXT(TRACE, 3, "haltchs"); + QETH_CARD_TEXT(card, 3, "haltchs"); rc1 = qeth_halt_channel(&card->read); rc2 = qeth_halt_channel(&card->write); rc3 = qeth_halt_channel(&card->data); @@ -1289,7 +1245,7 @@ static int qeth_clear_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; - QETH_DBF_TEXT(TRACE, 3, "clearchs"); + QETH_CARD_TEXT(card, 3, "clearchs"); rc1 = qeth_clear_channel(&card->read); rc2 = qeth_clear_channel(&card->write); rc3 = qeth_clear_channel(&card->data); @@ -1304,8 +1260,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "clhacrd"); - QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 3, "clhacrd"); if (halt) rc = qeth_halt_channels(card); @@ -1318,7 +1273,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "qdioclr"); + QETH_CARD_TEXT(card, 3, "qdioclr"); switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, QETH_QDIO_CLEANING)) { case QETH_QDIO_ESTABLISHED: @@ -1329,7 +1284,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) rc = qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_CLEAR); if (rc) - QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc); + QETH_CARD_TEXT_(card, 3, "1err%d", rc); qdio_free(CARD_DDEV(card)); atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); break; @@ -1340,7 +1295,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) } rc = qeth_clear_halt_card(card, use_halt); if (rc) - QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc); + QETH_CARD_TEXT_(card, 3, "2err%d", rc); card->state = CARD_STATE_DOWN; return rc; } @@ -1432,14 +1387,10 @@ static void qeth_init_func_level(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_IQD: - if (card->ipato.enabled) - card->info.func_level = - QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT; - else - card->info.func_level = - QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT; + card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; break; case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSN: card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; break; default: @@ -1637,15 +1588,18 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, "host\n"); break; case QETH_IDX_ACT_ERR_AUTH: + case QETH_IDX_ACT_ERR_AUTH_USER: dev_err(&card->read.ccwdev->dev, "Setting the device online failed because of " - "insufficient LPAR authorization\n"); + "insufficient authorization\n"); break; default: QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" " negative reply\n", dev_name(&card->read.ccwdev->dev)); } + QETH_CARD_TEXT_(card, 2, "idxread%c", + QETH_IDX_ACT_CAUSE_CODE(iob->data)); goto out; } @@ -1705,8 +1659,12 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long timeout, event_timeout; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "sendctl"); + QETH_CARD_TEXT(card, 2, "sendctl"); + if (card->read_or_write_problem) { + qeth_release_buffer(iob->channel, iob); + return -EIO; + } reply = qeth_alloc_reply(card); if (!reply) { return -ENOMEM; @@ -1732,7 +1690,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, event_timeout = QETH_TIMEOUT; timeout = jiffies + event_timeout; - QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); + QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = 
ccw_device_start(card->write.ccwdev, &card->write.ccw, (addr_t) iob, 0, 0); @@ -1741,7 +1699,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " "ccw_device_start rc = %i\n", dev_name(&card->write.ccwdev->dev), rc); - QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); spin_lock_irqsave(&card->lock, flags); list_del_init(&reply->list); qeth_put_reply(reply); @@ -1778,6 +1736,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, spin_unlock_irqrestore(&reply->card->lock, flags); reply->rc = -ETIME; atomic_inc(&reply->received); + atomic_set(&card->write.irq_pending, 0); + qeth_release_buffer(iob->channel, iob); + card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; wake_up(&reply->wait_q); rc = reply->rc; qeth_put_reply(reply); @@ -1978,7 +1939,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, card->info.link_type = link_type; } else card->info.link_type = 0; - QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type); + QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2035,7 +1996,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, QETH_DBF_TEXT(SETUP, 2, "olmlimit"); dev_err(&card->gdev->dev, "A connection could not be " "established because of an OLM limit\n"); - rc = -EMLINK; + iob->rc = -EMLINK; } QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return rc; @@ -2335,7 +2296,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *entry; - QETH_DBF_TEXT(TRACE, 5, "inwrklst"); + QETH_CARD_TEXT(card, 5, "inwrklst"); list_for_each_entry(entry, &card->qdio.init_pool.entry_list, init_list) { @@ -2522,7 +2483,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int rc; char prot_type; - QETH_DBF_TEXT(TRACE, 4, "sendipa"); + QETH_CARD_TEXT(card, 4, "sendipa"); if (card->options.layer2) if (card->info.type == QETH_CARD_TYPE_OSN) @@ -2534,6 +2495,10 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, qeth_prepare_ipa_cmd(card, iob, prot_type); rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, reply_cb, reply_param); + if (rc == -ETIME) { + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + } return rc; } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); @@ -2582,7 +2547,7 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "defadpcb"); + QETH_CARD_TEXT(card, 4, "defadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code == 0) @@ -2597,7 +2562,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 3, "quyadpcb"); + QETH_CARD_TEXT(card, 3, "quyadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { @@ -2633,7 +2598,7 @@ int qeth_query_setadapterparms(struct qeth_card *card) int rc; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 3, "queryadp"); + QETH_CARD_TEXT(card, 3, "queryadp"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, sizeof(struct qeth_ipacmd_setadpparms)); rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); @@ -2645,13 +2610,12 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, unsigned int qdio_error, const char *dbftext) { if (qdio_error) { - QETH_DBF_TEXT(TRACE, 2, 
dbftext); - QETH_DBF_TEXT(QERR, 2, dbftext); - QETH_DBF_TEXT_(QERR, 2, " F15=%02X", + QETH_CARD_TEXT(card, 2, dbftext); + QETH_CARD_TEXT_(card, 2, " F15=%02X", buf->element[15].flags & 0xff); - QETH_DBF_TEXT_(QERR, 2, " F14=%02X", + QETH_CARD_TEXT_(card, 2, " F14=%02X", buf->element[14].flags & 0xff); - QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); + QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); if ((buf->element[15].flags & 0xff) == 0x12) { card->stats.rx_dropped++; return 0; @@ -2717,8 +2681,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) if (rc) { dev_warn(&card->gdev->dev, "QDIO reported an error, rc=%i\n", rc); - QETH_DBF_TEXT(TRACE, 2, "qinberr"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + QETH_CARD_TEXT(card, 2, "qinberr"); } queue->next_buf_to_init = (queue->next_buf_to_init + count) % QDIO_MAX_BUFFERS_PER_Q; @@ -2731,7 +2694,7 @@ static int qeth_handle_send_error(struct qeth_card *card, { int sbalf15 = buffer->buffer->element[15].flags & 0xff; - QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); + QETH_CARD_TEXT(card, 6, "hdsnderr"); if (card->info.type == QETH_CARD_TYPE_IQD) { if (sbalf15 == 0) { qdio_err = 0; @@ -2747,9 +2710,8 @@ static int qeth_handle_send_error(struct qeth_card *card, if ((sbalf15 >= 15) && (sbalf15 <= 31)) return QETH_SEND_ERROR_RETRY; - QETH_DBF_TEXT(TRACE, 1, "lnkfail"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04x %02x", + QETH_CARD_TEXT(card, 1, "lnkfail"); + QETH_CARD_TEXT_(card, 1, "%04x %02x", (u16)qdio_err, (u8)sbalf15); return QETH_SEND_ERROR_LINK_FAILURE; } @@ -2764,7 +2726,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) if (atomic_read(&queue->used_buffers) >= QETH_HIGH_WATERMARK_PACK){ /* switch non-PACKING -> PACKING */ - QETH_DBF_TEXT(TRACE, 6, "np->pack"); + QETH_CARD_TEXT(queue->card, 6, "np->pack"); if (queue->card->options.performance_stats) queue->card->perf_stats.sc_dp_p++; queue->do_pack = 1; @@ -2787,7 +2749,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) if (atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) { /* switch PACKING -> non-PACKING */ - QETH_DBF_TEXT(TRACE, 6, "pack->np"); + QETH_CARD_TEXT(queue->card, 6, "pack->np"); if (queue->card->options.performance_stats) queue->card->perf_stats.sc_p_dp++; queue->do_pack = 0; @@ -2896,9 +2858,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, /* ignore temporary SIGA errors without busy condition */ if (rc == QDIO_ERROR_SIGA_TARGET) return; - QETH_DBF_TEXT(TRACE, 2, "flushbuf"); - QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card)); + QETH_CARD_TEXT(queue->card, 2, "flushbuf"); + QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); /* this must not happen under normal circumstances. 
if it * happens something is really wrong -> recover */ @@ -2960,10 +2921,9 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, int i; unsigned qeth_send_err; - QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); + QETH_CARD_TEXT(card, 6, "qdouhdl"); if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 2, "achkcond"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + QETH_CARD_TEXT(card, 2, "achkcond"); netif_stop_queue(card->dev); qeth_schedule_recovery(card); return; @@ -3033,13 +2993,11 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue); int qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb, int elems) { - int elements_needed = 0; + int dlen = skb->len - skb->data_len; + int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - + PFN_DOWN((unsigned long)skb->data); - if (skb_shinfo(skb)->nr_frags > 0) - elements_needed = (skb_shinfo(skb)->nr_frags + 1); - if (elements_needed == 0) - elements_needed = 1 + (((((unsigned long) skb->data) % - PAGE_SIZE) + skb->len) >> PAGE_SHIFT); + elements_needed += skb_shinfo(skb)->nr_frags; if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " "(Number=%d / Length=%d). Discarded.\n", @@ -3050,15 +3008,35 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, } EXPORT_SYMBOL_GPL(qeth_get_elements_no); +int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len) +{ + int hroom, inpage, rest; + + if (((unsigned long)skb->data & PAGE_MASK) != + (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { + hroom = skb_headroom(skb); + inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); + rest = len - inpage; + if (rest > hroom) + return 1; + memmove(skb->data - rest, skb->data, skb->len - skb->data_len); + skb->data -= rest; + QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); + } + return 0; +} +EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); + static inline void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, int offset) { - int length = skb->len; + int length = skb->len - skb->data_len; int length_here; int element; char *data; - int first_lap ; + int first_lap, cnt; + struct skb_frag_struct *frag; element = *next_element_to_fill; data = skb->data; @@ -3081,10 +3059,14 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, length -= length_here; if (!length) { if (first_lap) - buffer->element[element].flags = 0; + if (skb_shinfo(skb)->nr_frags) + buffer->element[element].flags = + SBAL_FLAGS_FIRST_FRAG; + else + buffer->element[element].flags = 0; else buffer->element[element].flags = - SBAL_FLAGS_LAST_FRAG; + SBAL_FLAGS_MIDDLE_FRAG; } else { if (first_lap) buffer->element[element].flags = @@ -3097,6 +3079,18 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, element++; first_lap = 0; } + + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + buffer->element[element].addr = (char *)page_to_phys(frag->page) + + frag->page_offset; + buffer->element[element].length = frag->size; + buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; + element++; + } + + if (buffer->element[element - 1].flags) + buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG; *next_element_to_fill = element; } @@ -3137,20 +3131,16 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buf->next_element_to_fill++; } - if (skb_shinfo(skb)->nr_frags == 0) - __qeth_fill_buffer(skb, buffer, large_send, - (int 
*)&buf->next_element_to_fill, offset); - else - __qeth_fill_buffer_frag(skb, buffer, large_send, - (int *)&buf->next_element_to_fill); + __qeth_fill_buffer(skb, buffer, large_send, + (int *)&buf->next_element_to_fill, offset); if (!queue->do_pack) { - QETH_DBF_TEXT(TRACE, 6, "fillbfnp"); + QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); /* set state to PRIMED -> will be flushed */ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); flush_cnt = 1; } else { - QETH_DBF_TEXT(TRACE, 6, "fillbfpa"); + QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); if (queue->card->options.performance_stats) queue->card->perf_stats.skbs_sent_pack++; if (buf->next_element_to_fill >= @@ -3210,7 +3200,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card, rc = dev_queue_xmit(skb); } else { dev_kfree_skb_any(skb); - QETH_DBF_TEXT(QERR, 2, "qrdrop"); + QETH_CARD_TEXT(card, 2, "qrdrop"); } } return 0; @@ -3312,14 +3302,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_ipacmd_setadpparms *setparms; - QETH_DBF_TEXT(TRACE, 4, "prmadpcb"); + QETH_CARD_TEXT(card, 4, "prmadpcb"); cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } card->info.promisc_mode = setparms->data.mode; @@ -3333,7 +3323,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "setprom"); + QETH_CARD_TEXT(card, 4, "setprom"); if (((dev->flags & IFF_PROMISC) && (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || @@ -3343,7 +3333,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) mode = SET_PROMISC_MODE_OFF; if (dev->flags & IFF_PROMISC) mode = SET_PROMISC_MODE_ON; - QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode); + QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, sizeof(struct qeth_ipacmd_setadpparms)); @@ -3360,9 +3350,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu) card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "chgmtu"); + QETH_CARD_TEXT(card, 4, "chgmtu"); sprintf(dbf_text, "%8x", new_mtu); - QETH_DBF_TEXT(TRACE, 4, dbf_text); + QETH_CARD_TEXT(card, 4, dbf_text); if (new_mtu < 64) return -EINVAL; @@ -3382,7 +3372,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev) card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 5, "getstat"); + QETH_CARD_TEXT(card, 5, "getstat"); return &card->stats; } @@ -3393,7 +3383,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "chgmaccb"); + QETH_CARD_TEXT(card, 4, "chgmaccb"); cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || @@ -3413,7 +3403,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "chgmac"); + QETH_CARD_TEXT(card, 4, "chgmac"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, sizeof(struct qeth_ipacmd_setadpparms)); @@ -3433,9 +3423,8 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; - int rc; - QETH_DBF_TEXT(TRACE, 4, "setaccb"); + QETH_CARD_TEXT(card, 4, "setaccb"); cmd = (struct qeth_ipa_cmd *) data; 
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; @@ -3460,7 +3449,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->gdev->dev.kobj.name, access_ctrl_req->subcmd_code, cmd->data.setadapterparms.hdr.return_code); - rc = 0; break; } case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: @@ -3474,7 +3462,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, /* ensure isolation mode is "none" */ card->options.isolation = ISOLATION_MODE_NONE; - rc = -EOPNOTSUPP; break; } case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: @@ -3489,7 +3476,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, /* ensure isolation mode is "none" */ card->options.isolation = ISOLATION_MODE_NONE; - rc = -EOPNOTSUPP; break; } case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: @@ -3503,7 +3489,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, /* ensure isolation mode is "none" */ card->options.isolation = ISOLATION_MODE_NONE; - rc = -EPERM; break; } default: @@ -3517,12 +3502,11 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, /* ensure isolation mode is "none" */ card->options.isolation = ISOLATION_MODE_NONE; - rc = 0; break; } } qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); - return rc; + return 0; } static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, @@ -3533,7 +3517,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; - QETH_DBF_TEXT(TRACE, 4, "setacctl"); + QETH_CARD_TEXT(card, 4, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); @@ -3555,7 +3539,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "setactlo"); + QETH_CARD_TEXT(card, 4, "setactlo"); if ((card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) && @@ -3583,8 +3567,8 @@ void qeth_tx_timeout(struct net_device *dev) { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 4, "txtimeo"); card = dev->ml_priv; + QETH_CARD_TEXT(card, 4, "txtimeo"); card->stats.tx_errors++; qeth_schedule_recovery(card); } @@ -3663,7 +3647,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, { u16 s1, s2; - QETH_DBF_TEXT(TRACE, 4, "sendsnmp"); + QETH_CARD_TEXT(card, 4, "sendsnmp"); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), @@ -3688,7 +3672,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card, unsigned char *data; __u16 data_len; - QETH_DBF_TEXT(TRACE, 3, "snpcmdcb"); + QETH_CARD_TEXT(card, 3, "snpcmdcb"); cmd = (struct qeth_ipa_cmd *) sdata; data = (unsigned char *)((char *)cmd - reply->offset); @@ -3696,13 +3680,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); return 0; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); return 0; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); @@ -3713,13 +3697,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, /* check if there is enough room in userspace */ if ((qinfo->udata_len - 
qinfo->udata_offset) < data_len) { - QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM); + QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); cmd->hdr.return_code = -ENOMEM; return 0; } - QETH_DBF_TEXT_(TRACE, 4, "snore%i", + QETH_CARD_TEXT_(card, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); - QETH_DBF_TEXT_(TRACE, 4, "sseqn%i", + QETH_CARD_TEXT_(card, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ if (cmd->data.setadapterparms.hdr.seq_no == 1) { @@ -3733,9 +3717,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card, } qinfo->udata_offset += data_len; /* check if all replies received ... */ - QETH_DBF_TEXT_(TRACE, 4, "srtot%i", + QETH_CARD_TEXT_(card, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); - QETH_DBF_TEXT_(TRACE, 4, "srseq%i", + QETH_CARD_TEXT_(card, 4, "srseq%i", cmd->data.setadapterparms.hdr.seq_no); if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) @@ -3752,7 +3736,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) struct qeth_arp_query_info qinfo = {0, }; int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "snmpcmd"); + QETH_CARD_TEXT(card, 3, "snmpcmd"); if (card->info.guestlan) return -EOPNOTSUPP; @@ -3764,15 +3748,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; - ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); - if (!ureq) { - QETH_DBF_TEXT(TRACE, 2, "snmpnome"); - return -ENOMEM; - } - if (copy_from_user(ureq, udata, - req_len + sizeof(struct qeth_snmp_ureq_hdr))) { - kfree(ureq); - return -EFAULT; + ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); + if (IS_ERR(ureq)) { + QETH_CARD_TEXT(card, 2, "snmpnome"); + return PTR_ERR(ureq); } qinfo.udata_len = ureq->hdr.data_len; qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); @@ -3991,6 +3970,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card) else goto retry; } + card->read_or_write_problem = 0; rc = qeth_mpc_initialize(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); @@ -4120,13 +4100,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, skb_len -= data_len; if (skb_len) { if (qeth_is_last_sbale(element)) { - QETH_DBF_TEXT(TRACE, 4, "unexeob"); - QETH_DBF_TEXT_(TRACE, 4, "%s", - CARD_BUS_ID(card)); - QETH_DBF_TEXT(QERR, 2, "unexeob"); - QETH_DBF_TEXT_(QERR, 2, "%s", - CARD_BUS_ID(card)); - QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer)); + QETH_CARD_TEXT(card, 4, "unexeob"); + QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); dev_kfree_skb_any(skb); card->stats.rx_errors++; return NULL; @@ -4147,8 +4122,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, return skb; no_mem: if (net_ratelimit()) { - QETH_DBF_TEXT(TRACE, 2, "noskbmem"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + QETH_CARD_TEXT(card, 2, "noskbmem"); } card->stats.rx_dropped++; return NULL; @@ -4164,17 +4138,17 @@ static void qeth_unregister_dbf_views(void) } } -void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...) +void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
{ char dbf_txt_buf[32]; va_list args; - if (level > (qeth_dbf[dbf_nix].id)->level) + if (level > id->level) return; va_start(args, fmt); vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); va_end(args); - debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); + debug_text_event(id, level, dbf_txt_buf); } EXPORT_SYMBOL_GPL(qeth_dbf_longtext); @@ -4282,6 +4256,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) struct device *dev; int rc; unsigned long flags; + char dbf_name[20]; QETH_DBF_TEXT(SETUP, 2, "probedev"); @@ -4297,6 +4272,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) rc = -ENOMEM; goto err_dev; } + + snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", + dev_name(&gdev->dev)); + card->debug = debug_register(dbf_name, 2, 1, 8); + if (!card->debug) { + QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); + rc = -ENOMEM; + goto err_card; + } + debug_register_view(card->debug, &debug_hex_ascii_view); + card->read.ccwdev = gdev->cdev[0]; card->write.ccwdev = gdev->cdev[1]; card->data.ccwdev = gdev->cdev[2]; @@ -4309,12 +4295,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) rc = qeth_determine_card_type(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - goto err_card; + goto err_dbf; } rc = qeth_setup_card(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - goto err_card; + goto err_dbf; } if (card->info.type == QETH_CARD_TYPE_OSN) @@ -4322,7 +4308,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) else rc = qeth_core_create_device_attributes(dev); if (rc) - goto err_card; + goto err_dbf; switch (card->info.type) { case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_OSM: @@ -4352,6 +4338,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) qeth_core_remove_osn_attributes(dev); else qeth_core_remove_device_attributes(dev); +err_dbf: + debug_unregister(card->debug); err_card: qeth_core_free_card(card); err_dev: @@ -4365,16 +4353,19 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); - if (card->discipline.ccwgdriver) { - card->discipline.ccwgdriver->remove(gdev); - qeth_core_free_discipline(card); - } if (card->info.type == QETH_CARD_TYPE_OSN) { qeth_core_remove_osn_attributes(&gdev->dev); } else { qeth_core_remove_device_attributes(&gdev->dev); } + + if (card->discipline.ccwgdriver) { + card->discipline.ccwgdriver->remove(gdev); + qeth_core_free_discipline(card); + } + + debug_unregister(card->debug); write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_del(&card->list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index f9ed24de75140e41630b638458acf22e404f8cbd..e37dd8c4bf4ecd9cebffac2e484a071c843dcf9f 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -616,8 +616,9 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2) #define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12) #define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] -#define QETH_IDX_ACT_ERR_EXCL 0x19 -#define QETH_IDX_ACT_ERR_AUTH 0x1E +#define QETH_IDX_ACT_ERR_EXCL 0x19 +#define QETH_IDX_ACT_ERR_AUTH 0x1E +#define QETH_IDX_ACT_ERR_AUTH_USER 0x20 #define PDU_ENCAPSULATION(buffer) \ (buffer + *(buffer + (*(buffer + 0x0b)) + \ diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 
2eb022ff2610f725bad556a6d67c47371dae13d8..42fa783a70c83b9c65bf0b91472e1f05e220b80f 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -411,7 +411,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, if (!card) return -EINVAL; - mutex_lock(&card->conf_mutex); + mutex_lock(&card->discipline_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; @@ -433,6 +433,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, if (card->options.layer2 == newdis) goto out; else { + card->info.mac_bits = 0; if (card->discipline.ccwgdriver) { card->discipline.ccwgdriver->remove(card->gdev); qeth_core_free_discipline(card); @@ -445,7 +446,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, rc = card->discipline.ccwgdriver->probe(card->gdev); out: - mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return rc ? rc : count; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d43f57a4ac6614b2d8924b9381a4ab77d814390f..830d63524d612ff8bc665f918d6b01400e5c0c88 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -79,7 +79,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = -EOPNOTSUPP; } if (rc) - QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); + QETH_CARD_TEXT_(card, 2, "ioce%d", rc); return rc; } @@ -130,7 +130,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; __u8 *mac; - QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb"); + QETH_CARD_TEXT(card, 2, "L2Sgmacb"); cmd = (struct qeth_ipa_cmd *) data; mac = &cmd->data.setdelmac.mac[0]; /* MAC already registered, needed in couple/uncouple case */ @@ -147,7 +147,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Sgmac"); + QETH_CARD_TEXT(card, 2, "L2Sgmac"); return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, qeth_l2_send_setgroupmac_cb); } @@ -159,7 +159,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; __u8 *mac; - QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb"); + QETH_CARD_TEXT(card, 2, "L2Dgmacb"); cmd = (struct qeth_ipa_cmd *) data; mac = &cmd->data.setdelmac.mac[0]; if (cmd->hdr.return_code) @@ -170,7 +170,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Dgmac"); + QETH_CARD_TEXT(card, 2, "L2Dgmac"); return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, qeth_l2_send_delgroupmac_cb); } @@ -262,15 +262,14 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); + QETH_CARD_TEXT(card, 2, "L2sdvcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. 
" "Continuing\n", cmd->data.setdelvlan.vlan_id, QETH_CARD_IFNAME(card), cmd->hdr.return_code); - QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); - QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); + QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); } return 0; } @@ -281,7 +280,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd); + QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelvlan.vlan_id = i; @@ -292,7 +291,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, static void qeth_l2_process_vlans(struct qeth_card *card, int clear) { struct qeth_vlan_vid *id; - QETH_DBF_TEXT(TRACE, 3, "L2prcvln"); + QETH_CARD_TEXT(card, 3, "L2prcvln"); spin_lock_bh(&card->vlanlock); list_for_each_entry(id, &card->vid_list, list) { if (clear) @@ -310,13 +309,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; - QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid); + QETH_CARD_TEXT_(card, 4, "aid:%d", vid); if (card->info.type == QETH_CARD_TYPE_OSM) { - QETH_DBF_TEXT(TRACE, 3, "aidOSM"); + QETH_CARD_TEXT(card, 3, "aidOSM"); return; } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "aidREC"); + QETH_CARD_TEXT(card, 3, "aidREC"); return; } id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); @@ -334,13 +333,13 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (card->info.type == QETH_CARD_TYPE_OSM) { - QETH_DBF_TEXT(TRACE, 3, "kidOSM"); + QETH_CARD_TEXT(card, 3, "kidOSM"); return; } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "kidREC"); + QETH_CARD_TEXT(card, 3, "kidREC"); return; } spin_lock_bh(&card->vlanlock); @@ -456,7 +455,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, /* else unknown */ default: dev_kfree_skb_any(skb); - QETH_DBF_TEXT(TRACE, 3, "inbunkno"); + QETH_CARD_TEXT(card, 3, "inbunkno"); QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); continue; } @@ -474,7 +473,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 2, "L2sdmac"); + QETH_CARD_TEXT(card, 2, "L2sdmac"); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; @@ -488,10 +487,10 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "L2Smaccb"); + QETH_CARD_TEXT(card, 2, "L2Smaccb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; switch (cmd->hdr.return_code) { case IPA_RC_L2_DUP_MAC: @@ -523,7 +522,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, static int qeth_l2_send_setmac(struct 
qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Setmac"); + QETH_CARD_TEXT(card, 2, "L2Setmac"); return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, qeth_l2_send_setmac_cb); } @@ -534,10 +533,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb"); + QETH_CARD_TEXT(card, 2, "L2Dmaccb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); cmd->hdr.return_code = -EIO; return 0; } @@ -548,7 +547,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Delmac"); + QETH_CARD_TEXT(card, 2, "L2Delmac"); if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) return 0; return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, @@ -594,23 +593,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) struct qeth_card *card = dev->ml_priv; int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "setmac"); + QETH_CARD_TEXT(card, 3, "setmac"); if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) { - QETH_DBF_TEXT(TRACE, 3, "setmcINV"); + QETH_CARD_TEXT(card, 3, "setmcINV"); return -EOPNOTSUPP; } if (card->info.type == QETH_CARD_TYPE_OSN || card->info.type == QETH_CARD_TYPE_OSM || card->info.type == QETH_CARD_TYPE_OSX) { - QETH_DBF_TEXT(TRACE, 3, "setmcTYP"); + QETH_CARD_TEXT(card, 3, "setmcTYP"); return -EOPNOTSUPP; } - QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card)); - QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN); + QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN); if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "setmcREC"); + QETH_CARD_TEXT(card, 3, "setmcREC"); return -ERESTARTSYS; } rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); @@ -627,7 +625,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) if (card->info.type == QETH_CARD_TYPE_OSN) return ; - QETH_DBF_TEXT(TRACE, 3, "setmulti"); + QETH_CARD_TEXT(card, 3, "setmulti"); if (qeth_threads_running(card, QETH_RECOVER_THREAD) && (card->state != CARD_STATE_UP)) return; @@ -714,10 +712,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; } - if (card->info.type != QETH_CARD_TYPE_IQD) + if (card->info.type != QETH_CARD_TYPE_IQD) { + if (qeth_hdr_chk_and_bounce(new_skb, + sizeof(struct qeth_hdr_layer2))) + goto tx_drop; rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); - else + } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, elements, data_offset, hd_len); if (!rc) { @@ -771,11 +772,10 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, card->perf_stats.inbound_start_time = qeth_get_micros(); } if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, + QETH_CARD_TEXT(card, 1, "qdinchk"); + QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element, count); - QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); + QETH_CARD_TEXT_(card, 1, "%04X", queue); qeth_schedule_recovery(card); return; } @@ -799,13 +799,13 @@ static int qeth_l2_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "qethopen"); + QETH_CARD_TEXT(card, 4, "qethopen"); if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; if ((card->info.type != 
QETH_CARD_TYPE_OSN) && (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { - QETH_DBF_TEXT(TRACE, 4, "nomacadr"); + QETH_CARD_TEXT(card, 4, "nomacadr"); return -EPERM; } card->data.state = CH_STATE_UP; @@ -822,7 +822,7 @@ static int qeth_l2_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "qethstop"); + QETH_CARD_TEXT(card, 4, "qethstop"); netif_tx_disable(dev); if (card->state == CARD_STATE_UP) card->state = CARD_STATE_SOFTSETUP; @@ -860,8 +860,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) unregister_netdev(card->dev); card->dev = NULL; } - - qeth_l2_del_all_mc(card); return; } @@ -935,6 +933,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) enum qeth_card_states recover_flag; BUG_ON(!card); + mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 2, "setonlin"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); @@ -1012,6 +1011,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); out: mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; out_remove: @@ -1025,6 +1025,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) else card->state = CARD_STATE_DOWN; mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return rc; } @@ -1040,6 +1041,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, int rc = 0, rc2 = 0, rc3 = 0; enum qeth_card_states recover_flag; + mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); @@ -1060,6 +1062,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, /* let user_space know that device is offline */ kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; } @@ -1074,11 +1077,10 @@ static int qeth_l2_recover(void *ptr) int rc = 0; card = (struct qeth_card *) ptr; - QETH_DBF_TEXT(TRACE, 2, "recover1"); - QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 2, "recover1"); if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) return 0; - QETH_DBF_TEXT(TRACE, 2, "recover2"); + QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); card->use_hard_stop = 1; @@ -1181,12 +1183,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 5, "osndctrd"); + QETH_CARD_TEXT(card, 5, "osndctrd"); wait_event(card->wait_q, atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); - QETH_DBF_TEXT(TRACE, 6, "osnoirqp"); + QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, (addr_t) iob, 0, 0); @@ -1194,7 +1196,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " "ccw_device_start rc = %i\n", rc); - QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); qeth_release_buffer(iob->channel, iob); atomic_set(&card->write.irq_pending, 0); wake_up(&card->wait_q); @@ -1207,7 +1209,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card, { u16 s1, s2; - QETH_DBF_TEXT(TRACE, 4, 
"osndipa"); + QETH_CARD_TEXT(card, 4, "osndipa"); qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); @@ -1225,12 +1227,12 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) struct qeth_card *card; int rc; - QETH_DBF_TEXT(TRACE, 2, "osnsdmc"); if (!dev) return -ENODEV; card = dev->ml_priv; if (!card) return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnsdmc"); if ((card->state != CARD_STATE_UP) && (card->state != CARD_STATE_SOFTSETUP)) return -ENODEV; @@ -1247,13 +1249,13 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev, { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 2, "osnreg"); *dev = qeth_l2_netdev_by_devno(read_dev_no); if (*dev == NULL) return -ENODEV; card = (*dev)->ml_priv; if (!card) return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnreg"); if ((assist_cb == NULL) || (data_cb == NULL)) return -EINVAL; card->osn_info.assist_cb = assist_cb; @@ -1266,12 +1268,12 @@ void qeth_osn_deregister(struct net_device *dev) { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 2, "osndereg"); if (!dev) return; card = dev->ml_priv; if (!card) return; + QETH_CARD_TEXT(card, 2, "osndereg"); card->osn_info.assist_cb = NULL; card->osn_info.data_cb = NULL; return; diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 8447d233d0b33e97ef9e82511cecd1e08119a02f..e705b27ec7dc133c7920f6a50d7a71a2a2204ca2 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -64,5 +64,6 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, const u8 *); int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); +int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); #endif /* __QETH_L3_H__ */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 61adae21a464c5d950c1f4d2b09b11dba10773e0..e22ae248f613eea825eaf8fe48f02bbd078f9818 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -195,7 +195,7 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) } } -static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, +int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr) { struct qeth_ipato_entry *ipatoe; @@ -287,7 +287,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card, addr->users += add ? 
1 : -1; if (add && (addr->type == QETH_IP_TYPE_NORMAL) && qeth_l3_is_addr_covered_by_ipato(card, addr)) { - QETH_DBF_TEXT(TRACE, 2, "tkovaddr"); + QETH_CARD_TEXT(card, 2, "tkovaddr"); addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } list_add_tail(&addr->entry, card->ip_tbd_list); @@ -301,13 +301,13 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "delip"); + QETH_CARD_TEXT(card, 4, "delip"); if (addr->proto == QETH_PROT_IPV4) - QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); + QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); else { - QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); } spin_lock_irqsave(&card->ip_lock, flags); rc = __qeth_l3_insert_ip_todo(card, addr, 0); @@ -320,12 +320,12 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "addip"); + QETH_CARD_TEXT(card, 4, "addip"); if (addr->proto == QETH_PROT_IPV4) - QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); + QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); else { - QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); } spin_lock_irqsave(&card->ip_lock, flags); rc = __qeth_l3_insert_ip_todo(card, addr, 1); @@ -353,10 +353,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card) struct qeth_ipaddr *iptodo; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "delmc"); + QETH_CARD_TEXT(card, 4, "delmc"); iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (!iptodo) { - QETH_DBF_TEXT(TRACE, 2, "dmcnomem"); + QETH_CARD_TEXT(card, 2, "dmcnomem"); return; } iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; @@ -457,8 +457,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) unsigned long flags; int rc; - QETH_DBF_TEXT(TRACE, 2, "sdiplist"); - QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 2, "sdiplist"); + QETH_CARD_HEX(card, 2, &card, sizeof(void *)); if (card->options.sniffer) return; @@ -466,7 +466,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) tbd_list = card->ip_tbd_list; card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); if (!card->ip_tbd_list) { - QETH_DBF_TEXT(TRACE, 0, "silnomem"); + QETH_CARD_TEXT(card, 0, "silnomem"); card->ip_tbd_list = tbd_list; spin_unlock_irqrestore(&card->ip_lock, flags); return; @@ -517,7 +517,7 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, struct qeth_ipaddr *addr, *tmp; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "clearip"); + QETH_CARD_TEXT(card, 4, "clearip"); if (recover && card->options.sniffer) return; spin_lock_irqsave(&card->ip_lock, flags); @@ -577,7 +577,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "setdelmc"); + QETH_CARD_TEXT(card, 4, "setdelmc"); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -615,8 +615,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, struct qeth_ipa_cmd *cmd; __u8 netmask[16]; - QETH_DBF_TEXT(TRACE, 4, "setdelip"); - QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags); + QETH_CARD_TEXT(card, 4, "setdelip"); + 
QETH_CARD_TEXT_(card, 4, "flags%02X", flags); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -645,7 +645,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 4, "setroutg"); + QETH_CARD_TEXT(card, 4, "setroutg"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setrtg.type = (type); @@ -689,7 +689,7 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "setrtg4"); + QETH_CARD_TEXT(card, 3, "setrtg4"); qeth_l3_correct_routing_type(card, &card->options.route4.type, QETH_PROT_IPV4); @@ -709,7 +709,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "setrtg6"); + QETH_CARD_TEXT(card, 3, "setrtg6"); #ifdef CONFIG_QETH_IPV6 if (!qeth_is_supported(card, IPA_IPV6)) @@ -753,7 +753,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 2, "addipato"); + QETH_CARD_TEXT(card, 2, "addipato"); spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != new->proto) @@ -778,7 +778,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *ipatoe, *tmp; unsigned long flags; - QETH_DBF_TEXT(TRACE, 2, "delipato"); + QETH_CARD_TEXT(card, 2, "delipato"); spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { if (ipatoe->proto != proto) @@ -806,11 +806,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "addvipa4"); + QETH_CARD_TEXT(card, 2, "addvipa4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "addvipa6"); + QETH_CARD_TEXT(card, 2, "addvipa6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -841,11 +841,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "delvipa4"); + QETH_CARD_TEXT(card, 2, "delvipa4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "delvipa6"); + QETH_CARD_TEXT(card, 2, "delvipa6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -870,11 +870,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "addrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "addrxip6"); + QETH_CARD_TEXT(card, 2, "addrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -905,11 +905,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "addrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, 
"addrxip6"); + QETH_CARD_TEXT(card, 2, "addrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -929,15 +929,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, int cnt = 3; if (addr->proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "setaddr4"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); + QETH_CARD_TEXT(card, 2, "setaddr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); } else if (addr->proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "setaddr6"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_TEXT(card, 2, "setaddr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); } else { - QETH_DBF_TEXT(TRACE, 2, "setaddr?"); - QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); + QETH_CARD_TEXT(card, 2, "setaddr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); } do { if (addr->is_multicast) @@ -946,10 +946,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, addr->set_flags); if (rc) - QETH_DBF_TEXT(TRACE, 2, "failed"); + QETH_CARD_TEXT(card, 2, "failed"); } while ((--cnt > 0) && rc); if (rc) { - QETH_DBF_TEXT(TRACE, 2, "FAILED"); + QETH_CARD_TEXT(card, 2, "FAILED"); qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); dev_warn(&card->gdev->dev, "Registering IP address %s failed\n", buf); @@ -963,15 +963,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, int rc = 0; if (addr->proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "deladdr4"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); + QETH_CARD_TEXT(card, 2, "deladdr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); } else if (addr->proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "deladdr6"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_TEXT(card, 2, "deladdr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); } else { - QETH_DBF_TEXT(TRACE, 2, "deladdr?"); - QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); + QETH_CARD_TEXT(card, 2, "deladdr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); } if (addr->is_multicast) rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); @@ -979,7 +979,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, addr->del_flags); if (rc) - QETH_DBF_TEXT(TRACE, 2, "failed"); + QETH_CARD_TEXT(card, 2, "failed"); return rc; } @@ -1012,7 +1012,7 @@ static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "adpmode"); + QETH_CARD_TEXT(card, 4, "adpmode"); iob = qeth_get_adapter_cmd(card, command, sizeof(struct qeth_ipacmd_setadpparms)); @@ -1027,7 +1027,7 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 4, "adphstr"); + QETH_CARD_TEXT(card, 4, "adphstr"); if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { rc = qeth_l3_send_setadp_mode(card, @@ -1093,7 +1093,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "defadpcb"); + QETH_CARD_TEXT(card, 4, "defadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code == 0) { @@ 
-1106,13 +1106,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask); + QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); } if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { card->info.tx_csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask); + QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); } return 0; @@ -1125,7 +1125,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "getasscm"); + QETH_CARD_TEXT(card, 4, "getasscm"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -1147,7 +1147,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card, int rc; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "sendassp"); + QETH_CARD_TEXT(card, 4, "sendassp"); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); if (len <= sizeof(__u32)) @@ -1166,7 +1166,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, int rc; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 4, "simassp6"); + QETH_CARD_TEXT(card, 4, "simassp6"); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 0, QETH_PROT_IPV6); rc = qeth_l3_send_setassparms(card, iob, 0, 0, @@ -1182,7 +1182,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, int length = 0; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 4, "simassp4"); + QETH_CARD_TEXT(card, 4, "simassp4"); if (data) length = sizeof(__u32); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, @@ -1196,7 +1196,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "ipaarp"); + QETH_CARD_TEXT(card, 3, "ipaarp"); if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { dev_info(&card->gdev->dev, @@ -1218,7 +1218,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); + QETH_CARD_TEXT(card, 3, "ipaipfrg"); if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { dev_info(&card->gdev->dev, @@ -1243,7 +1243,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "stsrcmac"); + QETH_CARD_TEXT(card, 3, "stsrcmac"); if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { dev_info(&card->gdev->dev, @@ -1265,7 +1265,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "strtvlan"); + QETH_CARD_TEXT(card, 3, "strtvlan"); if (!qeth_is_supported(card, IPA_FULL_VLAN)) { dev_info(&card->gdev->dev, @@ -1289,7 +1289,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "stmcast"); + QETH_CARD_TEXT(card, 3, "stmcast"); if (!qeth_is_supported(card, IPA_MULTICASTING)) { dev_info(&card->gdev->dev, @@ -1349,7 +1349,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "softipv6"); + QETH_CARD_TEXT(card, 3, "softipv6"); if (card->info.type == QETH_CARD_TYPE_IQD) goto out; @@ -1395,7 +1395,7 @@ static int qeth_l3_start_ipa_ipv6(struct 
qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "strtipv6"); + QETH_CARD_TEXT(card, 3, "strtipv6"); if (!qeth_is_supported(card, IPA_IPV6)) { dev_info(&card->gdev->dev, @@ -1412,7 +1412,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); + QETH_CARD_TEXT(card, 3, "stbrdcst"); card->info.broadcast_capable = 0; if (!qeth_is_supported(card, IPA_FILTERING)) { dev_info(&card->gdev->dev, @@ -1512,7 +1512,7 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "strtcsum"); + QETH_CARD_TEXT(card, 3, "strtcsum"); if (card->options.checksum_type == NO_CHECKSUMMING) { dev_info(&card->gdev->dev, @@ -1569,7 +1569,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "sttso"); + QETH_CARD_TEXT(card, 3, "sttso"); if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { dev_info(&card->gdev->dev, @@ -1596,7 +1596,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) static int qeth_l3_start_ipassists(struct qeth_card *card) { - QETH_DBF_TEXT(TRACE, 3, "strtipas"); + QETH_CARD_TEXT(card, 3, "strtipas"); qeth_set_access_ctrl_online(card); /* go on*/ qeth_l3_start_ipa_arp_processing(card); /* go on*/ @@ -1619,7 +1619,7 @@ static int qeth_l3_put_unique_id(struct qeth_card *card) struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "puniqeid"); + QETH_CARD_TEXT(card, 2, "puniqeid"); if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == UNIQUE_ID_NOT_BY_CARD) @@ -1723,7 +1723,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, cmd = (struct qeth_ipa_cmd *)data; rc = cmd->hdr.return_code; if (rc) - QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc); + QETH_CARD_TEXT_(card, 2, "dxter%x", rc); switch (cmd->data.diagass.action) { case QETH_DIAGS_CMD_TRACE_QUERY: break; @@ -1800,7 +1800,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) struct ip_mc_list *im4; char buf[MAX_ADDR_LEN]; - QETH_DBF_TEXT(TRACE, 4, "addmc"); + QETH_CARD_TEXT(card, 4, "addmc"); for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); @@ -1820,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card) struct vlan_group *vg; int i; - QETH_DBF_TEXT(TRACE, 4, "addmcvl"); + QETH_CARD_TEXT(card, 4, "addmcvl"); if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) return; @@ -1844,7 +1844,7 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) { struct in_device *in4_dev; - QETH_DBF_TEXT(TRACE, 4, "chkmcv4"); + QETH_CARD_TEXT(card, 4, "chkmcv4"); in4_dev = in_dev_get(card->dev); if (in4_dev == NULL) return; @@ -1862,7 +1862,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) struct ifmcaddr6 *im6; char buf[MAX_ADDR_LEN]; - QETH_DBF_TEXT(TRACE, 4, "addmc6"); + QETH_CARD_TEXT(card, 4, "addmc6"); for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); @@ -1883,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) struct vlan_group *vg; int i; - QETH_DBF_TEXT(TRACE, 4, "admc6vl"); + QETH_CARD_TEXT(card, 4, "admc6vl"); if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) return; @@ -1907,7 +1907,7 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) { struct inet6_dev *in6_dev; - 
QETH_DBF_TEXT(TRACE, 4, "chkmcv6"); + QETH_CARD_TEXT(card, 4, "chkmcv6"); if (!qeth_is_supported(card, IPA_IPV6)) return ; in6_dev = in6_dev_get(card->dev); @@ -1928,7 +1928,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, struct in_ifaddr *ifa; struct qeth_ipaddr *addr; - QETH_DBF_TEXT(TRACE, 4, "frvaddr4"); + QETH_CARD_TEXT(card, 4, "frvaddr4"); in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); if (!in_dev) @@ -1954,7 +1954,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, struct inet6_ifaddr *ifa; struct qeth_ipaddr *addr; - QETH_DBF_TEXT(TRACE, 4, "frvaddr6"); + QETH_CARD_TEXT(card, 4, "frvaddr6"); in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); if (!in6_dev) @@ -1989,7 +1989,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev, struct qeth_card *card = dev->ml_priv; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "vlanreg"); + QETH_CARD_TEXT(card, 4, "vlanreg"); spin_lock_irqsave(&card->vlanlock, flags); card->vlangrp = grp; spin_unlock_irqrestore(&card->vlanlock, flags); @@ -2005,9 +2005,9 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) struct qeth_card *card = dev->ml_priv; unsigned long flags; - QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "kidREC"); + QETH_CARD_TEXT(card, 3, "kidREC"); return; } spin_lock_irqsave(&card->vlanlock, flags); @@ -2162,7 +2162,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card, break; default: dev_kfree_skb_any(skb); - QETH_DBF_TEXT(TRACE, 3, "inbunkno"); + QETH_CARD_TEXT(card, 3, "inbunkno"); QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); continue; } @@ -2229,7 +2229,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) card = vlan_dev_real_dev(dev)->ml_priv; if (card && card->options.layer2) card = NULL; - QETH_DBF_TEXT_(TRACE, 4, "%d", rc); + if (card) + QETH_CARD_TEXT_(card, 4, "%d", rc); return card ; } @@ -2307,10 +2308,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) } else if (card->options.sniffer && /* HiperSockets trace */ qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { if (dev->flags & IFF_PROMISC) { - QETH_DBF_TEXT(TRACE, 3, "+promisc"); + QETH_CARD_TEXT(card, 3, "+promisc"); qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); } else { - QETH_DBF_TEXT(TRACE, 3, "-promisc"); + QETH_CARD_TEXT(card, 3, "-promisc"); qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); } } @@ -2320,7 +2321,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 3, "setmulti"); + QETH_CARD_TEXT(card, 3, "setmulti"); if (qeth_threads_running(card, QETH_RECOVER_THREAD) && (card->state != CARD_STATE_UP)) return; @@ -2365,7 +2366,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arpstnoe"); + QETH_CARD_TEXT(card, 3, "arpstnoe"); /* * currently GuestLAN only supports the ARP assist function @@ -2417,17 +2418,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, int uentry_size; int i; - QETH_DBF_TEXT(TRACE, 4, "arpquecb"); + QETH_CARD_TEXT(card, 4, "arpquecb"); qinfo = (struct qeth_arp_query_info *) reply->param; cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code); return 0; } if 
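
The qeth_l3 hunks above and below replace the global-TRACE-area macros QETH_DBF_TEXT/QETH_DBF_HEX with the per-card QETH_CARD_TEXT/QETH_CARD_HEX variants, so trace records are kept per device and no longer have to be tagged with the bus ID by hand (the qdio_input_handler hunk further down drops the explicit CARD_BUS_ID record for exactly that reason). The short userspace sketch below only illustrates the per-object trace idea; struct card, card_text() and the fixed-size ring are invented for this sketch and are not the s390 debug-feature or qeth API.

    /* Build/run: cc -o cardtrace cardtrace.c && ./cardtrace */
    #include <stdio.h>

    #define TRACE_ENTRIES 8
    #define TRACE_LEN     16

    struct card {
            const char *bus_id;
            char ring[TRACE_ENTRIES][TRACE_LEN];   /* per-card trace ring */
            unsigned int next;
    };

    /* analogue of QETH_CARD_TEXT(card, level, txt): record into this card's ring */
    static void card_text(struct card *c, int level, const char *txt)
    {
            (void)level;            /* a real debug area would filter on level */
            snprintf(c->ring[c->next % TRACE_ENTRIES], TRACE_LEN, "%s", txt);
            c->next++;
    }

    int main(void)
    {
            struct card a = { .bus_id = "0.0.f500" };
            struct card b = { .bus_id = "0.0.f503" };

            card_text(&a, 2, "setaddr4");
            card_text(&b, 2, "delip");

            /* each card dumps only its own records, already attributed to a device */
            for (unsigned int i = 0; i < a.next && i < TRACE_ENTRIES; i++)
                    printf("%s: %s\n", a.bus_id, a.ring[i]);
            for (unsigned int i = 0; i < b.next && i < TRACE_ENTRIES; i++)
                    printf("%s: %s\n", b.bus_id, b.ring[i]);
            return 0;
    }
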
(cmd->data.setassparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; - QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code); return 0; } qdata = &cmd->data.setassparms.data.query_arp; @@ -2449,14 +2450,14 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < qdata->no_entries * uentry_size){ - QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); + QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); cmd->hdr.return_code = -ENOMEM; goto out_error; } - QETH_DBF_TEXT_(TRACE, 4, "anore%i", + QETH_CARD_TEXT_(card, 4, "anore%i", cmd->data.setassparms.hdr.number_of_replies); - QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); - QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries); + QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); + QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { /* strip off "media specific information" */ @@ -2492,7 +2493,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, unsigned long), void *reply_param) { - QETH_DBF_TEXT(TRACE, 4, "sendarp"); + QETH_CARD_TEXT(card, 4, "sendarp"); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), @@ -2508,7 +2509,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arpquery"); + QETH_CARD_TEXT(card, 3, "arpquery"); if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ IPA_ARP_PROCESSING)) { @@ -2551,7 +2552,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arpadent"); + QETH_CARD_TEXT(card, 3, "arpadent"); /* * currently GuestLAN only supports the ARP assist function @@ -2590,7 +2591,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arprment"); + QETH_CARD_TEXT(card, 3, "arprment"); /* * currently GuestLAN only supports the ARP assist function @@ -2626,7 +2627,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) int rc; int tmp; - QETH_DBF_TEXT(TRACE, 3, "arpflush"); + QETH_CARD_TEXT(card, 3, "arpflush"); /* * currently GuestLAN only supports the ARP assist function @@ -2734,7 +2735,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = -EOPNOTSUPP; } if (rc) - QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); + QETH_CARD_TEXT_(card, 2, "ioce%d", rc); return rc; } @@ -2903,19 +2904,11 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) unsigned long tcpd = (unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4; int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); - int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd); + int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); elements += skb_shinfo(skb)->nr_frags; return elements; } -static inline int qeth_l3_tso_check(struct sk_buff *skb) -{ - int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) - - (unsigned long)skb->data; - return (((unsigned long)skb->data & PAGE_MASK) != - (((unsigned long)skb->data + len) & PAGE_MASK)); -} - static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int rc; @@ -3015,8 +3008,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) (cast_type == RTN_UNSPEC)) { hdr = (struct qeth_hdr *)skb_push(new_skb, 
sizeof(struct qeth_hdr_tso)); - if (qeth_l3_tso_check(new_skb)) - QETH_DBF_MESSAGE(2, "tso skb misaligned\n"); memset(hdr, 0, sizeof(struct qeth_hdr_tso)); qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); qeth_tso_fill_header(card, hdr, new_skb); @@ -3047,10 +3038,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) elements_needed += elems; nr_frags = skb_shinfo(new_skb)->nr_frags; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (card->info.type != QETH_CARD_TYPE_IQD) { + int len; + if (large_send == QETH_LARGE_SEND_TSO) + len = ((unsigned long)tcp_hdr(new_skb) + + tcp_hdr(new_skb)->doff * 4) - + (unsigned long)new_skb->data; + else + len = sizeof(struct qeth_hdr_layer3); + + if (qeth_hdr_chk_and_bounce(new_skb, len)) + goto tx_drop; rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements_needed); - else + } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, elements_needed, data_offset, 0); @@ -3103,7 +3104,7 @@ static int qeth_l3_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "qethopen"); + QETH_CARD_TEXT(card, 4, "qethopen"); if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; card->data.state = CH_STATE_UP; @@ -3119,7 +3120,7 @@ static int qeth_l3_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "qethstop"); + QETH_CARD_TEXT(card, 4, "qethstop"); netif_tx_disable(dev); if (card->state == CARD_STATE_UP) card->state = CARD_STATE_SOFTSETUP; @@ -3312,11 +3313,10 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, card->perf_stats.inbound_start_time = qeth_get_micros(); } if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", + QETH_CARD_TEXT(card, 1, "qdinchk"); + QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element, count); - QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); + QETH_CARD_TEXT_(card, 1, "%04X", queue); qeth_schedule_recovery(card); return; } @@ -3354,6 +3354,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) { struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + qeth_l3_remove_device_attributes(&cgdev->dev); + qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); @@ -3367,7 +3369,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) card->dev = NULL; } - qeth_l3_remove_device_attributes(&cgdev->dev); qeth_l3_clear_ip_list(card, 0, 0); qeth_l3_clear_ipato_list(card); return; @@ -3380,6 +3381,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) enum qeth_card_states recover_flag; BUG_ON(!card); + mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 2, "setonlin"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); @@ -3461,6 +3463,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); out: mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; out_remove: card->use_hard_stop = 1; @@ -3473,6 +3476,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) else card->state = CARD_STATE_DOWN; mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return rc; } @@ -3488,6 +3492,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, int rc = 0, rc2 = 0, rc3 = 0; enum 
qeth_card_states recover_flag; + mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); @@ -3508,6 +3513,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, /* let user_space know that device is offline */ kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; } @@ -3522,11 +3528,11 @@ static int qeth_l3_recover(void *ptr) int rc = 0; card = (struct qeth_card *) ptr; - QETH_DBF_TEXT(TRACE, 2, "recover1"); - QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 2, "recover1"); + QETH_CARD_HEX(card, 2, &card, sizeof(void *)); if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) return 0; - QETH_DBF_TEXT(TRACE, 2, "recover2"); + QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); card->use_hard_stop = 1; @@ -3624,8 +3630,8 @@ static int qeth_l3_ip_event(struct notifier_block *this, if (dev_net(dev) != &init_net) return NOTIFY_DONE; - QETH_DBF_TEXT(TRACE, 3, "ipevent"); card = qeth_l3_get_card_from_dev(dev); + QETH_CARD_TEXT(card, 3, "ipevent"); if (!card) return NOTIFY_DONE; @@ -3671,11 +3677,11 @@ static int qeth_l3_ip6_event(struct notifier_block *this, struct qeth_ipaddr *addr; struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 3, "ip6event"); card = qeth_l3_get_card_from_dev(dev); if (!card) return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ip6event"); if (!qeth_is_supported(card, IPA_IPV6)) return NOTIFY_DONE; @@ -3714,7 +3720,7 @@ static int qeth_l3_register_notifiers(void) { int rc; - QETH_DBF_TEXT(TRACE, 5, "regnotif"); + QETH_DBF_TEXT(SETUP, 5, "regnotif"); rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); if (rc) return rc; @@ -3733,7 +3739,7 @@ static int qeth_l3_register_notifiers(void) static void qeth_l3_unregister_notifiers(void) { - QETH_DBF_TEXT(TRACE, 5, "unregnot"); + QETH_DBF_TEXT(SETUP, 5, "unregnot"); BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); #ifdef CONFIG_QETH_IPV6 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index fb5318b30e99bfe849602fa127bbf1a1ffe83336..67cfa68dcf1b9781f45314521f357380a5a6568c 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -479,6 +479,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_ipaddr *tmpipa, *t; char *tmp; int rc = 0; @@ -497,8 +498,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, card->ipato.enabled = (card->ipato.enabled)? 
0 : 1; } else if (!strcmp(tmp, "1")) { card->ipato.enabled = 1; + list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { + if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && + qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) + tmpipa->set_flags |= + QETH_IPA_SETIP_TAKEOVER_FLAG; + } + } else if (!strcmp(tmp, "0")) { card->ipato.enabled = 0; + list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { + if (tmpipa->set_flags & + QETH_IPA_SETIP_TAKEOVER_FLAG) + tmpipa->set_flags &= + ~QETH_IPA_SETIP_TAKEOVER_FLAG; + } } else rc = -EINVAL; out: diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 70491274da16a7cf74120eaf79b416d12789c618..65e1cf1049435f0f8a924b15fa9da81eab896605 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -47,6 +47,7 @@ static struct device *smsg_dev; static DEFINE_SPINLOCK(smsg_list_lock); static LIST_HEAD(smsg_list); +static int iucv_path_connected; static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); static void smsg_message_pending(struct iucv_path *, struct iucv_message *); @@ -142,8 +143,10 @@ static int smsg_pm_freeze(struct device *dev) #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "smsg_pm_freeze\n"); #endif - if (smsg_path) + if (smsg_path && iucv_path_connected) { iucv_path_sever(smsg_path, NULL); + iucv_path_connected = 0; + } return 0; } @@ -154,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev) #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "smsg_pm_restore_thaw\n"); #endif - if (smsg_path) { + if (smsg_path && iucv_path_connected) { memset(smsg_path, 0, sizeof(*smsg_path)); smsg_path->msglim = 255; smsg_path->flags = 0; @@ -165,6 +168,8 @@ static int smsg_pm_restore_thaw(struct device *dev) printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); #endif + if (!rc) + iucv_path_connected = 1; cpcmd("SET SMSG IUCV", NULL, 0, NULL); } return 0; @@ -214,6 +219,8 @@ static int __init smsg_init(void) NULL, NULL, NULL); if (rc) goto out_free_path; + else + iucv_path_connected = 1; smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!smsg_dev) { rc = -ENOMEM; diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index a175be9c496f479c0687bf15a126a13fb0b13941..3b6a06eebf7f25173e39138dcf894f4ec9248294 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c @@ -1587,7 +1587,7 @@ cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) err = ip_route_output_key(dev ? 
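
The qeth_l3_sys.c change above reuses qeth_l3_is_addr_covered_by_ipato(), which this patch exports through qeth_l3.h, to set or clear QETH_IPA_SETIP_TAKEOVER_FLAG on queued addresses when IP-address takeover is toggled via sysfs. The covered-by test is essentially a bit-prefix comparison of the address against a takeover entry up to the entry's mask length. A minimal, runnable IPv4-only sketch of such a comparison follows; prefix_match() and the sample addresses are made up here, and the real helper additionally handles IPv6 and the per-entry invert flag.

    #include <stdint.h>
    #include <stdio.h>

    /* Does addr fall within entry/mask_bits? Plain leading-bits comparison. */
    static int prefix_match(uint32_t addr, uint32_t entry, unsigned int mask_bits)
    {
            uint32_t mask = mask_bits ? ~0u << (32 - mask_bits) : 0;

            return (addr & mask) == (entry & mask);
    }

    int main(void)
    {
            uint32_t entry = 0x0A010000;   /* 10.1.0.0/16 takeover entry */
            uint32_t a     = 0x0A0100FE;   /* 10.1.0.254 */
            uint32_t b     = 0x0A020001;   /* 10.2.0.1   */

            printf("10.1.0.254 covered: %d\n", prefix_match(a, entry, 16)); /* 1 */
            printf("10.2.0.1   covered: %d\n", prefix_match(b, entry, 16)); /* 0 */
            return 0;
    }
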
dev_net(dev) : &init_net, &rt, &fl); if (!err) - return (&rt->u.dst)->dev; + return (&rt->dst)->dev; return NULL; } @@ -1649,7 +1649,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, c3cn->saddr.sin_addr.s_addr = rt->rt_src; /* now commit destination to connection */ - c3cn->dst_cache = &rt->u.dst; + c3cn->dst_cache = &rt->dst; /* try to establish an offloaded connection */ dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 44a07593de56020b444ab64c84074b512d07aef9..1a429ed6da9d722b41f5f979b789915a433ce063 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2653,6 +2653,7 @@ static void fcoe_get_lesb(struct fc_lport *lport, u32 lfc, vlfc, mdac; struct fcoe_dev_stats *devst; struct fcoe_fc_els_lesb *lesb; + struct rtnl_link_stats64 temp; struct net_device *netdev = fcoe_netdev(lport); lfc = 0; @@ -2669,7 +2670,7 @@ static void fcoe_get_lesb(struct fc_lport *lport, lesb->lesb_link_fail = htonl(lfc); lesb->lesb_vlink_fail = htonl(vlfc); lesb->lesb_miss_fka = htonl(mdac); - lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors); + lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); } /** diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 59ae76bace14cb740483484b1a6a21b08157f259..7c031fdc8205eff8475ecbd11dc098f3beb0c584 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -209,6 +209,24 @@ static void chipco_powercontrol_init(struct ssb_chipcommon *cc) } } +/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */ +static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + + switch (bus->chip_id) { + case 0x4312: + case 0x4322: + case 0x4328: + return 7000; + case 0x4325: + /* TODO: */ + default: + return 15000; + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; @@ -218,6 +236,12 @@ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc) if (bus->bustype != SSB_BUSTYPE_PCI) return; + + if (cc->capabilities & SSB_CHIPCO_CAP_PMU) { + cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc); + return; + } + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) return; @@ -235,6 +259,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc) return; /* We don't have a ChipCommon */ if (cc->dev->id.revision >= 11) cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); + ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status); ssb_pmu_init(cc); chipco_powercontrol_init(cc); ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 3d551245a4e22c8ce59573490d690c132e89c04c..5732bb2c35787b5093d6b12a1d3fb7029bf59990 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -502,9 +502,9 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc) chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk); } +/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */ void ssb_pmu_init(struct ssb_chipcommon *cc) { - struct ssb_bus *bus = cc->dev->bus; u32 pmucap; if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU)) @@ -516,15 +516,12 @@ void ssb_pmu_init(struct ssb_chipcommon *cc) ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, pmucap); - if (cc->pmu.rev >= 1) { - if 
((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) { - chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, - ~SSB_CHIPCO_PMU_CTL_NOILPONW); - } else { - chipco_set32(cc, SSB_CHIPCO_PMU_CTL, - SSB_CHIPCO_PMU_CTL_NOILPONW); - } - } + if (cc->pmu.rev == 1) + chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, + ~SSB_CHIPCO_PMU_CTL_NOILPONW); + else + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, + SSB_CHIPCO_PMU_CTL_NOILPONW); ssb_pmu_pll_init(cc); ssb_pmu_resources_init(cc); } diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 51275aac5b34df0040ff02781a5f308dadc32753..7cee7f4eb60b8f761fe2552e483c59f1f6e3efac 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -486,6 +486,7 @@ static int ssb_devices_register(struct ssb_bus *bus) #ifdef CONFIG_SSB_PCIHOST sdev->irq = bus->host_pci->irq; dev->parent = &bus->host_pci->dev; + sdev->dma_dev = dev->parent; #endif break; case SSB_BUSTYPE_PCMCIA: @@ -501,6 +502,7 @@ static int ssb_devices_register(struct ssb_bus *bus) break; case SSB_BUSTYPE_SSB: dev->dma_mask = &dev->coherent_dma_mask; + sdev->dma_dev = dev; break; } @@ -1226,80 +1228,6 @@ u32 ssb_dma_translation(struct ssb_device *dev) } EXPORT_SYMBOL(ssb_dma_translation); -int ssb_dma_set_mask(struct ssb_device *dev, u64 mask) -{ -#ifdef CONFIG_SSB_PCIHOST - int err; -#endif - - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - err = pci_set_dma_mask(dev->bus->host_pci, mask); - if (err) - return err; - err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask); - return err; -#endif - case SSB_BUSTYPE_SSB: - return dma_set_mask(dev->dev, mask); - default: - __ssb_dma_not_implemented(dev); - } - return -ENOSYS; -} -EXPORT_SYMBOL(ssb_dma_set_mask); - -void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp_flags) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - if (gfp_flags & GFP_DMA) { - /* Workaround: The PCI API does not support passing - * a GFP flag. */ - return dma_alloc_coherent(&dev->bus->host_pci->dev, - size, dma_handle, gfp_flags); - } - return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle); -#endif - case SSB_BUSTYPE_SSB: - return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags); - default: - __ssb_dma_not_implemented(dev); - } - return NULL; -} -EXPORT_SYMBOL(ssb_dma_alloc_consistent); - -void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - gfp_t gfp_flags) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - if (gfp_flags & GFP_DMA) { - /* Workaround: The PCI API does not support passing - * a GFP flag. */ - dma_free_coherent(&dev->bus->host_pci->dev, - size, vaddr, dma_handle); - return; - } - pci_free_consistent(dev->bus->host_pci, size, - vaddr, dma_handle); - return; -#endif - case SSB_BUSTYPE_SSB: - dma_free_coherent(dev->dev, size, vaddr, dma_handle); - return; - default: - __ssb_dma_not_implemented(dev); - } -} -EXPORT_SYMBOL(ssb_dma_free_consistent); - int ssb_bus_may_powerdown(struct ssb_bus *bus) { struct ssb_chipcommon *cc; diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 6dcda86be6ebdb83d579d27907ee1f153889000a..6e88d2b603b4ccb417506298ab9eadc5633eeccd 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, return -ENODEV; } if (bus->chipco.dev) { /* can be unavailible! */ - bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ? 
- SSB_SPROM_BASE1 : SSB_SPROM_BASE31; + /* + * get SPROM offset: SSB_SPROM_BASE1 except for + * chipcommon rev >= 31 or chip ID is 0x4312 and + * chipcommon status & 3 == 2 + */ + if (bus->chipco.dev->id.revision >= 31) + bus->sprom_offset = SSB_SPROM_BASE31; + else if (bus->chip_id == 0x4312 && + (bus->chipco.status & 0x03) == 2) + bus->sprom_offset = SSB_SPROM_BASE31; + else + bus->sprom_offset = SSB_SPROM_BASE1; } else { bus->sprom_offset = SSB_SPROM_BASE1; } + ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset); buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); if (!buf) diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 7a582e80de188bccaea0e8a8e051a850a6ba409a..96c86c873011a428c832aafc39a589408f76aa6b 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -71,7 +71,7 @@ static int is_valid_iface(struct net_device *net_dev) #endif /* Device is being bridged */ - /* if (net_dev->br_port != NULL) + /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) return 0; */ return 1; @@ -440,6 +440,7 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, struct batman_packet *batman_packet; struct batman_if *batman_if; struct net_device_stats *stats; + struct rtnl_link_stats64 temp; int ret; skb = skb_share_check(skb, GFP_ATOMIC); @@ -468,7 +469,7 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, if (batman_if->if_status != IF_ACTIVE) goto err_free; - stats = (struct net_device_stats *)dev_get_stats(skb->dev); + stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp); if (stats) { stats->rx_packets++; stats->rx_bytes += skb->len; diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 7e0be8d00dc3a55c2d4cb031c443d8c43c0de164..10a82ef3021529071476d6f5cc1ddbce9f093544 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -113,7 +113,7 @@ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!priv->phydev) return -EINVAL; - return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); + return phy_mii_ioctl(priv->phydev, rq, cmd); } static void cvm_oct_adjust_link(struct net_device *dev) diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index c89990f5e0183a6c4701c103d5bb16b6182f7a14..101ffc965ee00e2880043fafb35579ff0337782b 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -866,50 +866,50 @@ static void cxacru_poll_status(struct work_struct *work) instance->line_status = buf[CXINF_LINE_STATUS]; switch (instance->line_status) { case 0: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: down\n"); break; case 1: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: attempting to activate\n"); break; case 2: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: training\n"); break; case 3: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: channel analysis\n"); break; case 4: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: exchange\n"); break; case 5: atm_dev->link_rate = buf[CXINF_DOWNSTREAM_RATE] * 1000 / 424; - atm_dev->signal = ATM_PHY_SIG_FOUND; + 
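
The fcoe and batman-adv hunks above each add a struct rtnl_link_stats64 temp and pass it to dev_get_stats(), reflecting the interface change in this series: the caller now supplies the storage that the 64-bit counters are returned in and receives a pointer to the result. The plain-C mock below demonstrates only that calling convention; struct fake_dev, mock_get_stats() and the reduced field set are invented and are not the kernel API.

    #include <stdio.h>
    #include <stdint.h>

    /* stand-ins for the kernel types, trimmed to two counters */
    struct link_stats64 { uint64_t rx_packets, rx_crc_errors; };
    struct fake_dev     { uint64_t rx_packets, rx_crc_errors; };

    /* Caller provides the storage; the function fills it and returns it.
     * This mirrors the dev_get_stats(dev, &temp) shape used in the hunks above. */
    static const struct link_stats64 *
    mock_get_stats(const struct fake_dev *dev, struct link_stats64 *storage)
    {
            storage->rx_packets = dev->rx_packets;
            storage->rx_crc_errors = dev->rx_crc_errors;
            return storage;
    }

    int main(void)
    {
            struct fake_dev dev = { .rx_packets = 1000, .rx_crc_errors = 3 };
            struct link_stats64 temp;          /* lives on the caller's stack */

            printf("crc errors: %llu\n",
                   (unsigned long long)mock_get_stats(&dev, &temp)->rx_crc_errors);
            return 0;
    }
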
atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND); atm_info(usbatm, "ADSL line: up (%d kb/s down | %d kb/s up)\n", buf[CXINF_DOWNSTREAM_RATE], buf[CXINF_UPSTREAM_RATE]); break; case 6: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: waiting\n"); break; case 7: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: initializing\n"); break; default: - atm_dev->signal = ATM_PHY_SIG_UNKNOWN; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "Unknown line state %02x\n", instance->line_status); break; } diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c index 1335456b4f9396284aea64258199cd9575e49924..80f9617d3a1515ae137144464f1d1da490113dc8 100644 --- a/drivers/usb/atm/speedtch.c +++ b/drivers/usb/atm/speedtch.c @@ -525,7 +525,7 @@ static void speedtch_check_status(struct work_struct *work) switch (status) { case 0: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); if (instance->last_status) atm_info(usbatm, "ADSL line is down\n"); /* It may never resync again unless we ask it to... */ @@ -533,12 +533,12 @@ static void speedtch_check_status(struct work_struct *work) break; case 0x08: - atm_dev->signal = ATM_PHY_SIG_UNKNOWN; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "ADSL line is blocked?\n"); break; case 0x10: - atm_dev->signal = ATM_PHY_SIG_LOST; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line is synchronising\n"); break; @@ -554,7 +554,7 @@ static void speedtch_check_status(struct work_struct *work) } atm_dev->link_rate = down_speed * 1000 / 424; - atm_dev->signal = ATM_PHY_SIG_FOUND; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND); atm_info(usbatm, "ADSL line is up (%d kb/s down | %d kb/s up)\n", @@ -562,7 +562,7 @@ static void speedtch_check_status(struct work_struct *work) break; default: - atm_dev->signal = ATM_PHY_SIG_UNKNOWN; + atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "unknown line state %02x\n", status); break; } diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index e213d3fa4920a22d261d7113089b2759a011cad1..ebae94480140e01322ba9176ac808f71296567b1 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -575,6 +575,13 @@ MODULE_PARM_DESC(annex, sc->usbatm->atm_dev->type = val; \ } while (0) +#define UPDATE_ATM_SIGNAL(val) \ + do { \ + if (sc->usbatm->atm_dev) \ + atm_dev_signal_change(sc->usbatm->atm_dev, val); \ + } while (0) + + /* Firmware loading */ #define LOAD_INTERNAL 0xA0 #define F8051_USBCS 0x7f92 @@ -1359,7 +1366,7 @@ static int uea_stat_e1(struct uea_softc *sc) /* always update it as atm layer could not be init when we switch to * operational state */ - UPDATE_ATM_STAT(signal, ATM_PHY_SIG_FOUND); + UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND); /* wake up processes waiting for synchronization */ wake_up(&sc->sync_q); @@ -1498,7 +1505,7 @@ static int uea_stat_e4(struct uea_softc *sc) /* always update it as atm layer could not be init when we switch to * operational state */ - UPDATE_ATM_STAT(signal, ATM_PHY_SIG_FOUND); + UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND); /* wake up processes waiting for synchronization */ wake_up(&sc->sync_q); @@ -1825,7 +1832,7 @@ static int uea_start_reset(struct uea_softc *sc) * So we will failed to wait Ready CMV. 
*/ sc->cmv_ack = 0; - UPDATE_ATM_STAT(signal, ATM_PHY_SIG_LOST); + UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST); /* reset statistics */ memset(&sc->stats, 0, sizeof(struct uea_stats)); diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 5c0d06c79a81f1f5e5198aa1576e5e2d29d369f7..020fa5a25fda8fe7bb6c1ca23f28f90d120bd537 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -171,7 +171,8 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, int i, count; rndis_query_cmplt_type *resp; struct net_device *net; - const struct net_device_stats *stats; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats; if (!r) return -ENOMEM; resp = (rndis_query_cmplt_type *) r->buf; @@ -194,7 +195,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, resp->InformationBufferOffset = cpu_to_le32 (16); net = rndis_per_dev_params[configNr].dev; - stats = dev_get_stats(net); + stats = dev_get_stats(net, &temp); switch (OID) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index d219070fed3da07b3944ac53363673f310435f33..29e850a7a2f9871b7c9658e0717f8926dc5bc4ac 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -74,6 +74,22 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to, } return seg; } +/* Copy iovec entries for len bytes from iovec. */ +static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, + size_t len, int iovcount) +{ + int seg = 0; + size_t size; + while (len && seg < iovcount) { + size = min(from->iov_len, len); + to->iov_base = from->iov_base; + to->iov_len = size; + len -= size; + ++from; + ++to; + ++seg; + } +} /* Caller must have TX VQ lock */ static void tx_poll_stop(struct vhost_net *net) @@ -129,7 +145,7 @@ static void handle_tx(struct vhost_net *net) if (wmem < sock->sk->sk_sndbuf / 2) tx_poll_stop(net); - hdr_size = vq->hdr_size; + hdr_size = vq->vhost_hlen; for (;;) { head = vhost_get_vq_desc(&net->dev, vq, vq->iov, @@ -172,7 +188,7 @@ static void handle_tx(struct vhost_net *net) /* TODO: Check specific error and bomb out unless ENOBUFS? */ err = sock->ops->sendmsg(NULL, sock, &msg, len); if (unlikely(err < 0)) { - vhost_discard_vq_desc(vq); + vhost_discard_vq_desc(vq, 1); tx_poll_start(net, sock); break; } @@ -191,9 +207,82 @@ static void handle_tx(struct vhost_net *net) unuse_mm(net->dev.mm); } +static int peek_head_len(struct sock *sk) +{ + struct sk_buff *head; + int len = 0; + + lock_sock(sk); + head = skb_peek(&sk->sk_receive_queue); + if (head) + len = head->len; + release_sock(sk); + return len; +} + +/* This is a multi-buffer version of vhost_get_desc, that works if + * vq has read descriptors only. 
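
The cxacru, speedtch and ueagle-atm hunks above stop assigning atm_dev->signal directly and route line-state updates through atm_dev_signal_change() (wrapped as UPDATE_ATM_SIGNAL in ueagle-atm), so the ATM core sees signal transitions instead of a silently changing field. The sketch below illustrates the general setter-with-notification pattern only; set_signal(), the enum values and the suppress-when-unchanged detail are assumptions of this sketch, not a description of the kernel function.

    #include <stdio.h>

    enum phy_sig { SIG_UNKNOWN, SIG_LOST, SIG_FOUND };

    struct phy {
            enum phy_sig signal;
            void (*notify)(struct phy *p);   /* e.g. wake up anyone waiting on state */
    };

    /* Central setter: updates the field and notifies on a real transition,
     * instead of having every driver poke p->signal directly. */
    static void set_signal(struct phy *p, enum phy_sig s)
    {
            if (p->signal == s)
                    return;
            p->signal = s;
            if (p->notify)
                    p->notify(p);
    }

    static void print_change(struct phy *p)
    {
            printf("signal changed to %d\n", p->signal);
    }

    int main(void)
    {
            struct phy p = { .signal = SIG_UNKNOWN, .notify = print_change };

            set_signal(&p, SIG_LOST);    /* notifies */
            set_signal(&p, SIG_LOST);    /* no change, no notification */
            set_signal(&p, SIG_FOUND);   /* notifies */
            return 0;
    }
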
+ * @vq - the relevant virtqueue + * @datalen - data length we'll be reading + * @iovcount - returned count of io vectors we fill + * @log - vhost log + * @log_num - log offset + * returns number of buffer heads allocated, negative on error + */ +static int get_rx_bufs(struct vhost_virtqueue *vq, + struct vring_used_elem *heads, + int datalen, + unsigned *iovcount, + struct vhost_log *log, + unsigned *log_num) +{ + unsigned int out, in; + int seg = 0; + int headcount = 0; + unsigned d; + int r, nlogs = 0; + + while (datalen > 0) { + if (unlikely(headcount >= VHOST_NET_MAX_SG)) { + r = -ENOBUFS; + goto err; + } + d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, + ARRAY_SIZE(vq->iov) - seg, &out, + &in, log, log_num); + if (d == vq->num) { + r = 0; + goto err; + } + if (unlikely(out || in <= 0)) { + vq_err(vq, "unexpected descriptor format for RX: " + "out %d, in %d\n", out, in); + r = -EINVAL; + goto err; + } + if (unlikely(log)) { + nlogs += *log_num; + log += *log_num; + } + heads[headcount].id = d; + heads[headcount].len = iov_length(vq->iov + seg, in); + datalen -= heads[headcount].len; + ++headcount; + seg += in; + } + heads[headcount - 1].len += datalen; + *iovcount = seg; + if (unlikely(log)) + *log_num = nlogs; + return headcount; +err: + vhost_discard_vq_desc(vq, headcount); + return r; +} + /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ -static void handle_rx(struct vhost_net *net) +static void handle_rx_big(struct vhost_net *net) { struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; unsigned out, in, log, s; @@ -223,7 +312,7 @@ static void handle_rx(struct vhost_net *net) use_mm(net->dev.mm); mutex_lock(&vq->mutex); vhost_disable_notify(vq); - hdr_size = vq->hdr_size; + hdr_size = vq->vhost_hlen; vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? vq->log : NULL; @@ -270,14 +359,14 @@ static void handle_rx(struct vhost_net *net) len, MSG_DONTWAIT | MSG_TRUNC); /* TODO: Check specific error and bomb out unless EAGAIN? */ if (err < 0) { - vhost_discard_vq_desc(vq); + vhost_discard_vq_desc(vq, 1); break; } /* TODO: Should check and handle checksum. */ if (err > len) { pr_debug("Discarded truncated rx packet: " " len %d > %zd\n", err, len); - vhost_discard_vq_desc(vq); + vhost_discard_vq_desc(vq, 1); continue; } len = err; @@ -302,54 +391,175 @@ static void handle_rx(struct vhost_net *net) unuse_mm(net->dev.mm); } -static void handle_tx_kick(struct work_struct *work) +/* Expects to be always run from workqueue - which acts as + * read-size critical section for our kind of RCU. */ +static void handle_rx_mergeable(struct vhost_net *net) { - struct vhost_virtqueue *vq; - struct vhost_net *net; - vq = container_of(work, struct vhost_virtqueue, poll.work); - net = container_of(vq->dev, struct vhost_net, dev); + struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; + unsigned uninitialized_var(in), log; + struct vhost_log *vq_log; + struct msghdr msg = { + .msg_name = NULL, + .msg_namelen = 0, + .msg_control = NULL, /* FIXME: get and handle RX aux data. 
*/ + .msg_controllen = 0, + .msg_iov = vq->iov, + .msg_flags = MSG_DONTWAIT, + }; + + struct virtio_net_hdr_mrg_rxbuf hdr = { + .hdr.flags = 0, + .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE + }; + + size_t total_len = 0; + int err, headcount; + size_t vhost_hlen, sock_hlen; + size_t vhost_len, sock_len; + struct socket *sock = rcu_dereference(vq->private_data); + if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) + return; + + use_mm(net->dev.mm); + mutex_lock(&vq->mutex); + vhost_disable_notify(vq); + vhost_hlen = vq->vhost_hlen; + sock_hlen = vq->sock_hlen; + + vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? + vq->log : NULL; + + while ((sock_len = peek_head_len(sock->sk))) { + sock_len += sock_hlen; + vhost_len = sock_len + vhost_hlen; + headcount = get_rx_bufs(vq, vq->heads, vhost_len, + &in, vq_log, &log); + /* On error, stop handling until the next kick. */ + if (unlikely(headcount < 0)) + break; + /* OK, now we need to know about added descriptors. */ + if (!headcount) { + if (unlikely(vhost_enable_notify(vq))) { + /* They have slipped one in as we were + * doing that: check again. */ + vhost_disable_notify(vq); + continue; + } + /* Nothing new? Wait for eventfd to tell us + * they refilled. */ + break; + } + /* We don't need to be notified again. */ + if (unlikely((vhost_hlen))) + /* Skip header. TODO: support TSO. */ + move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in); + else + /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF: + * needed because sendmsg can modify msg_iov. */ + copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in); + msg.msg_iovlen = in; + err = sock->ops->recvmsg(NULL, sock, &msg, + sock_len, MSG_DONTWAIT | MSG_TRUNC); + /* Userspace might have consumed the packet meanwhile: + * it's not supposed to do this usually, but might be hard + * to prevent. Discard data we got (if any) and keep going. */ + if (unlikely(err != sock_len)) { + pr_debug("Discarded rx packet: " + " len %d, expected %zd\n", err, sock_len); + vhost_discard_vq_desc(vq, headcount); + continue; + } + if (unlikely(vhost_hlen) && + memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0, + vhost_hlen)) { + vq_err(vq, "Unable to write vnet_hdr at addr %p\n", + vq->iov->iov_base); + break; + } + /* TODO: Should check and handle checksum. 
*/ + if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) && + memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount, + offsetof(typeof(hdr), num_buffers), + sizeof hdr.num_buffers)) { + vq_err(vq, "Failed num_buffers write"); + vhost_discard_vq_desc(vq, headcount); + break; + } + vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, + headcount); + if (unlikely(vq_log)) + vhost_log_write(vq, vq_log, log, vhost_len); + total_len += vhost_len; + if (unlikely(total_len >= VHOST_NET_WEIGHT)) { + vhost_poll_queue(&vq->poll); + break; + } + } + + mutex_unlock(&vq->mutex); + unuse_mm(net->dev.mm); +} + +static void handle_rx(struct vhost_net *net) +{ + if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF)) + handle_rx_mergeable(net); + else + handle_rx_big(net); +} + +static void handle_tx_kick(struct vhost_work *work) +{ + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); + handle_tx(net); } -static void handle_rx_kick(struct work_struct *work) +static void handle_rx_kick(struct vhost_work *work) { - struct vhost_virtqueue *vq; - struct vhost_net *net; - vq = container_of(work, struct vhost_virtqueue, poll.work); - net = container_of(vq->dev, struct vhost_net, dev); + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); + handle_rx(net); } -static void handle_tx_net(struct work_struct *work) +static void handle_tx_net(struct vhost_work *work) { - struct vhost_net *net; - net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work); + struct vhost_net *net = container_of(work, struct vhost_net, + poll[VHOST_NET_VQ_TX].work); handle_tx(net); } -static void handle_rx_net(struct work_struct *work) +static void handle_rx_net(struct vhost_work *work) { - struct vhost_net *net; - net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work); + struct vhost_net *net = container_of(work, struct vhost_net, + poll[VHOST_NET_VQ_RX].work); handle_rx(net); } static int vhost_net_open(struct inode *inode, struct file *f) { struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL); + struct vhost_dev *dev; int r; + if (!n) return -ENOMEM; + + dev = &n->dev; n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick; n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick; - r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX); + r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX); if (r < 0) { kfree(n); return r; } - vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT); - vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN); + vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev); + vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev); n->tx_poll_state = VHOST_NET_POLL_DISABLED; f->private_data = n; @@ -527,13 +737,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) /* start polling new socket */ oldsock = vq->private_data; - if (sock == oldsock) - goto done; + if (sock != oldsock) { + vhost_net_disable_vq(n, vq); + rcu_assign_pointer(vq->private_data, sock); + vhost_net_enable_vq(n, vq); + } - vhost_net_disable_vq(n, vq); - rcu_assign_pointer(vq->private_data, sock); - vhost_net_enable_vq(n, vq); -done: mutex_unlock(&vq->mutex); if (oldsock) { @@ -574,9 +783,21 @@ static long vhost_net_reset_owner(struct vhost_net *n) static int vhost_net_set_features(struct vhost_net *n, u64 features) { - 
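
In the mergeable-receive path added above, get_rx_bufs() keeps taking descriptor heads from the virtqueue until the collected buffer space covers the packet (plus header), trims the length recorded for the last head, and the resulting head count is later written into the num_buffers field of the virtio header. The small runnable sketch below mirrors just that counting-and-trimming step; count_rx_bufs() and the fixed buffer sizes are invented, whereas the real code derives each head's capacity from iov_length() over the descriptor chain.

    #include <stdio.h>
    #include <stddef.h>

    /* Pick how many buffers are needed to hold datalen bytes and record the
     * length consumed from each, trimming the last one. Returns the buffer
     * count, or -1 if the available buffers are not enough. */
    static int count_rx_bufs(const size_t *buf_len, int nbufs,
                             size_t datalen, size_t *used_len)
    {
            int count = 0;
            size_t remaining = datalen;

            while (remaining > 0) {
                    if (count >= nbufs)
                            return -1;              /* ran out of buffers */
                    used_len[count] = buf_len[count];
                    if (buf_len[count] >= remaining) {
                            /* last buffer is only partially filled: trim it */
                            used_len[count] = remaining;
                            remaining = 0;
                    } else {
                            remaining -= buf_len[count];
                    }
                    count++;
            }
            return count;
    }

    int main(void)
    {
            size_t bufs[] = { 1500, 1500, 1500 };
            size_t used[3];
            int n = count_rx_bufs(bufs, 3, 2000, used);

            /* expect: 2 buffers, lengths 1500 and 500 */
            printf("num_buffers=%d, used=[%zu, %zu]\n", n, used[0], used[1]);
            return 0;
    }
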
size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ? - sizeof(struct virtio_net_hdr) : 0; + size_t vhost_hlen, sock_hlen, hdr_len; int i; + + hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? + sizeof(struct virtio_net_hdr_mrg_rxbuf) : + sizeof(struct virtio_net_hdr); + if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) { + /* vhost provides vnet_hdr */ + vhost_hlen = hdr_len; + sock_hlen = 0; + } else { + /* socket provides vnet_hdr */ + vhost_hlen = 0; + sock_hlen = hdr_len; + } mutex_lock(&n->dev.mutex); if ((features & (1 << VHOST_F_LOG_ALL)) && !vhost_log_access_ok(&n->dev)) { @@ -587,7 +808,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features) smp_wmb(); for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { mutex_lock(&n->vqs[i].mutex); - n->vqs[i].hdr_size = hdr_size; + n->vqs[i].vhost_hlen = vhost_hlen; + n->vqs[i].sock_hlen = sock_hlen; mutex_unlock(&n->vqs[i].mutex); } vhost_net_flush(n); @@ -639,7 +861,7 @@ static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl, } #endif -const static struct file_operations vhost_net_fops = { +static const struct file_operations vhost_net_fops = { .owner = THIS_MODULE, .release = vhost_net_release, .unlocked_ioctl = vhost_net_ioctl, @@ -657,25 +879,13 @@ static struct miscdevice vhost_net_misc = { static int vhost_net_init(void) { - int r = vhost_init(); - if (r) - goto err_init; - r = misc_register(&vhost_net_misc); - if (r) - goto err_reg; - return 0; -err_reg: - vhost_cleanup(); -err_init: - return r; - + return misc_register(&vhost_net_misc); } module_init(vhost_net_init); static void vhost_net_exit(void) { misc_deregister(&vhost_net_misc); - vhost_cleanup(); } module_exit(vhost_net_exit); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 0b99783083f61504bd77fc353e9c9f99a09f7bfb..e05557d529992ec4deb1827a32574043bcd00925 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -17,12 +17,13 @@ #include #include #include -#include #include #include #include #include #include +#include +#include #include #include @@ -37,8 +38,6 @@ enum { VHOST_MEMORY_F_LOG = 0x1, }; -static struct workqueue_struct *vhost_workqueue; - static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { @@ -52,23 +51,31 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) { - struct vhost_poll *poll; - poll = container_of(wait, struct vhost_poll, wait); + struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait); + if (!((unsigned long)key & poll->mask)) return 0; - queue_work(vhost_workqueue, &poll->work); + vhost_poll_queue(poll); return 0; } /* Init poll structure */ -void vhost_poll_init(struct vhost_poll *poll, work_func_t func, - unsigned long mask) +void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, + unsigned long mask, struct vhost_dev *dev) { - INIT_WORK(&poll->work, func); + struct vhost_work *work = &poll->work; + init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); init_poll_funcptr(&poll->table, vhost_poll_func); poll->mask = mask; + poll->dev = dev; + + INIT_LIST_HEAD(&work->node); + work->fn = fn; + init_waitqueue_head(&work->done); + work->flushing = 0; + work->queue_seq = work->done_seq = 0; } /* Start polling a file. We add ourselves to file's wait queue. The caller must @@ -92,12 +99,40 @@ void vhost_poll_stop(struct vhost_poll *poll) * locks that are also used by the callback. 
*/ void vhost_poll_flush(struct vhost_poll *poll) { - flush_work(&poll->work); + struct vhost_work *work = &poll->work; + unsigned seq; + int left; + int flushing; + + spin_lock_irq(&poll->dev->work_lock); + seq = work->queue_seq; + work->flushing++; + spin_unlock_irq(&poll->dev->work_lock); + wait_event(work->done, ({ + spin_lock_irq(&poll->dev->work_lock); + left = seq - work->done_seq <= 0; + spin_unlock_irq(&poll->dev->work_lock); + left; + })); + spin_lock_irq(&poll->dev->work_lock); + flushing = --work->flushing; + spin_unlock_irq(&poll->dev->work_lock); + BUG_ON(flushing < 0); } void vhost_poll_queue(struct vhost_poll *poll) { - queue_work(vhost_workqueue, &poll->work); + struct vhost_dev *dev = poll->dev; + struct vhost_work *work = &poll->work; + unsigned long flags; + + spin_lock_irqsave(&dev->work_lock, flags); + if (list_empty(&work->node)) { + list_add_tail(&work->node, &dev->work_list); + work->queue_seq++; + wake_up_process(dev->worker); + } + spin_unlock_irqrestore(&dev->work_lock, flags); } static void vhost_vq_reset(struct vhost_dev *dev, @@ -114,7 +149,8 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->used_flags = 0; vq->log_used = false; vq->log_addr = -1ull; - vq->hdr_size = 0; + vq->vhost_hlen = 0; + vq->sock_hlen = 0; vq->private_data = NULL; vq->log_base = NULL; vq->error_ctx = NULL; @@ -125,10 +161,51 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->log_ctx = NULL; } +static int vhost_worker(void *data) +{ + struct vhost_dev *dev = data; + struct vhost_work *work = NULL; + unsigned uninitialized_var(seq); + + for (;;) { + /* mb paired w/ kthread_stop */ + set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irq(&dev->work_lock); + if (work) { + work->done_seq = seq; + if (work->flushing) + wake_up_all(&work->done); + } + + if (kthread_should_stop()) { + spin_unlock_irq(&dev->work_lock); + __set_current_state(TASK_RUNNING); + return 0; + } + if (!list_empty(&dev->work_list)) { + work = list_first_entry(&dev->work_list, + struct vhost_work, node); + list_del_init(&work->node); + seq = work->queue_seq; + } else + work = NULL; + spin_unlock_irq(&dev->work_lock); + + if (work) { + __set_current_state(TASK_RUNNING); + work->fn(work); + } else + schedule(); + + } +} + long vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue *vqs, int nvqs) { int i; + dev->vqs = vqs; dev->nvqs = nvqs; mutex_init(&dev->mutex); @@ -136,6 +213,9 @@ long vhost_dev_init(struct vhost_dev *dev, dev->log_file = NULL; dev->memory = NULL; dev->mm = NULL; + spin_lock_init(&dev->work_lock); + INIT_LIST_HEAD(&dev->work_list); + dev->worker = NULL; for (i = 0; i < dev->nvqs; ++i) { dev->vqs[i].dev = dev; @@ -143,9 +223,9 @@ long vhost_dev_init(struct vhost_dev *dev, vhost_vq_reset(dev, dev->vqs + i); if (dev->vqs[i].handle_kick) vhost_poll_init(&dev->vqs[i].poll, - dev->vqs[i].handle_kick, - POLLIN); + dev->vqs[i].handle_kick, POLLIN, dev); } + return 0; } @@ -159,12 +239,36 @@ long vhost_dev_check_owner(struct vhost_dev *dev) /* Caller should have device mutex */ static long vhost_dev_set_owner(struct vhost_dev *dev) { + struct task_struct *worker; + int err; /* Is there an owner already? 
*/ - if (dev->mm) - return -EBUSY; + if (dev->mm) { + err = -EBUSY; + goto err_mm; + } /* No owner, become one */ dev->mm = get_task_mm(current); + worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); + if (IS_ERR(worker)) { + err = PTR_ERR(worker); + goto err_worker; + } + + dev->worker = worker; + err = cgroup_attach_task_current_cg(worker); + if (err) + goto err_cgroup; + wake_up_process(worker); /* avoid contributing to loadavg */ + return 0; +err_cgroup: + kthread_stop(worker); +err_worker: + if (dev->mm) + mmput(dev->mm); + dev->mm = NULL; +err_mm: + return err; } /* Caller should have device mutex */ @@ -217,6 +321,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev) if (dev->mm) mmput(dev->mm); dev->mm = NULL; + + WARN_ON(!list_empty(&dev->work_list)); + kthread_stop(dev->worker); } static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) @@ -237,8 +344,8 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, { int i; - if (!mem) - return 0; + if (!mem) + return 0; for (i = 0; i < mem->nregions; ++i) { struct vhost_memory_region *m = mem->regions + i; @@ -995,9 +1102,9 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, } /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ -void vhost_discard_vq_desc(struct vhost_virtqueue *vq) +void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) { - vq->last_avail_idx--; + vq->last_avail_idx -= n; } /* After we've used one of their buffers, we tell them about it. We'll then @@ -1042,6 +1149,67 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) return 0; } +static int __vhost_add_used_n(struct vhost_virtqueue *vq, + struct vring_used_elem *heads, + unsigned count) +{ + struct vring_used_elem __user *used; + int start; + + start = vq->last_used_idx % vq->num; + used = vq->used->ring + start; + if (copy_to_user(used, heads, count * sizeof *used)) { + vq_err(vq, "Failed to write used"); + return -EFAULT; + } + if (unlikely(vq->log_used)) { + /* Make sure data is seen before log. */ + smp_wmb(); + /* Log used ring entry write. */ + log_write(vq->log_base, + vq->log_addr + + ((void __user *)used - (void __user *)vq->used), + count * sizeof *used); + } + vq->last_used_idx += count; + return 0; +} + +/* After we've used one of their buffers, we tell them about it. We'll then + * want to notify the guest, using eventfd. */ +int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, + unsigned count) +{ + int start, n, r; + + start = vq->last_used_idx % vq->num; + n = vq->num - start; + if (n < count) { + r = __vhost_add_used_n(vq, heads, n); + if (r < 0) + return r; + heads += n; + count -= n; + } + r = __vhost_add_used_n(vq, heads, count); + + /* Make sure buffer is written before we update index. */ + smp_wmb(); + if (put_user(vq->last_used_idx, &vq->used->idx)) { + vq_err(vq, "Failed to increment used idx"); + return -EFAULT; + } + if (unlikely(vq->log_used)) { + /* Log used index update. */ + log_write(vq->log_base, + vq->log_addr + offsetof(struct vring_used, idx), + sizeof vq->used->idx); + if (vq->log_ctx) + eventfd_signal(vq->log_ctx, 1); + } + return r; +} + /* This actually signals the guest, using eventfd. 
*/ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -1076,6 +1244,15 @@ void vhost_add_used_and_signal(struct vhost_dev *dev, vhost_signal(dev, vq); } +/* multi-buffer version of vhost_add_used_and_signal */ +void vhost_add_used_and_signal_n(struct vhost_dev *dev, + struct vhost_virtqueue *vq, + struct vring_used_elem *heads, unsigned count) +{ + vhost_add_used_n(vq, heads, count); + vhost_signal(dev, vq); +} + /* OK, now we need to know about added descriptors. */ bool vhost_enable_notify(struct vhost_virtqueue *vq) { @@ -1100,7 +1277,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq) return false; } - return avail_idx != vq->last_avail_idx; + return avail_idx != vq->avail_idx; } /* We don't need to be notified again. */ @@ -1115,16 +1292,3 @@ void vhost_disable_notify(struct vhost_virtqueue *vq) vq_err(vq, "Failed to enable notification at %p: %d\n", &vq->used->flags, r); } - -int vhost_init(void) -{ - vhost_workqueue = create_singlethread_workqueue("vhost"); - if (!vhost_workqueue) - return -ENOMEM; - return 0; -} - -void vhost_cleanup(void) -{ - destroy_workqueue(vhost_workqueue); -} diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 11ee13dba0f7444a20313a1e15a66c3c8d029dfd..afd77295971ce3044117d0d6e5ea8f4e20f655fe 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -5,13 +5,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include struct vhost_device; @@ -20,19 +20,31 @@ enum { VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2, }; +struct vhost_work; +typedef void (*vhost_work_fn_t)(struct vhost_work *work); + +struct vhost_work { + struct list_head node; + vhost_work_fn_t fn; + wait_queue_head_t done; + int flushing; + unsigned queue_seq; + unsigned done_seq; +}; + /* Poll a file (eventfd or socket) */ /* Note: there's nothing vhost specific about this structure. */ struct vhost_poll { poll_table table; wait_queue_head_t *wqh; wait_queue_t wait; - /* struct which will handle all actual work. */ - struct work_struct work; + struct vhost_work work; unsigned long mask; + struct vhost_dev *dev; }; -void vhost_poll_init(struct vhost_poll *poll, work_func_t func, - unsigned long mask); +void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, + unsigned long mask, struct vhost_dev *dev); void vhost_poll_start(struct vhost_poll *poll, struct file *file); void vhost_poll_stop(struct vhost_poll *poll); void vhost_poll_flush(struct vhost_poll *poll); @@ -63,7 +75,7 @@ struct vhost_virtqueue { struct vhost_poll poll; /* The routine to call when the Guest pings us, or timeout. */ - work_func_t handle_kick; + vhost_work_fn_t handle_kick; /* Last available index we saw. */ u16 last_avail_idx; @@ -84,13 +96,15 @@ struct vhost_virtqueue { struct iovec indirect[VHOST_NET_MAX_SG]; struct iovec iov[VHOST_NET_MAX_SG]; struct iovec hdr[VHOST_NET_MAX_SG]; - size_t hdr_size; + size_t vhost_hlen; + size_t sock_hlen; + struct vring_used_elem heads[VHOST_NET_MAX_SG]; /* We use a kind of RCU to access private pointer. - * All readers access it from workqueue, which makes it possible to - * flush the workqueue instead of synchronize_rcu. Therefore readers do + * All readers access it from worker, which makes it possible to + * flush the vhost_work instead of synchronize_rcu. Therefore readers do * not need to call rcu_read_lock/rcu_read_unlock: the beginning of - * work item execution acts instead of rcu_read_lock() and the end of - * work item execution acts instead of rcu_read_lock(). 
+ * vhost_work execution acts instead of rcu_read_lock() and the end of + * vhost_work execution acts instead of rcu_read_lock(). * Writers use virtqueue mutex. */ void *private_data; /* Log write descriptors */ @@ -110,6 +124,9 @@ struct vhost_dev { int nvqs; struct file *log_file; struct eventfd_ctx *log_ctx; + spinlock_t work_lock; + struct list_head work_list; + struct task_struct *worker; }; long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs); @@ -124,21 +141,22 @@ int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *, struct iovec iov[], unsigned int iov_count, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num); -void vhost_discard_vq_desc(struct vhost_virtqueue *); +void vhost_discard_vq_desc(struct vhost_virtqueue *, int n); int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len); -void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); +int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads, + unsigned count); void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *, - unsigned int head, int len); + unsigned int id, int len); +void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *, + struct vring_used_elem *heads, unsigned count); +void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); void vhost_disable_notify(struct vhost_virtqueue *); bool vhost_enable_notify(struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len); -int vhost_init(void); -void vhost_cleanup(void); - #define vq_err(vq, fmt, ...) do { \ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ if ((vq)->error_ctx) \ @@ -149,7 +167,8 @@ enum { VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) | (1 << VIRTIO_RING_F_INDIRECT_DESC) | (1 << VHOST_F_LOG_ALL) | - (1 << VHOST_NET_F_VIRTIO_NET_HDR), + (1 << VHOST_NET_F_VIRTIO_NET_HDR) | + (1 << VIRTIO_NET_F_MRG_RXBUF), }; static inline int vhost_has_feature(struct vhost_dev *dev, int bit) diff --git a/firmware/Makefile b/firmware/Makefile index 020e629a615cea43c705aa4bb40e3c0384f3161a..b27f09f05d177fb45cd0dc25f674853656ad3789 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -42,7 +42,7 @@ fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \ cxgb3/t3c_psram-1.1.0.bin \ - cxgb3/t3fw-7.4.0.bin \ + cxgb3/t3fw-7.10.0.bin \ cxgb3/ael2005_opt_edc.bin \ cxgb3/ael2005_twx_edc.bin \ cxgb3/ael2020_twx_edc.bin diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex new file mode 100644 index 0000000000000000000000000000000000000000..96399d87bd3581abea09f7cf1c879d3c30ab063f --- /dev/null +++ b/firmware/cxgb3/t3fw-7.10.0.bin.ihex @@ -0,0 +1,1935 @@ +:1000000060007400200380002003700000001000D6 +:1000100000002000E100028400070000E1000288E7 +:1000200000010000E0000000E00000A0010000006E +:1000300044444440E3000183200200002001E0002A +:100040002001FF101FFFD0001FFFC000E300043C91 +:100050000200000020006C841FFFC2A020006CCCB6 +:100060001FFFC2A420006D0C1FFFC2A820006D80DE +:100070001FFFC2AC200003C0C00000E43100EA3121 +:1000800000A13100A03103020002ED306E2A05000C +:10009000ED3100020002160012FFDBC03014FFDA5F +:1000A000D30FD30FD30F03431F244C107249F0D347 +:1000B0000FD30FD30F12FFD5230A00240A00D30F4A +:1000C000D30FD30F03431F244C107249F0D30FD327 +:1000D0000FD30F14FFCE03421F14FFCB03421F1296 +:1000E000FFCCC0302D37302D37342D37382D373CED 
+:1000F000233D017233ED00020012FFC4C0302F37E0 +:10010000002F37102F37202F3730233D017233ED6A +:1001100000020012FFBEC0302737002737102737F4 +:1001200020273730233D017233ED03020012FFB95F +:1001300013FFBA0C0200932012FFB913FFB90C028F +:1001400000932012FFB8C0319320822012FFB71312 +:10015000FFB7932012FFB715FFB316FFB6C030D715 +:100160002005660160001B00000000000000000088 +:10017000043605000200D30FD30F05330C6E3B1479 +:100180000747140704437631E604360505330C6F40 +:100190003BED00020012FFA615FFA3230A00D720A3 +:1001A000070443043E0505330C0747146F3BF00377 +:1001B000020012FFA1C03014FFA1D30FD30FD30F41 +:1001C0009340B4447249F2D30FD30FD30F14FF9B63 +:1001D000834014FF9B834012FF9B230A0014FF9A65 +:1001E000D30FD30FD30F9340B4447249F2D30FD33C +:1001F0000FD30F14FF95834012FF95C92F832084DE +:10020000218522BC22743B0F8650B4559630B433FE +:100210007433F463FFE60000653FE1655FDE12FFC3 +:100220007C230A0028374028374428374828374C91 +:10023000233D017233ED03020000020012FF7AC079 +:1002400032032E0503020012FF7813FF819320C0B2 +:1002500011014931004831010200C00014FF7E0441 +:10026000D23115FF7D945014FF7D04D33115FF7CEE +:10027000945014FF7C04D43115FF7C24560014FFE5 +:100280007B04D53115FF7B24560010FF7A03000054 +:10029000000000000000000000000000000000005E +:1002A000000000000000000000000000000000004E +:1002B000000000000000000000000000000000003E +:1002C000000000000000000000000000000000002E +:1002D000000000000000000000000000000000001E +:1002E000000000000000000000000000000000000E +:1002F00000000000000000000000000000000000FE +:1003000000000000000000000000000000000000ED +:1003100000000000000000000000000000000000DD +:1003200000000000000000000000000000000000CD +:1003300000000000000000000000000000000000BD +:1003400000000000000000000000000000000000AD +:10035000000000000000000000000000000000009D +:10036000000000000000000000000000000000008D +:10037000000000000000000000000000000000007D +:10038000000000000000000000000000000000006D +:10039000000000000000000000000000000000005D +:1003A000000000000000000000000000000000004D +:1003B000000000000000000000000000000000003D +:1003C000000000000000000000000000000000002D +:1003D000000000000000000000000000000000001D +:1003E000000000000000000000000000000000000D +:1003F00000000000000000000000000000000000FD +:1004000000000000000000000000000000000000EC +:1004100000000000000000000000000000000000DC +:1004200063FFFC000000000000000000000000006E +:100430000000000000000000000000001FFC0000A1 +:100440001FFC0000E30005C81FFC00001FFC0000AB +:10045000E30005C81FFC00001FFC0000E30005C806 +:100460001FFFC0001FFFC000E30005C81FFFC00042 +:100470001FFFC018E30005C81FFFC0181FFFC018EA +:10048000E30005E01FFFC0181FFFC294E30005E072 +:100490001FFFC2941FFFC294E300085C1FFFC2A0AD +:1004A0001FFFC59CE300085C200000002000016ADB +:1004B000E3000B582000018020000180E3000CC401 +:1004C0002000020020000203E3000CC42000021CF4 +:1004D00020000220E3000CC8200002202000022699 +:1004E000E3000CCC2000023C20000240E3000CD4CE +:1004F0002000024020000249E3000CD82000024CFA +:1005000020000250E3000CE42000025020000259B9 +:10051000E3000CE82000025C20000260E3000CF421 +:100520002000026020000269E3000CF82000026C49 +:1005300020000270E3000D04200002702000027908 +:10054000E3000D082000028C2000028CE3000D1453 +:100550002000029020000293E3000D14200002AC62 +:10056000200002B0E3000D18200002D0200002F2AB +:10057000E3000D1C200003B0200003B0E3000D4099 +:10058000200003B0200003B0E3000D40200003B0C2 +:10059000200003B0E3000D40200003B0200003B0B2 +:1005A000E3000D40200003B020006EA4E3000D40E6 +:1005B00020006EA420006EA4E30078340000000048 +:1005C00000000000000000001FFC00001FFC0000F5 +:1005D0001FFFC5A01FFFC69020006EA820006EA8B8 
+:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054 +:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D +:10060000003FFFFF8040000010000000080FFFFFC8 +:100610001FFFC27D000FFFFF804FFFFF8000000023 +:1006200000000880B000000560500000600000007D +:1006300040000011350000004100000010000001E2 +:100640002000000000001000400000000500000035 +:10065000800000190400000000000800E100020012 +:1006600010000005806000007000000020000009FC +:10067000001FF8008000001EA0000000F80000002D +:1006800007FFFFFF080000001800000001008001C4 +:10069000420000001FFFC22D1FFFC0EC00010080C0 +:1006A000604000001A0000000C0000001000000A6A +:1006B000000030000001000080000018FC00000075 +:1006C0008000000100004000600008008000001C65 +:1006D0008000001A030000008000040004030403EB +:1006E00050000003FFFFBFFF1FFFC3E400000FFF28 +:1006F000FFFFF000000016D00000FFF7A50000008B +:100700001FFFC4C01FFFC4710001000800000B20C0 +:10071000202FFF801FFFC46500002C00FFFEFFF8A4 +:1007200000FFFFFF1FFFC58800002000FFFFDFFF65 +:100730000000FFEF010011001FFFC3E21FFFC5A073 +:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003 +:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033 +:100760000001FBD01FFFC5C01FFFC6801FFFC5A132 +:10077000E0FFFE001FFFC5B0000080001FFFC54C5A +:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4 +:10079000000100817FFFFFFFE1000600000027103D +:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009 +:1007B0000003D0901FFFC5742B5063802B507980AD +:1007C0002B5090802B50A6801FFFC4790100110F81 +:1007D000202FFE0020300080202FFF000000FFFFB0 +:1007E0000001FFF82B50B2002B50B208000100109E +:1007F0002B50B1802B50B2802B50BA000001001159 +:100800002B50BD282B50BC802B50BDA020300000A9 +:10081000DFFFFE005000000200C0000002000000E8 +:10082000FFFFF7F41FFFC07C000FF800044000003A +:10083000001000000C4000001C400000E00000A080 +:100840001FFFC5501FFD00081FFFC5641FFFC578AF +:100850001FFFC58CE1000690E10006EC00000000DF +:100860000000000000000000000000000100000087 +:100870000000000000000000000000002010004008 +:10088000201000402010004020140080200C0000A8 +:10089000200C0000200C00002010004020140080DC +:1008A0002014008020140080201800C0201C0100AB +:1008B000201C0100201C010020200140201800C045 +:1008C000201800C0201800C0201C0100201800C003 +:1008D000201800C0201800C0201C0100202001406A +:1008E00020200140202001402020094020200940F4 +:1008F000202009402020094020240980FFFFFFFF1D +:10090000FFFFFFFFFFFFFFFF0000000000000000EF +:1009100000000000000000000000000020005588DA +:1009200020005458200055882000558820005394FA +:100930002000539420005394200051D4200051D41F +:10094000200051CC2000513820004FE020004DC045 +:1009500020004B94000000000000000020005558CB +:1009600020005424200054C8200054C82000527C89 +:100970002000527C2000527C2000527C2000527CBF +:10098000200051C42000527C20004F0020004D70F8 +:1009900020004B40000000000000000020000BF091 +:1009A00020003ADC200004C02000473020000BE883 +:1009B000200041F4200003F0200046F020004B1CF2 +:1009C00020003F0020003E1C20003A58200038E85C +:1009D00020003658200031B820003C7820002DD06F +:1009E0002000286420006828200023F0200020D068 +:1009F0002000207C20001D68200018602000158841 +:100A000020000E5420000C3420001134200013204C +:100A1000200043EC20003EB420000BF8200004C06E +:100A200000000000000000000000000000000000C6 +:100A300000000000000000000000000000000000B6 +:100A400000000000000000000000000000000000A6 +:100A50000000000000000000000000000000000096 +:100A60000000000000000000000000000000000086 +:100A70000000000000000000000000000000000076 +:100A80000000000000000000000000000000000066 +:100A90000000000000000000000000000000000056 +:100AA0003264000000000000326400006400640052 +:100AB00064006400640064006400640000000000DE +:100AC0000000000000000000000000000000000026 
+:100AD0000000000000000000000000000000000016 +:100AE0000000000000000000000000000000000006 +:100AF00000000000000000000000000000000000F6 +:100B000000000000000010000000000000000000D5 +:100B100000000000000000000000000000001000C5 +:100B200000000000000000000000000000000000C5 +:100B300000432380000000000000000000000000CF +:100B400000000000000000000000000000000000A5 +:100B50000000000000000000005C94015D94025E53 +:100B600094035F94004300000000000000000000B8 +:100B70000000000000000000000000000000000075 +:100B80000000000000000000000000000000000065 +:100B90000000000000000000005C90015D90025E1B +:100BA00090035F9000530000000000000000000070 +:100BB0000000000000000000000000000000000035 +:100BC0000000000000000000000000000000000025 +:100BD0000000000000000000009C94001D90019D9A +:100BE00094029E94039F94040894050994060A9421 +:100BF000070B94004300000000000000000000000C +:100C000000000000000000000000000000000000E4 +:100C10000000000000000000009C90019D90029EDA +:100C200090071D90039F90047890057990067A9024 +:100C3000077B90005300000000000000000000004F +:100C400000000000000000000000000000000000A4 +:100C5000000000000000000000DC94001D9001DD99 +:100C60009402DE9403DF940404940505940606942C +:100C70000707940808940909940A0A940B0B940036 +:100C80004300000000000000000000000000000021 +:100C9000000000000000000000DC9001DD9002DE9A +:100CA000900B1D9003DF9004B49005B59006B690AC +:100CB00007B79008B89009B9900ABA900BBB90009A +:100CC0005300000063FFFC0020006C6010FFFF0A6F +:100CD0000000000020006C8400D23110FFFE0A00EA +:100CE0000000000020006CCC00D33110FFFE0A0091 +:100CF0000000000020006D0C00D43110FFFE0A003F +:100D00000000000020006D8000D53110FFFE0A00B9 +:100D10000000000063FFFC00E00000A012FFF7826B +:100D200020028257C82163FFFC12FFF303E830045E +:100D3000EE3005C03093209421952263FFFC000023 +:100D40001FFFD000000400201FFFC5A01FFFC6909A +:100D5000200A0011FFFB13FFFB03E631010200161E +:100D6000FFFA17FFFAD30F776B069060B4667763CC +:100D7000F85415F3541AA50F140063FFF90000008E +:100D80006C1004C020D10F006C1004C0C71AEF060D +:100D9000D830BC2BD72085720D4211837105450BCD +:100DA000957202330C2376017B3B04233D0893713B +:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004 +:100DC000088202280A01038E380E0E42C8EE29A6B8 +:100DD0007E6D4A0500208800308C8271D10FC0F0F2 +:100DE000028F387FC0EA63FFE400C0F1C050037E89 +:100DF0000CA2EE0E3D1208820203F538050542CB27 +:100E00005729A67E2FDC100F4F366DFA050020887B +:100E100000308CBC75C03008E208280A0105833810 +:100E2000030342C93E29A67E0D480CD30F6D8A05E7 +:100E300000208800B08C8271D10FC05008F5387541 +:100E4000C0C163FFBBC06002863876C0DA63FFD4DE +:100E50006C101216EED8C1F9C1E8C1C72B221E28AA +:100E6000221DC0D07B81352920060BB702299CFAB0 +:100E7000655008282072288CFF2824726491642A07 +:100E8000B0000CA80C64816F0EA90C6492BB7FA10A +:100E90003FC1CE7CA13669AC336000370029200603 +:100EA000D7D0299CFACC57282072288CFF2824728E +:100EB0006491392AD0000CA80C6481680EA90C64D6 +:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC +:100ED0000F2D25028A32C0900A6E5065E5B529248F +:100EE00067090F4765F5B12C200C1FEEB30CCE112E +:100EF000AFEE29E286B44879830260058219EEAF2D +:100F000009C90A2992A36890078F2009FF0C65F58B +:100F10006E2FE28564F56865559628221D7B810554 +:100F2000D9B060000200C0908B9417EEA50B881416 +:100F300087740B0B47A87718EEA309BB100877023C +:100F400097F018EEA117EEA208A8010B8802074738 +:100F5000021BEE9E97F10B880298F22790232B90AC +:100F60002204781006BB1007471208BB0228902104 +:100F70000777100C88100788020B880217EE968BF3 +:100F80003307BB0187340B880298F3979997F48B4A +:100F90009587399BF588968B3898F688979BF897B4 +:100FA000F998F717EE8D28E28507C7082D74CF084A +:100FB000480B28E68565550F2B221E28221D7B89AC 
+:100FC000022B0A0064BF052CB00728B000DA200607 +:100FD000880A28824CC0D10B8000DBA065AFE76394 +:100FE000FEEA0000292072659E946004E72A2072C0 +:100FF00065AEBF6004DE00002EB0032C2067D4E095 +:1010000065C1058A328C330AFF500C4554BC5564C7 +:10101000F4EB19EE72882A09A90109880C64821F71 +:10102000C0926000DD2ED0032A2067D4E065A0D8EE +:101030008A328B330AFC500B4554BC5564C4BE192C +:10104000EE67882A09A9017989D50BEA5064A4E3DF +:101050000CEE11C0F02F16132E16168AE78CE82A14 +:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001 +:101070006583468837DBC0AE89991E789B022BCCEE +:10108000012B161B29120E2B0A0029161A7FC307E3 +:101090007FC9027EAB01C0B165B49D8B352F0A00BC +:1010A0002A0A007AC30564C3CB2F0A0165F4892B91 +:1010B00012162B1619005104C0C100CC1A2CCCFFFB +:1010C0002C16170CFC132C16182B121A2A121BDCC8 +:1010D000505819B6C0D0C0902E5CF42C12172812AC +:1010E000182F121B2A121A08FF010CAA01883407B4 +:1010F0004C0AAB8B2812192BC6162F86082A860994 +:101100002E74102924672E70038975B1EA2A74039E +:10111000B09909490C659DB42B20672D250265B354 +:10112000FA2B221E2C221D7BC901C0B064BD9D2C50 +:10113000B00728B000DA2006880A28824CC0D10BFC +:101140008000DBA065AFE763FD8289BAB199659045 +:101150009788341CEE2398BA8F331EEE1C0F4F5421 +:101160002FB42C8D2A8A320EDD020CAC017DC966AB +:101170000A49516F92608A3375A65B2CB0130AED51 +:10118000510DCD010D0D410C0C417DC9492EB01200 +:10119000B0EE65E3C6C0D08E378CB88A368FB97C86 +:1011A000A3077AC9027EFB01C0D1CED988350AAD2A +:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26 +:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B +:1011D000EB01C0B164B161C091292467C020D10F77 +:1011E00000008ADAB1AA64A0C02C20672D25026510 +:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7 +:1012000065D28A0A4E516FE202600281C0902924A1 +:1012100067090F4765F2F828221D7B89022B0A0017 +:1012200064BCA92CB00728B000DA2006880A2882FE +:101230004CC0D10B8000DBA065AFE763FC8E0000E3 +:101240000CE9506492ED0CEF11C080281611AFBF6D +:101250002F16198EF88BF7DAE08FF92B1610ABFBEF +:101260007FBB01B1EA0CA8506580D68837DCE0AFBF +:1012700089991C789B022CEC012C161B29120C2C32 +:101280000A0029161A7AE3077AE9027FBB01C0C176 +:1012900065C2A58B352C0A002A0A007AE30564E1B1 +:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5 +:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E +:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78 +:1012D000920263FF018A330AAB5064BEF92CD0132B +:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7 +:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E +:101300008A362FD2097CA3077AC9027EFB01C0B1BD +:1013100065BEC38835DBA0AE8E78EB01B1AB89D753 +:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F +:10133000027DEB01C0C165CE9DC090292467C0200D +:10134000D10F88378C3698140CE90C29161408F83C +:101350000C981D78FB07281214B088281614891DD4 +:101360009F159B16C0F02B121429161A2B161B8BD7 +:10137000147AE30B7AE90688158E1678EB01C0F132 +:1013800065F1BA29121A2F12118A352E121B9A1AD8 +:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED +:1013A000881AC0F098107AE30A7EA9052A12017AF9 +:1013B0008B01C0F164F08160018389368B37991706 +:1013C0000BE80C981F09C90C29161578EB07281291 +:1013D00015B088281615D9C09A199E188A1F2E1282 +:1013E000152A161A2E161BDAC0C0E08C177F930B35 +:1013F0007FA90688188F1978FB01C0E165E13E29B5 +:10140000121A2F12138A352E121B9A1BAFEE2F12AF +:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3 +:1014200098127AE30A7EA9052A12037A8B01C0F189 +:1014300065F10A2E12162E16192A121B005104C02D +:10144000E100EE1AB0EE2E16170EFF132F16180F2E +:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F +:10146000AA2A161B2C161A63FC5E00007FB30263C7 +:10147000FE3163FE2B7EB30263FC3063FC2A000066 +:101480006450C0DA20DBC058168AC020D10FC0914A +:1014900063FD7A00C09163FA44DA20DB70C0D12E7C +:1014A0000A80C09A2924682C7007581575D2A0D1DB 
+:1014B0000F03470B18ED4DDB70A8287873022B7DC6 +:1014C000F8D9B063FA6100002A2C74DB40580EEEA4 +:1014D00063FAE4000029221D2D25027B9901C0B08A +:1014E000C9B62CB00728B000DA2006880A28824C3A +:1014F000C0D10B8000DBA065AFE7C020D10FC09149 +:1015000063FBFF00022A0258024C0AA202060000F6 +:10151000022A025802490AA202060000DB70DA2001 +:10152000C0D12E0A80C09E2924682C7007581554FB +:10153000C020D10FC09463FBC9C09663FBC4C096A2 +:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA +:10155000C2A02AB4002C200C63FF27008D358CB765 +:101560007DCB0263FDD263FC6D8F358ED77FEB029E +:1015700063FDC563FC6000006C1004C020D10F0047 +:101580006C1004C020D10F006C10042B221E2822E6 +:101590001DC0A0C0942924062A25027B8901DBA056 +:1015A000C9B913ED04DA2028B0002CB00703880A6B +:1015B00028824CC0D10B8000DBA065AFE7C020D1F2 +:1015C0000F0000006C10042C20062A210268C805B8 +:1015D00028CCF965812E0A094C6591048F30C1B879 +:1015E0000F8F147FB00528212365812716ECF3297E +:1015F000629E6F98026000F819ECEF2992266890BD +:10160000078A2009AA0C65A0E72A629D64A0E12B45 +:10161000200C0CB911A6992D92866FD9026000DBBF +:101620001DECE70DBD0A2DD2A368D0078E200DEE6C +:101630000C65E0C7279285C0E06470BF1DECEC68C4 +:10164000434E1CECEB8A2B0CAA029A708920089955 +:10165000110D99029971882A98748F329F752821EB +:1016600004088811987718ECDC0CBF11A6FF2DF246 +:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3 +:1016800074DB40580E81D2A0D10FC020D10F0000D2 +:101690000029CCF96490B12C20668931B1CC0C0CB6 +:1016A000472C24666EC60260008509F85065807F6D +:1016B0001CECD18A2B0F08400B881008AA020CAA38 +:1016C000029A7089200899110D99029971883398AE +:1016D000738C329C728A2A9A748934997563FF7D5F +:1016E00000CC57DA20DB30DC4058155FC020D10F2A +:1016F00000DA20C0B65815EE63FFE500DA20581571 +:10170000EC63FFDC00DA20DB30DC40DD5058167A79 +:10171000D2A0D10FC858DA20DB305814C72A2102D2 +:1017200065AFBDC09409A90229250263FFB200007C +:101730002B21045814731DECADC0E02E24668F30AD +:101740002B200C0F8F1463FF66292138C088798302 +:101750001F8C310CFC5064CF562B2104C0C0581490 +:10176000681DECA2C0E08F302B200C0F8F1463FF9C +:101770003E2C20662B2104B1CC0C0C472C2466583F +:1017800014601DEC9AC0E02E24668F302B200C0FC5 +:101790008F1463FF1A0000006C1004C0B7C0A116BC +:1017A000EC9615EC88D720D840B822C04005350209 +:1017B0009671957002A438040442C94B1AEC7B1947 +:1017C000EC7C29A67EC140D30F6D4A0500808800BD +:1017D000208C220A88A272D10FC05008A53875B09B +:1017E000E363FFD76C10069313941129200665520A +:1017F00088C0716898052A9CF965A29816EC6F2933 +:1018000021028A1309094C6590CD8AA00A6A512ADF +:10181000ACFD65A0C2CC5FDB30DA208C115815120C +:10182000C0519A13C7BF9BA98E132EE20968E060CE +:101830002F629E1DEC606FF8026000842DD2266836 +:10184000D0052F22007DF9782C629DC79064C0706E +:101850009C108A132B200C2AA0200CBD11A6DD0A97 +:101860004F14BFA809880129D286AF88288C09792E +:101870008B591FEC520FBF0A2FF2A368F0052822E4 +:10188000007F894729D285D4906590756000430018 +:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF +:1018A000BF0A6E96102FF2A368F00488207F890586 +:1018B00029D285659165DA2058157DC95C6001FFE4 +:1018C00000DA20C0B658157A60000C00C09063FFA3 +:1018D000B50000DA205815766551E48D138C11DBC4 +:1018E000D08DD0022A020D6D515813E39A1364A1D2 +:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF +:10190000FD00C091C0F12820062C2066288CF9A784 +:10191000CC0C0C472C24666FC6098D138DD170DE5C +:1019200002290A00099D02648159C9D38A102B211A +:10193000045813F38A13C0B02B24662EA2092AA0E0 +:10194000200E28141CEC298D1315EC1DC1700A778C +:101950003685562DDC28AC2C9C12DED0A8557CD3C5 +:10196000022EDDF8D3E0DA40055B02DC305BFF8A53 +:10197000D4A028200CB455C0D02B0A882F0A800C84 +:101980008C11A6CC29C285AF3FAB9929C6851CEC2A +:1019900012DEF0AC882D84CF28120229120378F3CE 
+:1019A000022EFDF8289020D3E007880CC1700808AB +:1019B00047289420087736657FAB891313EC10898C +:1019C00090C0F47797491BEC0EC1CA2821048513F7 +:1019D000099E4006EE11875304881185520E880235 +:1019E0000C88029BA09FA18F2B9DA598A497A795DB +:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50 +:101A00001106CC082BC2852DE4CF2BBC202BC6851C +:101A10002A2C748B11580D9CD2A0D10F28203DC0C8 +:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2 +:101A300088201EEBE38F138EE48FF40888110A8848 +:101A4000020F8F14AFEE1FEBF098910FEE029E90F5 +:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6 +:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0 +:101A70002F24722C2502C020D10F871387700707EF +:101A80004763FD6E282138C099798B0263FE9ADD89 +:101A9000F063FE9500DA20DB308C11DD505815968E +:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F +:101AB000AA2E0A802A2468DA205813F1D2A0D10F66 +:101AC000C020D10F6C1006292102C0D07597102AB2 +:101AD00032047FA70A8B357FBF052D25020DD90261 +:101AE000090C4C65C18216EBB41EEBB228629EC095 +:101AF000FA78F30260018829E2266890078A2009B3 +:101B0000AA0C65A17A2A629DDFA064A1772B200C24 +:101B10000CBC11A6CC29C286C08C79830260015707 +:101B200019EBA709B90A2992A368900788200988A8 +:101B30000C65814327C2851CEBA964713A89310980 +:101B40008B140CBB016FB11D2C20669F10B1CC0C07 +:101B50000C472C24666EC60260014009FF5065F1F7 +:101B60003A8A102AAC188934C0C47F973C18EBA974 +:101B70001BEBA88F359C719B708B209D7408BB025A +:101B80009B72C08298751BEBA40F08409B730F8853 +:101B90001198777FF70B2F2102284A0008FF022FA8 +:101BA0002502C0B4600004000000C0B07E97048F1E +:101BB000362F25227D970488372825217C9736C02B +:101BC000F1C0900AF9382F3C200909426490861927 +:101BD000EB7618EB7728967E00F08800A08C00F05A +:101BE0008800A08C00F08800A08C2A629D2DE4A2C1 +:101BF0002AAC182A669D89307797388F338A321835 +:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0 +:101C10008498E1882B9DE59AE69FE71AEB78099F67 +:101C20004006FF110FCC020A880298E2C1FC0FCCDB +:101C3000022CE604C9B82C200C1EEB670CCA11AEAE +:101C4000CC06AA0829A2852DC4CF09B90B29A685DF +:101C5000CF5CC020D10FC081C0900F8938C0877978 +:101C6000880263FF7263FF6600CC57DA20DB30DC4A +:101C7000405813FDC020D10FDA2058148D63FFE8BF +:101C8000C0A063FE82DA20C0B658148963FFD90071 +:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7 +:101CA000045813171EEB44C0D02D246663FEB10008 +:101CB0006C1006D62019EB3F1EEB4128610217EB92 +:101CC0003E08084C65805F8A300A6A5169A3572B29 +:101CD000729E6EB83F2A922668A0048C607AC9343E +:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A +:101CF000A7DD28D2860EBE0A78FB269C112EE2A311 +:101D00002C160068E0052F62007EF91522D285CFDF +:101D10002560000D00DA60C0B6581465C85A60012D +:101D20000F00DA60581462655106DC40DB308D30FC +:101D3000DA600D6D515812D0D3A064A0F384A1C015 +:101D40005104044763FF6D00C0B02C60668931B157 +:101D5000CC0C0C472C64666FC60270960A2B61048B +:101D60005812E7C0B02B64666550B42A3C10C0E737 +:101D7000DC20C0D1C0F002DF380F0F4264F09019B0 +:101D8000EB0A18EB0B28967E8D106DDA0500A08803 +:101D900000C08CC0A089301DEB1A77975388328C15 +:101DA000108F3302CE0BC02492E12261049DE00427 +:101DB00022118D6B9BE59FE798E61FEB1009984079 +:101DC0000688110822020FDD02C18D9DE208220261 +:101DD00092E4B4C22E600C1FEB000CE811A7882C13 +:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8 +:101DF0000F28600CD2A08C1119EAF80C8D11A9885B +:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF +:101E1000C0F00ADF387FE80263FF6C63FF600000F8 +:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C +:101E300063C020D10F0000006C10042920062A2264 +:101E40001EC0392C221D232468C0307AC107DDA0B2 +:101E5000600004000000C0D06E9738C08F2E0A804A +:101E60002B2014C0962924060EBB022E21022B24FF +:101E7000147E8004232502DE307AC10EC8ABDBD08D +:101E8000DA202C0A00580B062E21020E0F4CC8FE39 
+:101E90006000690068956528210208084C65805C2F +:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256 +:101EB0002668B0048C207BC95329A29D1FEAC16407 +:101EC000904A9390C0C31DEAD52B21049D9608BB70 +:101ED000110CBB029B979B911CEAD2C08523E4A204 +:101EE0002BA29D2824068DFA282102B0DD2BBC30C0 +:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD +:101F00000F8EF912EAC82E2689C020D10FDA20C020 +:101F1000B65813E7C020D10F6C10062A2006941083 +:101F200068A80528ACF965825029210209094C6589 +:101F3000920ACC5FDB30DA208C1058134BC051D39F +:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F +:101F50008F3A16EA99B1FB64B13128629E6F88020C +:101F60006001ED294C332992266890078A2009AA3E +:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0 +:101F8000B7110677082972867983026001CD0CB9F2 +:101F90000A2992A36890082C220009CC0C65C1BBC9 +:101FA0002772856471B5282006288CF96481E52C98 +:101FB00020668931B1CC0C0C472C24666EC60260B9 +:101FC00001A109F85065819B2A21048CE488361E02 +:101FD000EA7D088914A9CC08084709881019EA92F3 +:101FE0000ECC029C7099718C2A1EEA9008CC020ECD +:101FF000CC029C722E302C293013283012049910F8 +:102000000688100CEE109F740EAE0209880208EECE +:10201000029E738C3704AA119C758938C0F4997696 +:102020008839C0C1987718EA828E359C7B9E780EDD +:102030008E1408EE029E7A8E301CEA7177E73088A3 +:102040003289339C7C9F7D0E9C4006CC118F2B29BE +:1020500076132D76112876120CAA0218EA68C1C9E7 +:102060000CAA022A761008FF029F7EC0AA60000117 +:10207000C0A6A4BC0CB911A6992892852DC4CF087E +:10208000A80B289685655100C020D10F2B200C0C81 +:10209000B7110677082A72860CB90A6FA902600187 +:1020A000182992A36890082A220009AA0C65A109A0 +:1020B0002A728564A1032C203D0C2C4064C08C8CBA +:1020C000350C8C1464C0848FE57CF37F8C360C8CCB +:1020D0001464C0777CF374283013C0FC78F86CC0AB +:1020E00090292467090C4765C0D719EA4718EA45C3 +:1020F0008F208C3508FF110C8C1408FF0288E49F98 +:10210000A1AC8C09CC029CA08C369FA30C8C14AC87 +:102110008809880298A218EA3DA4BC2F72852DC4B4 +:10212000CF2FFC102F76852F210229207208FF0265 +:10213000B2992924722F2502C020D10F00CC57DA82 +:1021400020DB308C105812C8C020D10FC09163FF23 +:102150008FDA20C0B658135663FFE100DA20581317 +:102160005463FFD82B21045811E61EEA152B200CCE +:10217000C0D02D24668F3A63FE4DDA20DB30DC4080 +:10218000DD505813DDD2A0D10F2A2C748B10580BC0 +:10219000BED2A0D10F292138C08879832E8C310C72 +:1021A000FC5064CE222B2104C0C05811D5C0D01ED3 +:1021B000EA048F3A2B200C63FE0DDA2058133C639F +:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7 +:1021D0002104B1CC0C0C472C24665811C91EE9F817 +:1021E0002B200CC0D02D24668F3A63FDDA0000004E +:1021F0006C10089514C061C1B0D9402A203DC04080 +:102200000BAA010A64382A200629160568A8052C9D +:10221000ACF965C33F1DE9EA6440052F120464F27E +:10222000A02621021EE9E606064C6562E615E9E2F3 +:102230006440D98A352930039A130A990C6490CCEA +:102240002C200C8B139C100CCC11A5CC9C112CC2F7 +:1022500086B4BB7CB3026002D78F100EFE0A2EE25A +:10226000A368E0098620D30F0E660C6562C2881150 +:102270002882856482BA891364905EDA80D9308CB2 +:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007 +:102290007FB718B88A293C10853608C6110E660229 +:1022A0009681058514A5D50F550295800418146DE7 +:1022B0008927889608CB110888140EBB02A8D82954 +:1022C0009C200F88029BA198A088929BA308881449 +:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8 +:1022E000131EE9BD86118D10286285AEDD08FF0B37 +:1022F0002CD4CF2821022F66858B352A207209889D +:1023000002ABAA2825022A2472C020D10F29529E8E +:1023100018E9A96F980260020B28822668800829B4 +:10232000220008990C6591FC2A529DC1CE9A126434 +:10233000A1F22B200C2620060CB8110588082D824E +:10234000860EBE0A7DC3026002052EE2A368E00885 +:102350002F22000EFF0C65F1F6288285D780DE80E3 +:102360006482009816266CF96462012C206688311C +:102370002CCC010C0C472C24666EC6026001BC08F4 
+:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC +:10239000048B2D2830102F211D0C88100BFB090AEF +:1023A00088020988020CBB026441529B709D71989F +:1023B00072C04D8D35D9E064D06ED730DBD0D830C7 +:1023C0007FD714273C10BCE92632168C3996E69C40 +:1023D000E78A37B4382AE6080B131464304A2A8295 +:1023E0001686799A9696978C778A7D9C982B821779 +:1023F0002C7C209A9A2A9C189B99867BB03B298C2E +:10240000086DB9218BC996A52692162AAC18B899E1 +:102410009BA196A08BC786CD9BA22B921596A49BC1 +:10242000A386CB2CCC2026A605C0346BD4200D3B34 +:102430000C0DD8090E880A7FB705C0909988BC8812 +:10244000C0900B1A126DAA069988998B288C18C017 +:10245000D01BE97A1CE97916E96EB1FF2A211C2309 +:10246000E6130F0F4F26E6122F251D7FA906C0F099 +:10247000C08028251D05F6111AE9678F202BE61567 +:102480002CE6162DE61726E6180AFA022AE6142983 +:102490002006299CF96490F829200C8D14C0801A1C +:1024A000E94E0C9C11AA99A5CCDA202BC285289460 +:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF +:1024C000D10F8A356FA546D8308BD56DA90C8A8679 +:1024D0000A8A14CBA77AB335288C10C080282467C9 +:1024E000080B4765B10BDA20DB302C12055811DEE2 +:1024F000D3A0C0C1C0D02DA4039C1463FD22863696 +:102500006461059B709D719872C04D63FEA4C0818B +:1025100063FFC9008814CC87DA20DB308C15581192 +:10252000D2C020D10FDA20C0B658126163FFE40098 +:1025300000DA208B1058125E63FFD8009E178A12B3 +:102540002B21045810EF8E17C09029246663FE34A7 +:10255000C08063FE06DA20DB308C15DD505812E6B1 +:10256000D2A0D10FDA2058125263FFA7002B2138D6 +:10257000C0A87BAB026001048C310CFC5064CE041B +:102580008A122B2104C0C098175810DD8E1763FDE6 +:10259000F32D21382DDCFF0D0D4F2D253865DEF78D +:1025A00028206A7F87050826416460A3C09016E949 +:1025B000141CE9232A200723E61BB1AA0CFD0226DE +:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F +:1025D0001C8B260A0A472BE6208B282AE53E2BE691 +:1025E000212924072820062A2064688346B44463EE +:1025F000FEA5DB30DA208C158D142E0A80C08E28C3 +:10260000246858111FD2A0D10F2E7C4819E8ED2A5A +:1026100032162B76129D712D761328761489960A20 +:102620002A14AA990C9902997069ED71C14663FD4B +:102630008100000064AFB51DE8E22C20168DD20A9F +:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB +:102650002B21046EB81E2C2066B8CC0C0C472C2401 +:1026600066C9C09E178A125810A68E17C0348F20D4 +:10267000C0D02D2466C06826240663FF2E8A122B44 +:1026800021042C20669817B1CC0C0C472C246658DA +:10269000109C8E178716C0D02D246663FCE68D35FE +:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6 +:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE +:1026C0001027D63006460127D6320A440117E8DF24 +:1026D00024D631A74727D63324F21596B794B68D62 +:1026E000C3BCBB9DB58D35299C107D83C22F211D98 +:1026F000C14663FD330000006C1006292006289CAB +:10270000F86582BF2921022B200C09094C6590E154 +:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D +:102720000260028C19E8A609B90A2992A3689007E9 +:102730008C2009CC0C65C27829A2856492722D6226 +:102740009E1AE89C6FD80260026E2AA22629160102 +:1027500068A0082B22000ABB0C65B25C29629DC1EF +:102760008C6492542A21200A806099102C203CC746 +:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4 +:10278000260DBD112DDC1C0D0D410EDD038E27B174 +:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E +:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8 +:1027B000A16000093E01073EB1780987390B770A0D +:1027C00077EB0260020A2C2123282121B1CC0C0CCA +:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD +:1027E000DB30581095292102CC96C0E80E9E022EAF +:1027F0002502CC57DA20DB30DC4058111BC020D139 +:102800000F2C20668931B1CC0C0C472C24666EC687 +:10281000026001D309FD5065D1CD2F0A012E301180 +:1028200029221464E01128221B090C4400C1040071 +:10283000FA1A0A880228261B2E3010C0A0C0B094B5 +:102840001295131CE85F88302CC022088D147787FE +:1028500004C0F10CFA38C041C0F225203CC0840805 +:1028600058010F5F010F4B3805354007BB10C0F012 
+:10287000084F3808FF100FBB0228ECFEC0F0084FCD +:1028800038842B0BA8100AFF102A21200F88020B76 +:10289000880208440218E86E8F1108440228212596 +:1028A0000A2A140828140488110A88022A21049488 +:1028B000F08B2004E41008BB1104BB02C04A04BB27 +:1028C000029BF1842A08AB110BEB0294F40A541119 +:1028D0000B44020555100D1B4094F707BB100B5518 +:1028E00002085502C08195F68433C05094F3B19428 +:1028F0008B3295F898F99BF2C080C1BC24261499BC +:10290000FA9BF598FB853895FC843A94FD8B3B9BAC +:10291000FE883998FF853525F6108436851324F610 +:10292000118B3784122BF612C0B064C07E893077C9 +:1029300097438D3288332E30108F111CE83109995E +:10294000400699112CF614C0C42CF6158C2B2DF6CC +:102950001A28F61B2BF61904A81109880208EE02A2 +:1029600019E827C18008EE0209C90229F6162EF6D9 +:1029700018C09E600001C09A2F200C18E8170CFEAA +:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1 +:1029900085C87F8A268929A7AA9A260A990C090937 +:1029A00048292525655050C020D10F00C09A63FFEB +:1029B000C6DA2058113F63FE38DA20C0B658113C01 +:1029C00063FE2E0068973C2B9CFD64BE24C020D182 +:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B +:1029E000DC3865CDE063FE098A102B2104580FC442 +:1029F000C0B02B246663FE21DB402A2C745809A248 +:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4 +:102A100020D10F006C1004290A801EE80E1FE80E5A +:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B +:102A3000029ED19CD0C051C07013E80A14E8091856 +:102A4000E8072AB285A82804240A234691A986B853 +:102A5000AA2AB685A98827849F25649FD10F0000E4 +:102A60006C100AD630283010292006288CF9648290 +:102A70009B68980B2A9CF965A1B2022A02580FABF9 +:102A800089371BE7CFC89164520E2A21020A0C4CE9 +:102A900065C2588D3019E7C874D7052E212365E229 +:102AA0009E2F929E1AE7C46FF8026002532AA22654 +:102AB00068A0082C22000ACC0C65C2442A929D64AE +:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0 +:102AD00018E7BC64B0052880217B8B432B200C18A1 +:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A +:102AF0002EE2A368E0052F22007EF9372CC2859CC8 +:102B00001864C2332B212F87660B7B360B790C6F31 +:102B10009D266ED2462C203D7BC740CE5560001EC0 +:102B20002A200CC1B28C205811229A1864A2458D1B +:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA +:102B4000E06000022E60030EDB0C6EB20EDC700C37 +:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2 +:102B6000C1C82D21205810BC8C268B279A160CBB6F +:102B70000C7AB3348F18896399F3886298F28E6562 +:102B80009EF82D60108A189D1768D729C0D09DA97E +:102B90002C22182B22139CAB9BAA97A58E667E73C2 +:102BA00002600097CF5860001FDA208B1658108201 +:102BB00065A13863FFBDC081C0908F18C0A29AF98B +:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6 +:102BD0001026C051D6A0C0C02BA0102CA4039B1758 +:102BE0002C1208022A02066B02DF702D60038E177A +:102BF0009D149E100CDD11C0E0AD6D2DDC20580140 +:102C0000188C148B16ACAC2C64038A268929ABAAC9 +:102C10000A990C9A26886609094829252507880CEF +:102C200098662F2218A7FF2F261863FE96DA20DB5E +:102C300030DC40DD50581130D2A0D10FC0302C20F4 +:102C4000668961B1CC0C0C472C24666EC60260000C +:102C5000D2C03009FD5065D0CA8E6764E0696470E7 +:102C600066DB608C18DF70DA202D60038E170CDDB8 +:102C7000119E10AD6D2DDC201EE7755800F923263E +:102C800018DA208B16DC402F2213DD50B1FF2F26DF +:102C900013580FC5D2A0D10F0028203D0848406529 +:102CA0008DE76F953EDA308DB56D990C8CA80C8C44 +:102CB00014CACF7CD32D2AAC10C090292467090DEB +:102CC0004764DDC5600092002C1208066B022D6C73 +:102CD00020077F028E17DA209E101EE75C58007DC9 +:102CE00063FF9A00C09163FFD1000000655081DA54 +:102CF00020DB60DC40580FDCC020C0F02FA403D1E3 +:102D00000FDA20C0B658106A63FFE000006F95022A +:102D100063FD6CDA20DB30DC40DD50C4E0580F5836 +:102D2000D2A0D10F8A152B2104580EF52324662832 +:102D30006010981763FF2100DA2058105D63FFAB25 +:102D4000C858DB30DA20580F3C2A210265AF9CC0FE +:102D50009409A90229250263FF91DB30DC40DD5094 
+:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9 +:102D70000FC020D10FDA202B200C58107263FF6B8C +:102D80006C1004282006C062288CF8658125C0508C +:102D9000C7DF2B221BC0E12A206B29212300A104BD +:102DA000B099292523B1AA00EC1A0BC4010A0A44E0 +:102DB0002A246B04E4390DCC030CBB012B261B64C5 +:102DC000406929200C1BE6FC0C9A110BAA082FA2C3 +:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2 +:102DE00068B0082C22000BCC0C65C0A42BA2851D5A +:102DF000E71E64B09B8C2B2421040DCC029CB08870 +:102E000020C0C50888110C880298B1882A0844118E +:102E100098B48F3494B79FB5C0401EE6EF2DA285BD +:102E20000E9E0825E4CF2DDC282DA6852921020938 +:102E3000094C68941A689820C9402A210265A00BA1 +:102E40002A221E2B221D7AB10265A079C020D10F43 +:102E50002C212365CFDE6000082E21212D21237E29 +:102E6000DBD52B221E2F221D2525027BF901C0B0A8 +:102E700064BFC413E6D02CB00728B000DA20038862 +:102E80000A28824CC0D10B8000DBA065AFE763FF4E +:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3 +:102EA000A08B2008BB1106BB029BA1893499A263A9 +:102EB000FF790000262468DA20DB30DC40DD505842 +:102EC000108ED2A0D10FDA202B200C580FF9C02081 +:102ED000D10F00006C1006073D14C080DC30DB40D1 +:102EE000DA20C047C02123BC3003283808084277C5 +:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB +:102F0000D30F6DDA0500508800308CC0E0C020255A +:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA +:102F2000220F8940941077F704C081048238C0F1E1 +:102F30000B2810C044C02204540104FD3802520181 +:102F400002FE3808DD10821C07EE100E6E020EDD48 +:102F500002242CFEC0E004FE380AEE100E88020D9A +:102F600088028DAB1EE69B08D8020E880298B0C07E +:102F7000E80428100E5E0184A025A125084411084C +:102F80004402052514045511043402C0810E8E3903 +:102F900094B18FAA84109FB475660C26A11FC0F24D +:102FA000062614600009000026A120C0F20626149F +:102FB0000565020F770107873905E61007781008C5 +:102FC000660206550295B625A1040AE611085811B5 +:102FD00008280208660296B7C060644056649053A1 +:102FE000067E11C0F489C288C30B340B96459847FE +:102FF000994618E6829F410459110E99021FE680F6 +:10300000020E4708D80298420E99029F40C1E00E76 +:10301000990299442FA00CB4380CF91114E66F1ED4 +:10302000E666A4FFAE992E928526F4CF0E880B2873 +:103030009685D10F2BA00C1FE6601CE6670CBE1115 +:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552 +:10305000D10FC08005283878480263FEA263FE962F +:103060006C1006C0C06570F18830C03008871477D6 +:103070008712C0B0C0A619E652299022C030CC9762 +:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1 +:103090008225203C0B3F109712831CC070085801FA +:1030A0000D5D01089738C0800B98380777100488A9 +:1030B00010086802087702C0800D98382D3CFE0881 +:1030C00088100D9E388D2B0AEE1008EE0207EE02D6 +:1030D0000CB8100FDD02053B400EDD029D4089203B +:1030E000043D100899110D99022D210409A9020827 +:1030F000DD119941872A05B9100D3D020ABB110D5A +:10310000BB02087702974428212587120828140457 +:103110008811071E4007EE100E99027566092621D8 +:103120001F062614600006002621200626140868C3 +:10313000029B47098802984629200CD2C0C0800C07 +:103140009E111BE6251FE61CAB99AFEE2DE28528EC +:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC +:103160008E51CAE0B2AAB1BB2DDC108F500E78365A +:10317000981008770C9FD898D989538F5299119934 +:10318000DB9FDA7E8309B1CC255C10C97763FFCF62 +:1031900088108D1108E70C9751AD8DD7F078DB01C1 +:1031A000B1F79D5397528830C03008871408884083 +:1031B000648ED565BEC963FEBC0000006C1004D7E8 +:1031C00020B03A8820C0308221CAA0742B1E2972F8 +:1031D000046D080FC980C9918575B133A2527A3B3D +:1031E0000B742B0863FFE900649FECD10FD240D130 +:1031F0000F0000006C100AD6302E3027D950DA406C +:1032000015E5F02430269A1529160464E00264932B +:10321000732920062A9CF865A3CE2A2102270A04D6 +:103220000A0B4C65B3978C3074C7052D212365D4E8 +:10323000A0C0A62B0A032C2200580F3664A3B9178E +:10324000E5DE8E389A1664E3BA2F6027285021C92C 
+:10325000F37E8311C2B08C202A200C580F55D7A0C2 +:10326000CDA16004A200C2B08C202A200C580F29E6 +:10327000D7A064A4862F212E8B680FBF360FB90C00 +:103280006F9D54296027D5B06E920528203D7B8F15 +:103290004CDA20DB50C1C42D211F580EEF8B269A2B +:1032A000189A1989272AAC380B990C7A9353896399 +:1032B000C08099738F6298789F728E659E798D67B2 +:1032C0009D7B8C6695759C7A8E687E53026000B1FA +:1032D0008B1465B050600038DBF063FFA5008A14E2 +:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E +:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344 +:10330000FFE2DA208B18580EAC65A2B163FF9E0075 +:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6 +:103320002D16042CA403DC70DA20DB60DF502D6046 +:1033300003C0E09E109D171EE5B90CDD110D6D0850 +:103340002DDC285BFF478E668F678817AF5FA8A8C4 +:1033500028640375FB01B1EE8A189E669F67892673 +:103360008829AA9909880C99268E6808084805EECC +:103370000C28252515E5939E6865EECC63FEE600D6 +:103380000000C9432F21232B21212FFC010F0F4FB8 +:103390002F25237FBB026003142C20668961B1CCEA +:1033A0000C0C472C24666EC60260022809FD50658D +:1033B000D22264E1B62E602764E1B0DC70DF50DA1F +:1033C00020DB601EE5AB2D6003C08098100CDD1182 +:1033D000AD6D2DDC285BFF22644181C0442B0A00C7 +:1033E0008C202A200C580ECB0AA70265A00FC0B073 +:1033F0002C22002A200C580EC7D7A064AFEFDA2089 +:10340000C1BCC1C82D21208F188E268929AFEE9E00 +:10341000260E990C090948292525580E8FC090C001 +:1034200050C0C288609A191EE566C0A12EE022082D +:103430008F14778704C0810E8938C0800B93102DBC +:10344000203C2921200CDC0104DB010929140BA8F4 +:10345000380CA5380D3D401CE57E8B2B08881007E5 +:1034600055100855020533022821250F154003BBCE +:10347000020CBB0207551005D3100828140ADD11F1 +:103480000488110988020533022921040833029BAC +:1034900070C0808A201BE57708AA110BAA029A71D6 +:1034A000C0A1852A9376957408931103DD020ADD85 +:1034B000029D778C63C1DC9C738B6298789A799BB0 +:1034C00072232214C0C0B1352526149C7B9D7593B0 +:1034D0007A2B621A9B7C2A621C9A7D28621D987E38 +:1034E00025621B957F2362172376102D62182D7697 +:1034F000112C62192C761264E0B98E6077E73DC01A +:10350000FE13E53E1DE53FC1818A628B6304951180 +:103510000E9C4006CC110C5502247615085502C0AD +:10352000802D76148D2B2B761B2A761A287619255A +:10353000761803DD022D76166000030000C0FA2E17 +:10354000200C19E52518E51CA9E90CEE11A8EEC020 +:10355000802DE2852894CF0DFD0B2DE685DA208B9A +:10356000198C158D14580D90D2A0D10FDC70DF503E +:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1 +:103580005563FE53002B203D0B4B4065BC826FE51D +:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D +:1035A000F3162AAC10C090292467090F4764FC6009 +:1035B00060015F00C0FA63FF85C09163FFE8881473 +:1035C000658168DA20DB608C15580DA7C020C0909B +:1035D00029A403D10F8A162B2104580CC9C0A02A94 +:1035E00024668E6863FDCA00002B9CF965B0FDDA85 +:1035F00020580CCE63FC220000DA20C0B6580E2CF6 +:1036000063FFBA002B200C0CBE11A7EE2DE286C181 +:10361000C27DC30260011819E4E909B90A2992A31D +:103620006890082A220009AA0C65A10326E2856495 +:1036300060FD2C20668931B1CC0C0C472C24666FC0 +:10364000C60270960C8A162B2104580CADC0D02DE2 +:1036500024668E3077E74D1CE4E91BE4E98F32885D +:1036600033C0A42D21040E994006991104DD1109DF +:10367000DD029A61C19009DD029B60C0908B2B9D99 +:10368000649F66986799650CBB029B6228200C1AA0 +:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B +:1036A000FC202F86858A1465A0A6C020D10FB0FC0F +:1036B0008B142C2523C8B7022A02066B02580CDE95 +:1036C0002A210265AEF7C0D80DAD022D250263FE9A +:1036D000EC008E14C8E8DA20DB30580CD72A21021F +:1036E00065AEDA07AF022F250263FED100DA20DBD8 +:1036F000308C158D14580E80D2A0D10FDA202B20DB +:103700000C580DEB63FEB600DA202B200C580E0D82 +:1037100063FEAADA20DB308C152D12042E0A8028D5 +:103720000A00282468580CD663FAE500C020D10F9F +:10373000DA20580DDF8914CD92DA20DB308C155851 
+:103740000D4ADBA0C020C0A02AB403D10FC020D1F5 +:103750000F2A2C748B1558064CD2A0D10F000000F4 +:103760006C100E28210224160108084C6583A91F3D +:10377000E49229F29E6F98026003AD1EE48E29E266 +:10378000266890082A220009AA0C65A39B24F29DB2 +:103790006443952A31160A4B412B240BB4BB0B0B07 +:1037A000472B240C0CB611AF66286286C1CC78C3B7 +:1037B0000260037F19E48209B90A2992A36890077D +:1037C0008C2009CC0C65C36B276285647365293135 +:1037D00009C0D02D24668C3599139C2A88369C14F8 +:1037E000982B8E3798159E169E2C8C38C0E10C5C59 +:1037F000149C179C2D88392925042E251D28251C4D +:103800002C3028C0822C243C2930290C0C4708C8B5 +:103810000129243D29311598189912090841089960 +:103820000C299CEC29251F7EC725921C8212282A70 +:1038300000082060991B01023E00093EB128098260 +:1038400039891B0E221102990C821C29251F821C0A +:10385000941D951E24211F15E4880451609A10C1FF +:10386000802B1610252014961F05054301063E00E7 +:103870000D3EB16B0DB6398B3C2D9CFC08663606AF +:10388000441C893D2E26132E26142E26152E246B1D +:1038900025241406D61CC05025261825261B2524B1 +:1038A000672524682832112525232525242525254B +:1038B00025252C2925222D25202B252124252E26A2 +:1038C000252F14E46F16E46D1BE45298192D211C6A +:1038D000C08498719B70892095759577957F967CAB +:1038E000967E98799B7894731BE46714E4680C388F +:1038F000400288100C064015E464016610947D9B1C +:1039000074841D1BE444086602957B18E431851E0F +:103910000B99029972997A0866022B121096768694 +:103920001F6FD2026001C8C0A0991A6D080AB1AA1F +:1039300000A10400E81A7D8B0263FFEE891AC0E043 +:10394000961F1DE43E2B1610951E941D28203D2920 +:10395000761A297612C040C051C0B22D76130806DF +:10396000408D170B8801065E380AEE101BE44A08EA +:103970005438B0A609661188140B44102B761B042A +:10398000EE028B1614E44308DA1406EE020D8810DA +:103990002A761E86131AE41C04EE020D66110866D0 +:1039A000022E76160D14141EE41A0D44110BD814B1 +:1039B0000866020A44022E76182E76102476172600 +:1039C000761FC084287619287611C76F0C24400F03 +:1039D00044111CE3FB26761D26761C2676152676DA +:1039E000148A262676242676252976222E762028E5 +:1039F00076218E1888150DB91016E4278BC70D880F +:103A0000110E5E39ADBB851904EE022676230988B6 +:103A100002861F89102876260A04480544110505E8 +:103A2000480E551105440204EE02851E841D2E76B3 +:103A3000272820069B2D29246A2E31172B12102EA1 +:103A40002538CC83C0D02D2407C0D7090840648016 +:103A50008E9A290928416480AA64E0B42D2406C006 +:103A60009809E9362D0AA02A628501C404ADAA2D61 +:103A700021042A668508DD11883F8E3E2732100812 +:103A8000EA1800C40408E8180088110ECE5308771D +:103A900002C08308DD029D4118E401090D4E9840E3 +:103AA00088209A4397449D4517E3FE1DE3CB058884 +:103AB0001108EE02ADBDC08007EE029E4228D4CFB1 +:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963 +:103AD00097CA28A4A268711C655060C020D10F004D +:103AE0002D2406C080C09809E9360E893863FF731B +:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41 +:103B0000D600000065EF54C098C0D82D240663FF8E +:103B1000522D2406C09063FF4ACC57DA20DB308C4C +:103B200011580C51C020D10F00DA20C0B6580CE05B +:103B300063FFE500DA20580CDE63FFDC2A2C748B6F +:103B400011580551D2A0D10F6C10062820068A33D7 +:103B50006F8202600161C05013E39729210216E3CE +:103B600096699204252502D9502C20159A2814E331 +:103B7000948F2627200B0AFE0C0477092B712064F2 +:103B8000E1398E428D436FBC0260016F00E104B0E9 +:103B9000C800881A08A80808D80298272B200668A9 +:103BA000B32ECE972B221E2C221D0111027BC901A0 +:103BB000C0B064B0172CB00728B000DA2003880A20 +:103BC00028824CC0D10B8000DBA065AFE7C020D1BC +:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC +:103BE000C02B200C0CBC11A6CC28C2862E0A08784B +:103BF000EB611EE3720EBE0A2EE2A368E0052822E6 +:103C0000007E894F29C2851EE37E6490461FE38CA7 +:103C10009E90C084989128200A95930F88029892CC +:103C20008E200FEE029E942F200788262F950A984B 
+:103C3000969A972E200625240768E3432921022A15 +:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B +:103C500063FF4E002E2065CBEDC082282465C9F697 +:103C600005E4310002002A62821BE36D2941020B48 +:103C7000AA022A668209E43129210263FF23000097 +:103C800064DFB88F422E201600F1040DEE0C00EE1A +:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5 +:103CA000B0293221283223B4992936217989A92BC8 +:103CB00032222B362163FFA0C020D10F9F2725245D +:103CC00015ACB82875202B2006C0C12EBCFE64E0C0 +:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE +:103CE000D0868E290EAE0C66E089C0F128205A28B5 +:103CF0008CFE08CF3865FEE863FF580000E00493AF +:103D000010C0810AF30C038339C78F08D80308A8B1 +:103D10000108F80C080819A83303C80CA8B82875BE +:103D200020030B472B24158310CBB700E104B0BC54 +:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA +:103D4000990209094F29250263FE50002D206A0DB2 +:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2 +:103D6000F163FEEF9F2763FFD02E221F65EE3263C3 +:103D7000FF79000028221F658E2763FF6E25240629 +:103D800029210263FE1B00006C10066571332B4C69 +:103D900018C0C7293C18C0A1C08009A8380808422B +:103DA0006481101CE3011AE3022AC67E2A5CFDD35B +:103DB0000F6DAA0500B08800908C8940C0A00988CA +:103DC000471FE32B080B47094C50090D5304DD1026 +:103DD000B4CC04CC100D5D029D310CBB029B30882D +:103DE000438E2098350FEE029E328D26D850A6DDE8 +:103DF0009D268E40C0900E5E5064E0971CE3111E1D +:103E0000E300038B0BC0F49FB19EB02D200A99B341 +:103E10000CDD029DB28F200CFF029FB48E262D2058 +:103E2000079EB68C282DB50A9CB72924072F20069B +:103E30002B206469F339CBB61DE2E22320168DD224 +:103E40000B330C00D10400331AB48DA3C393292281 +:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1 +:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8 +:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C +:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7 +:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A +:103EA000AFEEACBB22B28529E4CF02820B22B685ED +:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339 +:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF +:103ED00018DD50580A9B8940C08063FEE3066E02DD +:103EE000022A02DB30DC40DD505800049A10DB501F +:103EF000DA70580465881063FEF700006C100692B3 +:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10 +:103F10000BD9A07DA30229ADF875C302600084C04F +:103F2000B0C023C0A09D106D0844B89F0EB80A8D84 +:103F3000900EB70BB8770D6D36ADAA9D800D660C4F +:103F4000D8F000808800708C879068B124B2227706 +:103F5000D3278891C0D0CB879890279C1000708879 +:103F600000F08C9D91CB6FC08108BB0375CB36638D +:103F7000FFB4B1222EEC1863FFD485920D770C8626 +:103F8000939790A6D67D6B01B1559693959260005C +:103F900016B3CC2D9C188810D9D078D3C729DDF85A +:103FA00063FFC100C0238A421BE2C000CD322D4412 +:103FB000029B3092318942854379A1051EE2BC0EF5 +:103FC000550187121BE2AB897095350B9902993226 +:103FD00088420A880C98428676A6A696768F44AFC9 +:103FE000AF9F44D10F0000006C10089311D63088A9 +:103FF00030C0910863510808470598389812282165 +:1040000002293CFD08084C6581656591628A630A56 +:104010002B5065B18B0A6F142E0AFF7CA60A2C2048 +:104020005ACCC42D0A022D245A7FE0026002158961 +:104030002888261FE29F09880C65820F2E200B0F0F +:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C +:1040500099AEDD1EE2991CE2990EDD010DCC37C14F +:1040600080084837B88DB488981089601AE2557B6B +:1040700096218B622AA0219C147BA3179D132A20D2 +:104080000C8B108C20580BCA8C148D13DBA0CEAC7B +:104090006001C4002E200C1BE2480CEA110BAA0898 +:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1 +:1040B000F0052822007F892C2BA28564B0AA876294 +:1040C0008826DE700C7936097A0C6FAD1C8F279B21 +:1040D0001508FF0C77F3197E7B729D139C149B15BA +:1040E000CF56600025C0B063FFD0D79063FFDD00DE +:1040F000009D139C14DA20DB70580B2F8B158C1449 +:104100008D1365A06A8E6263FFCC00DA208B11DC10 +:1041100040580AD5D6A08B15C051DE70DA20DC607D 
+:10412000DD405BFF768D138C14D9A02E200C1BE292 +:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547 +:104140002EF4CF0B990B29A68563FF1D00DA20DC26 +:1041500060DD40DE708912282007DF50A9882824FE +:10416000075BFF09D2A0D10F00DBE0DA20580B502B +:104170006550EF2A20140A3A4065A0EBDB60DC4072 +:10418000DD30022A025809BCD6A064A0D584A183E0 +:10419000A00404470305479512036351C05163FE11 +:1041A0005C2C2006D30F28CCFD6480A568C704C012 +:1041B000932924062C2006C0B18D641FE2019D279F +:1041C0009D289D298FF29D2600F10400BB1A00F066 +:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10 +:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C +:1041F000BB36C0E20B0B470EBB372B241618E1F978 +:104200000A09450D0B422B240B29240AB4BE2E2487 +:104210000C7D88572920162FCCFDB09D0A5C520DCD +:10422000CC362C246465FDEC0C0C4764CDE618E11B +:10423000E48E2888820C9F0C00810400FF1AAFEEE8 +:104240009E2963FDCF1CE21163FE13001CE20B6389 +:10425000FE0C8D6563FFA500DA202B200C580B396E +:10426000645F0FC020D10F00C020D10FC09329245C +:1042700016C09363FFA000006C1004C06017E1CD6E +:104280001DE1D0C3812931012A300829240A78A1EF +:1042900008C3B27BA172D260D10FC0C16550512654 +:1042A00025022AD0202F200B290AFB2B20142E2098 +:1042B0001526241509BB010DFF0928F1202B241414 +:1042C000A8EE2EF52064A0A92B221E28221D011184 +:1042D000027B8901DB6064B0172CB00728B000DADC +:1042E0002007880A28824CC0D10B8000DBA065AF74 +:1042F000E7DB30DC40DD50DA205800DE29210209FE +:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2 +:10431000372ED02064E02D022A02033B02DC40DD70 +:10432000505800D4D2A0D10F2B2014B0BB2B241492 +:104330000B0F4164F0797CB7CAC0C10C9C022C25DC +:1043400002D2A0D10FC020D10F2E200669E2C126D3 +:1043500024062B221E2F221D29200B2820150D9903 +:10436000092A9120262415AA882895207BF14960E6 +:104370000048B0BB2B24140B0A4164A0627CB70236 +:104380002C25022B221E2C221DD30F7BC901C0B06D +:10439000C9B62CB00728B000DA2007880A28824C5A +:1043A000C0D10B8000DBA065AFE7C020D10F0000BB +:1043B000262406D2A0D10F0000DB601DE18164BF7E +:1043C0004F2CB00728B000DA2007880A28824CC09A +:1043D000D10B8000DBA065AFE71DE17963FF310001 +:1043E00026240663FF9C00006C1004282006260A81 +:1043F000046F856364502A2920147D9724022A02C1 +:10440000DB30DC40DD50580019292102090A4CC874 +:10441000A2C020D10FC0B10B9B022B2502C020D11E +:104420000F00022A02033B022C0A015800D1C9AA3C +:10443000DA20DB30DC40580A0C29A011D3A07E978B +:10444000082C0AFD0C9C012CA411C0512D2014062F +:10445000DD022D241463FFA4DA20DB30DC40DD50C4 +:10446000C0E0580987D2A0D10F0000006C100616DA +:10447000E1521CE152655157C0E117E14E2821027B +:104480002D220008084C6580932B32000B695129BE +:104490009CFD6590872A629E6EA84C2A722668A0B1 +:1044A000027AD9432A629DCBAD7CBE502B200C0CE6 +:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E +:1044C0002FF2A368F0052822007F89072DD285D31B +:1044D0000F65D0742A210419E17AD30F7A9B2EDA62 +:1044E00020580883600035002D21041BE1757DBB39 +:1044F00024DA20C0B658087ECA546001030B2B5042 +:104500002B240BB4BB0B0B472B240C63FFA0DA202E +:10451000580A67600006DA20C0B6580A656550E0A0 +:10452000DC40DB302D3200022A020D6D515808D2DA +:104530001CE123D3A064A0C8C05184A18EA00404B0 +:10454000470E0E4763FF3500002B2104C08B8931D5 +:10455000C070DF7009F950098F386EB8172C2066CB +:10456000AECC0C0C472C24667CFB099D105808E44B +:104570008D1027246694D11EE126B8DC9ED06550AC +:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD +:10459000F119E10518E10728967EB04BD30F6DBAEB +:1045A0000500A08800C08C2C200CC0201DE10B0C45 +:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09 +:1045C000F685D10FC0800AB83878D0CD63FFC1001E +:1045D0008E300E0E4763FEA12A2C742B0A01044D67 +:1045E000025808D72F200C12E0FC0CF911A699A252 +:1045F000FF27F4CF289285D2A008480B289685D1B2 +:104600000FC020D10F0000006C1004C060CB55DB40 
+:1046100030DC40055D02022A025BFF942921020979 +:10462000084CC882D2A0D10F2B2014B0BB2B24146D +:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5 +:10464000D2A0D10F0000022A02033B02066C02C076 +:10465000D0C7F72E201428310126250228240A0F5E +:10466000EE012E241458010E63FFA300262406D267 +:10467000A0D10F006C1006282102D62008084C6536 +:10468000809D2B200C12E0CC0CB811A2882A8286C7 +:10469000B5497A930260009719E0C909B90A2992CD +:1046A000A36890082A620009AA0C65A08228828566 +:1046B0001CE0D46480799C80B887B14B9B819B10AF +:1046C000655074C0A7D970280A01C0D0078D380D75 +:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD +:1046E0000F6D4A0500808800908C2E3008C0A00015 +:1046F000EE322E740028600C19E0B80C8D11A2DD8A +:10470000A988C0202CD2852284CFD2A00CBC0B2C2F +:10471000D685D10FC0F0038F387FA0C063FFB400EF +:10472000CC582A6C74DB30DC4058080BC020D10F09 +:10473000DA605809DF63FFE7DD402A6C74C0B0DC43 +:104740007058087F2E30088B1000EE322E7400282F +:10475000600C19E0A10C8D11A2DDA988C0202CD21B +:10476000852284CFD2A00CBC0B2CD685D10F0000A3 +:104770006C1004292014282006B19929241468817A +:1047800024C0AF2C0A012B21022C24067BA004C0DC +:10479000D02D2502022A02033B02044C02C0D0584D +:1047A00000C0D2A0D10FC020D10F00006C1004298E +:1047B0003101C2B429240A2A3011C28378A16C7B4A +:1047C000A1696450472C2006C0686FC562CA572D86 +:1047D00020147CD722DA20DB30DC40DD505BFFA5E3 +:1047E000292102090E4CC8E2C020D10FC0F10F9F51 +:1047F000022F2502C020D10FDA20DB30C0C05BFFC2 +:10480000DC28201406880228241463FFC7292015F9 +:104810001BE06C2A200BC0C09C240BAA092BA120F2 +:104820002C2415AB9929A52063FF9900C020D10F36 +:10483000DA20DB30DC40DD50C0E0580891D2A0D156 +:104840000F0000006C1004CB5513E06725221F0DEC +:10485000461106550CA32326221E25261F06440BAF +:1048600024261E734B1DC852D240D10F280A80C087 +:104870004024261FA82828261E28261DD240D10FF6 +:10488000C020D10F244DF824261E63FFD80000005D +:104890006C1004D620282006C0706E85026000D4FB +:1048A0001DE04E19E04612E0442A8CFC64A1302B36 +:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF +:1048C000B8110288082E828609B90A7EC3026000E8 +:1048D0009A2992A368900509AA0C65A08E28828562 +:1048E000648088B8891BE04A94819B80655155C0DB +:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1 +:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F +:104910000500808800908CC0A029600C0C9C11A21E +:10492000CC2BC285AD990B4B0B2BC6852860062777 +:1049300094CF6881222D6015D2A0C9D2C0E22E6426 +:1049400006D10F00C0F008AF387FB0BD63FFB100E3 +:10495000276406D2A0D10F00D2A0D10F00CC57DA25 +:1049600060DB30DC405808C0C020D10FDA60580945 +:104970005063FFE80028221E29221DD30F789901D9 +:10498000C080C1D6C1C11BE018C122AB6B6480429C +:1049900078913F2A80000CAE0C64E0BB02AF0C643F +:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A +:1049B000E864E0A32FACE764F09D2EACE664E097DA +:1049C0002F800708F80BDA807B83022A8DF8D8A0A5 +:1049D00065AFBC28612308D739D97060007B00001F +:1049E0002B600C0CB811A2882C82862A0A087CAB9A +:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0 +:104A00002A828564A0691FDFFE276504C0E3C0C455 +:104A10002E64069CA11CE02B9FA02E600A97A30C7D +:104A2000EE029EA28F600CFF029FA42E60147AEF0C +:104A30004627A417ADBC2F828527C4CF2FFC202F7B +:104A4000868563FE692A6C74C0B1DC90DD4058072E +:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B +:104A6000C1E0DC4009DE39DD50580805D2A0D10F85 +:104A7000DA6058090F63FEE4290A0129A4170DBF63 +:104A8000082E828527F4CF2EEC202E868564500BCD +:104A90002A6C74DB4058017CD2A0D10FC020D10F0A +:104AA0006C10062B221E28221D93107B8901C0B09A +:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8 +:104AC0000747010E4E01AD2D9E11C0402E0A146401 +:104AD000B06E6D084428221D7B81652AB0007EA13E +:104AE0003B7FA1477B51207CA14968A91768AA1484 +:104AF00073A111C09F79A10CC18B78A107C1AE2908 
+:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C +:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C +:104B200089116987BB649FB863FFDC00647FB4634D +:104B3000FFD50000646FD0C041C1AE2AB40063FF4E +:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10 +:104B5000107CB1217AB901C0B0C9B913DF96DA204F +:104B600028B0002CB00703880A28824CC0D10B80E3 +:104B700000DBA065AFE7D240D10F8910659FD463F9 +:104B8000FFF300006C1008C0D0C8598C30292102F6 +:104B90000C0C4760000C8E300E1E5065E19E2921E2 +:104BA00002C0C116DF85090B4C65B0908A300A6ED1 +:104BB0005168E3026000852F629E1BDF7E6EF85312 +:104BC0002BB22668B0052E22007BE94727629DB7ED +:104BD00048CB7F97102B200CB04E0CBF11A6FF299D +:104BE000F2869E12798B4117DF7507B70A2772A3E9 +:104BF000687004882077893029F285DF90D7906526 +:104C000090652A210419DFAE7A9B22DA205806B873 +:104C1000600029002C21041BDFAA7CBB18DA20C00D +:104C2000B65806B3C95860014CC09063FFCCDA2077 +:104C300058089F600006DA20C0B658089D655135B7 +:104C4000DC40DB308D30DA200D6D5158070BC0D0C1 +:104C5000D3A064A120292102C05184A18CA0040406 +:104C6000470C0C4763FF3E00C09B8831DBD008F83F +:104C700050089B3828210498116E8823282066ACA0 +:104C80008C0C0C472C24667CBB159F139E148A1039 +:104C90008B1158071B8E148F13C0D02D24668A30B9 +:104CA000C092C1C81BDF5B7FA6099BF099F12CF471 +:104CB0000827FC106550A4B83ADF70C051C08007C7 +:104CC000583808084264806718DF3819DF392986A8 +:104CD0007E6A420AD30F6DE90500A08800F08CC0FF +:104CE000A08930B4E37F9628C0F207E90B2C940822 +:104CF0009B909F912F200C12DF380CF811A6882969 +:104D00008285A2FF2DF4CFD2A009330B238685D153 +:104D10000F22200C891218DF300C2B11A6BBA82201 +:104D20002D24CF2CB285D2A00C990B29B685D10F9A +:104D3000C087C0900A593879809663FF8ADB30DAE1 +:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2 +:104D500065AE4D2D2502C09063FE45009E142A2CA1 +:104D600074C0B1DC70DD405806F68E14C0D01BDF75 +:104D700028C1C863FF6AC020D10F00006C1006284C +:104D8000210217DF0D08084C65824929729E6F9831 +:104D90000260025019DF082A922668A0078B200AB9 +:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5 +:104DB000C0602B3008C0F164B0712E0AFFB0B86437 +:104DC00081512DBCFE64D0F364505C2A2C74044BDA +:104DD000025800AD0AA2020600000000001ADF0817 +:104DE0002C20076EBB0260022218DEFE13DF081BB8 +:104DF000DF36C0E229200A9AD09ED1ABCB039902BC +:104E000099D223B08026B480B13308330293D318EB +:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C +:104E2000EC0B2CD685655FA2C020D10F2B21048806 +:104E300031DE6008F85008CE386EB8102C2066B10C +:104E4000CC0C0C472C24667CEB026001AF2E30109A +:104E50002930112C301300993200CB3264E1452AFD +:104E600030141EDF1A00AA3278CF050E9C092BC41D +:104E70007F1CDF1766A0050E98092A8480B4A71846 +:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5 +:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C +:104EA0003303AC882A848B2CD03627848C03CC0126 +:104EB0000ECC022CD4365801AD63FF0B2F200C0C06 +:104EC000FB11A7BB2DB286C0987D9302600121190A +:104ED000DEBB09F90A2992A36890082D220009DD9A +:104EE0000C65D10C2DB285DE6064D10488312B2194 +:104EF0000408F85008CE386FB80263FEDF2C206635 +:104F0000B1CC0C0C472C24667CE30263FECE9D10D2 +:104F100060013100293108292504283014B0886443 +:104F200080A62B31092B240AC0812B30162FD423C5 +:104F30002B240BB4BC2C240C8D378B36292504DE96 +:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3 +:104F50009C1101C4048F380DBE1800C4040DB8188C +:104F600000881108FF02C08308CC0218DECC9CA187 +:104F700098A018DECB8C209EA39FA405CC110BCF4C +:104F800053C1E09EA50CFF0208FF029FA218DE8914 +:104F90002624662C729D2684A22CCC182C769D6328 +:104FA000FE250000002D30121CDECD00DA3278DF45 +:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A +:104FC0002A301100AA3263FEEC2E240A2B31099BF1 +:104FD0002B63FF5300CC57DA20DB30DC405807222C +:104FE000C020D10F00DA20C0B65807B163FFE5003A 
+:104FF00000DBF0DA205807AE63FFD9000058064006 +:105000001DDE70C0F126246663FE41008B20280A55 +:10501000FFB1CE23200A2C21040E0E472E24077840 +:1050200031359AD02CD50A96D319DEA62ED416C0C7 +:105030008398D1C0E309B80298D409390299D226DD +:10504000240763FDC958062E8D102624662B2104E3 +:105050002F200C63FD86000008B81119DE6808EEE9 +:1050600002882B9ED59AD0C0EF09880298D204C935 +:10507000110E990299D4C0E49ED163FFC1000000D3 +:105080006C1004C020D10F006C100485210D381164 +:1050900014DE478622A42408660C962205330B935F +:1050A00021743B13C862D230D10FC030BC29992182 +:1050B00099209322D230D10F233DF8932163FFE34F +:1050C0006C100AD620941817DE3CD930B8389819DD +:1050D0009914655256C0E1D2E02E61021DDE390EF0 +:1050E0000E4C65E1628F308E190F6F512FFCFD65FC +:1050F000F1558EE129D0230E8F5077E66B8F181E65 +:10510000DE78B0FF0FF4110F1F146590CE18DE7516 +:105110008C60A8CCC0B119DE2728600B09CC0B0D20 +:10512000880929812028811E2A0A0009880C08BACA +:10513000381BDE6B0CA90A2992947B9B0260008CC1 +:105140002B600C94160CBD11A7DD29D286B84879C6 +:1051500083026000D219DE1909B80A2882A39817C1 +:105160006880026000A36000A51ADE5F84180AEE62 +:1051700001CA981BDE108C192BB0008CC06EB313C3 +:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B +:10519000AE6000380C0C5360000900000018DE51AE +:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4 +:1051B000880929812028811E2A0A0009880C08BA3A +:1051C000380CA90A2992947E930263FF72DA60C0B8 +:1051D000BA58073764507360026A00001ADDF68C13 +:1051E000192AA0008CC06EA31A18DDF20C1C5208FC +:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6 +:1052000063FFC9000C0C5363FF0989607899182962 +:10521000D285C9922B729E1DDDE76EB8232DD22652 +:10522000991369D00B60000DDA60580721600017F0 +:105230000088607D890A9A1A29729D9C129915CF5F +:1052400095DA60C0B658071A6551F98D148C18DBD1 +:10525000D08DD0066A020D6D51580587D3A09A14DF +:1052600064A1E182A085A1B8AF9F1905054702029C +:10527000479518C05163FE602B6104C08B8931C013 +:10528000A009F950098A386EB81F2C6066A2CC0CB0 +:105290000C472C64667CAB119F119E1B8A15580528 +:1052A000988E1B8F11C0A02A64669F1164F0E58957 +:1052B0001388190FFD022E0A006DD9172F810300E4 +:1052C000908DAEFE0080889F9200908C008088B800 +:1052D0009900908C65514E8A10851A8B301FDDC85D +:1052E000881229600708580A2C82942D61040ECC7C +:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D +:105300005D50A29909094729C48065D0DA2E600C46 +:10531000C0D01FDDB10CE811AFEEA7882282852D29 +:10532000E4CF02420B228685D2A0D10F8E300E0E22 +:105330004763FDA2A29C0C0C472C64077AB6CD8B68 +:10534000602E600A280AFF08E80C64810E18DDDD73 +:1053500083168213B33902330B2C34162D350AC051 +:105360002392319F30C020923308B20208E80292A3 +:10537000349832C0802864072B600CD2A01CDD96C4 +:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52 +:105390002DE685D10F8B1888138D30B88C0D8F4773 +:1053A0000D4950B4990499100D0D5F04DD1009FFEB +:1053B000029F800DBB029B8165508D851AB83AC053 +:1053C000F1C0800CF83808084264806B1BDD771947 +:1053D000DD7829B67E8D18B0DD6DDA0500A0880075 +:1053E000C08CC0A063FEF30082138B161DDD8828DD +:1053F000600AC0E02EC4800D880202B20B99239F80 +:1054000020C0D298229D2122600CB2BB0C2D11A786 +:10541000DD28D28508BB0B18DD702BD685A8222E7F +:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7 +:10543000FF168E1B63FEA300C087C0900AF938795F +:10544000809263FF86C020D10F9E1B2A6C74C0B16E +:105450008D1858053B8E1B851A63FE7E886B821360 +:10546000891608BE110ECE0202920B9E25B4991E1B +:10547000DD639F200E88029822C0EF04D8110E88A9 +:10548000029824C0E49E21C080D2A02B600C286426 +:10549000071CDD510CBE11A7EE2DE285ACBB28B474 +:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0 +:1054B00020D10F006C10048633C071C03060000131 +:1054C000B13300310400741A0462017460F1D10F29 +:1054D0006C1004022A02033B025BFFF61CDD391B41 
+:1054E000DD83C79F88B009A903098A019AB0798032 +:1054F0001EC0F00FE4311DDD300002002BD2821EF1 +:10550000DD7C2AC1020EBB022BD6820AE431D10F08 +:1055100028C102C19009880208084F28C50208E482 +:1055200031D10F006C1004C0C00CE43112DD251A1B +:10553000DD2200020029A28218DD701BDD6E26210B +:10554000020B990108660129A68226250206E4318C +:1055500014DD6B15DD66236A9023261685502426FC +:1055600015252617222C50D10F0000006C1008D6EC +:10557000102B0A64291AB41ADD0F0D23111CDD103B +:105580000F2511B81898130E551118DD5DAC55A8EC +:1055900038AA332C80FF2A80FEA933288D01298068 +:1055A0000108AA112880000CAA02088811098802A3 +:1055B00008AA1C288C0828160458086814DD010A5B +:1055C000A70224411A2A30802B120407AA2858085F +:1055D00063B1338B13B4559A6004AC28B4662C566F +:1055E0002B7B69E016DD3A9412C050C0D017DCF472 +:1055F0009D15D370D4102F60802E60829F169E1749 +:10560000881672891A8D128C402A607F0DCC282B47 +:105610003A200CAA28580851C0B10ABE372E354886 +:105620008F1772F91A8D128C402A60810DCC282BAD +:105630003A200CAA28580849C0B10ABE372E354A6C +:10564000B233B444B1556952B6B466C0508F15B880 +:1056500077D370B2FF9F156EF899D10F6C1004C00C +:1056600021D10F006C1004270A001CDCD31FDCE4DE +:105670001EDCE71DDCD01ADD141BDD22C02824B09F +:10568000006D2A75AA48288080C09164806100411D +:105690000415DCCBC03125503600361A06550105FD +:1056A00095390C56110C66082962966E974D0D5966 +:1056B0000A29922468900812DD0602420872993B7A +:1056C00023629512DCC8CB349F300282020E440262 +:1056D000C092993194329233AD52246295C0902495 +:1056E0004C1024669524B0002924A0AA42292480C5 +:1056F000B177B14404044224B400D10FD10FD10FCB +:105700006C10041ADCAC2AA00058021C5BFFD50206 +:105710002A02033B025BFFD11BDCAAC9A12CB10208 +:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF +:10573000C0A00AE43118DCA00002002F828219DC2C +:10574000B32EB10209FF022F86820EE431D10F0081 +:105750006C1004C02002E43114DC9A16DC970002BD +:1057600000226282234102732F0603E431C020D15C +:105770000F19DCE61ADCE52841020A2A0109880132 +:105780002A668228450208E43115DCDC12DCE125BA +:105790004621D10F6C1004292006289CF96480A0B2 +:1057A0002A9CFD65A0968A288D262F0A087AD9049E +:1057B0002B221FC8BD2C206464C0812E22090EAE8E +:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7 +:1057D000C28619DC7A78F3026000AD09B90A299211 +:1057E000A36890082E220009EE0C65E09B29C28573 +:1057F0001FDC846490929F90C0E41FDC919E9128EE +:10580000200AC0E09E930F8802989288200F880299 +:1058100098942F20079A979D962F950A2E24072853 +:10582000200629206468833328C28512DC6B288C0B +:1058300020A2B22E24CF28C685C020D10FC020D1EF +:105840000F2A206A0111020A2A4165AF52DA20C0EC +:10585000B05805EA64AFE5C021D10F00649FC81FAE +:10586000DC582D20168FF209DD0C00F10400DD1A42 +:10587000ADAD9D2912DC5928C285A2B22E24CF28B5 +:105880008C2028C685C020D10FC021D10F00000078 +:105890006C1004260A001BDC9F15DC4928206517C4 +:1058A000DC46288CFE6480940C4D110DBD082CD272 +:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2 +:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF +:1058D000F52AD6F406E4310002002872822AFAFF83 +:1058E000004104290A012F510200991A0A9903095B +:1058F00088012876820FE4312624652BD2F48E5C51 +:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7 +:105910000CB80C09FF0C08FF0C0F2F14C8F960001D +:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE +:10593000020B0B4F2B55020BE431D10F00DB30DA99 +:10594000205BFF941BDC7464AF5D0C4D11ADBD6337 +:10595000FFA8000006E4310002002F728218DC303C +:105960002E510208FF022F76820EE431D10F000083 +:105970006C1004C03003E43116DC1015DC11000299 +:105980000024628274472118DC64875C084801287F +:105990006682CD7319DC620C2A11AA99229283299E +:1059A00092847291038220CC292B51020BE431C0E6 +:1059B00020D10F001FDC5B2E51020FEE012E55028D +:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B 
+:1059D000561DD10F6C10061BDBF71EDBF922B00041 +:1059E0001ADC526F23721DDC39C04818DC511FDCF1 +:1059F0004FDC10D5C083F000808600508A6D4A4F7E +:105A00000F35110D34092440800B560A296294B1D8 +:105A1000330E55092251480F44110C440A8740099E +:105A2000A80C02883622514907883608770CA899B5 +:105A30002966949740296295874109A80C02883607 +:105A400007883608770CA899296695974103034281 +:105A5000B13808084298F0D10F1CDC3613DC372728 +:105A6000B0002332B5647057C091C0D016DC351534 +:105A7000DC33C0402AC00003884328C4006D793C51 +:105A8000004104B14400971A7780148E502FB295CC +:105A90002DB695AFEE2EED2006EE369E5060001826 +:105AA00077A00983509D5023B69560000223B295DC +:105AB000223D2006223622B695B455B8BBD10F0040 +:105AC00003884328C400D10F6C1004C04004E431A3 +:105AD00015DC1D000200885013DC1CCB815BFFBD70 +:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E +:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5 +:105B0000990C0929146000050BA90C092914993076 +:105B100015DBAC2A51020AE4312A2CFC58004B2B2D +:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084 +:105B3000D10F000004E4311EDBA00002002DE28240 +:105B40002FBAFF2C51020FDD012DE6820CE431D17A +:105B50000F0000006C1004D10F0000006C1004C096 +:105B600020D10F006C100413DBFAC0D103230923EA +:105B7000318FC0A06F340260008D19DB8F1BDB906A +:105B800017DBF30C2811A8772672832572822CFA72 +:105B9000FF76514788502E7285255C0425768275E4 +:105BA000E9052572842576827659292E72842E760F +:105BB000822E76830AE431000200239282002104BF +:105BC0002FB10200D61A0C66030633012396820F0A +:105BD000E43126728325728260000200D8A07659D3 +:105BE000220AE43100020023928200210400D21A2A +:105BF0002FB1020C22030232012296820FE431D22D +:105C000080D10F00D280D10FC020D10F6C1004DBE7 +:105C100030862015DB68280A00282502DA2028B003 +:105C2000002CB00705880A28824C2D0A010B800041 +:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47 +:105C4000769101D10F2BA6A3D10F00006C1004C0D8 +:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B +:105C60007786758574C0A076516288508E77B4555A +:105C7000957475E903857695747659278F769F75A7 +:105C80009F740AE431000200239282B42E2FB102E5 +:105C900000E10400D61A0C66030633012396820F36 +:105CA000E431867583747639280AE4310002002EC7 +:105CB0009282B42200210424B10200DF1A0CFF03F7 +:105CC0000FEE012E968204E431D280D10FD8A07657 +:105CD00051D6D280D10F00006C1004290A801EDB3F +:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4 +:105CF000B2850FCC029ED19CD0C051C07013DB592D +:105D000014DB5818DB562AB285A82804240A234637 +:105D100091A986B8AA2AB685A98827849F25649F59 +:105D2000D10F00006C100419DB8B0C2A11A9A98972 +:105D300090C484798B761BDB79ABAC2AC2832CC2EE +:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0 +:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D +:105D6000C94AA929299D0129901F68913270A6036B +:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98 +:105D8000FFB3D230D10F000013DB7503A3018C31B8 +:105D90001DDB130C8C140DCC012CB6A363FFDC00AF +:105DA000C020D10FDA205BFFCCC020D10FC020D1A2 +:105DB0000F0000006C1004DB30C0D019DAFEDA20CE +:105DC00028300022300708481209880A28824CDC53 +:105DD000200B80001BDAF90C4A11ABAA29A2840916 +:105DE000290B29A684D10F006C1004C04118DAF2E7 +:105DF00017DAF40C2611A727277038A866256286C3 +:105E0000007104A35500441A75414822628415DBD1 +:105E10001502320BC922882117DAF10884140744CD +:105E200001754905C834C020D10FD10F0809471D9D +:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C +:105E4000A00FEE0A2DE6242A6284C0200A990B29AD +:105E50006684D10FC020D10F6C1004DB30C0D01885 +:105E6000DAD5DA2025300022300708580A28824C7B +:105E7000DC200B80008931709E121BDACF0C4A1196 +:105E8000ABAA29A28409290B29A684D10F09C952DA +:105E900068532600910418DACAC0A12F811600AAFF +:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26 +:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830 
+:105EC0009A0A0A472EF11600A10400881A08EE0269 +:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50 +:105EE0000B2BC684D10F00006C1004DB30C0D0191E +:105EF000DAB1DA2028300022300709880A28824CDB +:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439 +:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5 +:105F200016DAA80C2711A626266038A87225228624 +:105F3000006104A35500441A7541082222840232EC +:105F40000BD10F00C020D10F6C100415DB050249E6 +:105F5000142956112452120208430F8811C07300ED +:105F6000810400361A008104C78F00771A0877036E +:105F7000074401064402245612D10F006C10066E2D +:105F800023026000AC6420A7C0A0851013DADD16E0 +:105F9000DAF4C040A6AA2BA2AE0B19416490666841 +:105FA000915D68925268933C2AA2AA283C7F288C73 +:105FB0007F0A0A4D2980012880002AACF208881146 +:105FC0000988027589462B3D0129B0002BB00108D4 +:105FD00099110B99027A9934B8332A2A00B1447284 +:105FE00049B160004A7FBF0715DADF63FFB90000DF +:105FF000253AE863FFB10000253AE863FFA90000F5 +:10600000250A6463FFA1C05A63FF9C0000705F080B +:106010002534FF058C142C34FE70AF0B0A8D142E22 +:106020003D012AE4012DE400DA405BFD5063FFA747 +:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8 +:106040001BDACBC080C07160000D00000022A438B4 +:10605000B1AA299C107B915F26928679C2156E6247 +:1060600062C0206D080AB12200210400741A764B28 +:10607000DB63FFEE2292850D6311032514645FCF6D +:10608000D650032D436DD9039820B4220644146DD5 +:106090004922982098219822982398249825982678 +:1060A000982798289829982A982B982C982D982EDC +:1060B000982F222C4063FF971EDA4027E68027E6C0 +:1060C00081D10F00C02063FF830000006C1004C06A +:1060D00062C04112DA3B1ADA3713DA522AA00023DF +:1060E000322D19DA9F2BACFE2992AE6EA30260000E +:1060F0008E090E402D1AC2C2CD0EDC392C251A6431 +:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB +:10611000015805922B211A0ABB28D3A09B50580581 +:10612000A92B52000ABB082A0A005805A815DA91C3 +:106130002D21022C3AE80C3C2804DD022D25029C7E +:10614000505805A08B50AABBC0A15805A01CDA8AE4 +:106150002D21020C3C2806DD0213DA882D25029C35 +:10616000305805988B30AABBC0A25805982A210246 +:10617000C0B40BAA020A0A4F2A25025805ACD10F57 +:10618000242423C3CC2C251A63FF760018DA801C44 +:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF +:1061A0001FDA7C2D203624F47A24F47E24F4820E27 +:1061B000DD0124F4862E0AF707552806DD02C07596 +:1061C0000EDD01050506AB5BA959C0E8AC5C24C433 +:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7 +:1061E000DD0124B4EBC2E027942C0EDD0224942BB5 +:1061F0002E0A800D0D4627546C24546B0EDD022DA3 +:10620000243663FEFC0000006C10042A0A302B0ABE +:10621000035BFF4D12DA53C390292616C3A1C0B306 +:10622000C08A2826175BFF48C03CC3B12B26161A2C +:10623000D9E42AA02023261764A079C3A2C0B15BA9 +:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F +:10625000C0B12326175BFF3CC28F282616C0FE2F35 +:106260002617C2E22E26162A0AA1C0B1C0D82D26B2 +:10627000175BFF352A0AA12A2616C3A6C0B3C1920E +:106280002926175BFF31C3C62C2616C1B32A0AA2E2 +:106290002B2617C0B35BFF2C290AA2292616C1851D +:1062A000282617C2FB2F2616C0E72E26171DDA391F +:1062B0002D2610D10FC3A2C0B35BFF2363FF820062 +:1062C0006C10041CDA031BD9ED18DA3317DA341614 +:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC +:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A +:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A +:10630000A4C2A7CC2D248C2B248A2B24872E248B4B +:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6 +:106320001BDA22C0286D2A33DAC0D9C07C5B020F89 +:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD +:106340002584A4C2B1BBA7CC2D248C2E248B2A2457 +:106350008A2E369F2C369E2C369DB1ACC07919D929 +:10636000D81BDA1413DA121ADA1218DA1314D9D97C +:1063700016DA1304F42812DA1204660C040506A2D5 +:1063800052A858AA5AA3539B3029A50027848AC033 +:1063900091C0A52A848C29848B17DA0B18DA0AA7F6 +:1063A0005726361D26361E2E361F16DA0813DA0833 
+:1063B000A65504330C2826C82E75002D54AC2E5437 +:1063C000AB2E54AA2326E62326E52E26E7D10F007E +:1063D0006C100613D99417D9E224723D2232937FB0 +:1063E0002F0B6D08052832937F8F0263FFF3C0C423 +:1063F000C0B01AD973C051D94004593929A4206EAC +:1064000044020BB502C3281ED96EDDB025E4220577 +:106410002D392DE421C0501ED9EF19D9DF18D9DF4D +:1064200016D9E11DD9ED94102A724517D9AB6DA983 +:106430004BD450B3557A5B17DF50756B071FD9608B +:106440008FF00F5F0C12D9A302F228AE2222D68160 +:10645000D54013D9A0746B0715D95A855005450C42 +:10646000035328B145A73FA832A93322369D2236CF +:106470009E2436802B369F2BF48B2CF48C14D969F8 +:1064800024424DC030041414C84C6D0806B13304C6 +:106490001414C84263FFF20015D947C44000310408 +:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E +:1064B00018D95D2B824D29824E29A5202882537A36 +:1064C000871E2C54008E106FE45D12D93D2F2121C0 +:1064D0002321202F251F04330C23252023251ED103 +:1064E0000FC06218D99F88807E87D98910265400F2 +:1064F0006F94191BD9332AB1200A1A1404AA0C2A42 +:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB +:106510002AB1200A1A1403AA0C2AB5202AB5212A66 +:10652000B51E2AB51FD10F001CD9262BC1212DC1A4 +:10653000202BC51F03DD0C2DC5202DC51ED10F003E +:106540006C100619D91F14D98612D93615D9A3C7CC +:106550003FC0E02E56A82E56A92E56AA2E56AB2383 +:10656000262918D946DB101CD99DC0D42A42452DB6 +:1065700016012C160000B0890A880C98905BFF94D5 +:106580002C22E318D90F0C5C149C842B22E48C84FD +:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479 +:1065A0002A86062922CD0959142986072F22892FE8 +:1065B00086095BFF435BFF1423463BC1B01ED90035 +:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77 +:1065D000E5025804965BFEBD5BFE96C050C0B01647 +:1065E000D8F614D8FE17D96FC0C0C73E93122C2618 +:1065F0002DC0306000440000007F9F0FB155091985 +:1066000014659FF4C0500AA9027FA7EF18D8EADAF0 +:106610005008580A28822C2B0A000B8000005104D5 +:10662000D2A0C091C7AF00991A0A99039912CE3827 +:1066300064206BD3202B20072516032C12022A621C +:10664000827CA86318D8DC01110208580A28822C21 +:10665000DA500B8000D2A0643FD58A310A8A140434 +:10666000AA01C82A2B22010B8B1404BB017BA9456C +:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009 +:1066800019D8CD1AD91488130ADA28DC801DD951FB +:1066900009880A28823C0DAA080B8000652F93D335 +:1066A00020C0B063FF9400007FAF34B155005004A8 +:1066B0000A091963FF42DAB07B7B081AD8C12AA203 +:1066C000000ABA0C1BD9048C310BAB280C8A141CA1 +:1066D000D941ACBB1CD94104AA012BC68163FF8FF1 +:1066E000645F60C050C0B0C7CE9C1263FF5500000D +:1066F0006C100427221EC08008E4311BD8AF0002B2 +:10670000002AB28219D8AF003104C06100661A298C +:1067100091020A6A022AB68209E43115D90C0C38B2 +:1067200011A8532832822432842A8CFC7841102903 +:1067300021022A368297A0096902292502D10F0079 +:106740002B21022C32850B6B022CCCFC2C36829731 +:10675000C02B2502D10F00006C1004C0E71DD89299 +:106760001CD8940D4911D7208B228A200B4B0BD2B9 +:10677000A007A80C9B72288CF4C8346F8E026000AE +:10678000A31FD88AA298AF7B78B334C93DC081C01B +:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1 +:1067A0000500308800508C887008980878B16DD248 +:1067B000A09870D10FC0F0038F387FE0DE63FFD860 +:1067C000027B0CAFBB0B990C643047D830C0F1C0D2 +:1067D0005002F5380505426450792CD67E0B3612EE +:1067E0002F6C100F4F366DFA0500808800208C0644 +:1067F000440CC081C05003B208237C0C03853805CB +:10680000054264505A2CD67ED30F6D4A050020886D +:1068100000308CD2A0A798BC889870D10FD2A0BCB1 +:10682000799970D10FD2302BAD08C0F1C0500BF563 +:1068300038050542CB542CD67E083F14260A100F8B +:10684000660C0646366D6A0500208800B08C8270A2 +:1068500063FF2D00C05003F53875E08063FF7A00B8 +:10686000C06002863876E09F63FF9900C05003F550 +:106870003875E0C463FFBE006C1004D62068520F68 +:10688000695324DA20DB30DC405800F7D2A0D10F66 +:10689000DA20DB30DC405800F49A2424240EC02196 
+:1068A00022640FC020D10F00B83BB04C2A2C748951 +:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE +:1068C0002D240E2890072D9003A488B088B1DD2DCB +:1068D00094032894075BFFA069511DC0E082242A1D +:1068E000600F18D8BF2A240329600E8F202924079F +:1068F00008FF029F209E64D10FC020D10F0000002E +:106900006C1004942319D8B7C0B3083A110BAA022B +:10691000992019D8299A2116D827C05028929D2548 +:1069200064A2288C1828969DD10F00006C100428B2 +:106930002066C038232406B788282466D10F0000BB +:106940006C10060D3C111AD819D820035B0C862256 +:106950000D55118221AA8902320B928105630C9395 +:10696000820C550C792B54CB531CD8111DD80FC059 +:10697000F7A256C031C0A0043A380A0A42769343BF +:10698000044302C9AB2CD67ED30F6DBA0500208814 +:1069900000308C8281A25272917D92818382C83EA6 +:1069A000D10FC071C06002763876F0DB63FFD5008E +:1069B000C020BC89998199809282D10F222DF892B2 +:1069C0008163FFA219D7FA02860CA9669611D940F5 +:1069D000063612961006BB0C64A0442CD67E8A1094 +:1069E000D30F6DAA0500208800908CBC828311C053 +:1069F000E0A433240A01034E380E0E42CAEC2CD612 +:106A00007E6DBA0500208800308C821102520CA2E3 +:106A100082BC22928163FF83BC82928163FF7C00EF +:106A2000C06002363876F0B563FFAF00C070024731 +:106A30003877F0CC63FFC6006C100414D7EBC1525A +:106A4000A424CA3128221D73811C292102659016B5 +:106A50002A300075A912022A02033B022C3007C01B +:106A6000D25801D5653FDCD10F2B300703BB0B0B90 +:106A7000BA0274B3022ABDF8D3A063FFC4000000B9 +:106A80006C1004292006C0706E9741292102C08F26 +:106A90002A2014C0B62B240606AA022A24147980C0 +:106AA000022725022A221E2C221D7AC10EC8ABDA2B +:106AB00020DB302C0A00033D025BF7F96450892D7E +:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C +:106AD00064E0962F21020F0F4C65F0A51AD7B71E60 +:106AE000D7B529A29EC08A798B712BE22668B004A3 +:106AF0008C207BC96629A29D1FD7B264905D9790B8 +:106B0000C0C31DD7C62B21049D9608BB110CBB0228 +:106B10009B919B971CD7C3C08527E4A22BA29D28DD +:106B200024068DFA282102B0DD2BBC302BA69D9DBA +:106B3000FA0C8802282502C8D2C020D10F8EF91283 +:106B4000D7B92E2689C020D10F283000688938DABD +:106B500020DB30DC4058004463FF6300022A022B34 +:106B60000A065800D3220A00D10F655010293000C0 +:106B7000689924022A02033B02DC4058003BC020F3 +:106B8000D10FD270D10F00002A2C74033B02044CA9 +:106B9000025BFEF163FF2700DB30DC402A2C745BD4 +:106BA000FEEEC020D10F00006C1004C83F8926887B +:106BB00029A399992609880C080848282525CC522C +:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B +:106BD0006C1004D820D73082220D451105220C926A +:106BE0008264207407420B13D771D420A3837323CC +:106BF00002242DF8858074514CBC82C0906D08161B +:106C000000408800708C773903D720C0918680744B +:106C10003901D42074610263FFE2CA98C097C04171 +:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28 +:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591 +:106C400000208800308C9780D270D10FBC8FC0E0BC +:106C50000F4E387E90E263FFD6BC8292819280C054 +:106C6000209282D10F0000006C1006C0D71CD74EB6 +:106C70001BD7500D4911D7202E221F28221D0E4E42 +:106C80000BD280078A0C2E761F2AAC80C8346FAED8 +:106C9000026000CB2F0A801AD754A29EAA7A7EA344 +:106CA0003FC93FC0E1C05002E538050542CA552B37 +:106CB000C67EDB20D30F6D4A0500308800B08C2ED5 +:106CC000721DAE9E0EA50C645086D2802E761DC01D +:106CD00091298403D10FC05003E53875D0D363FFE9 +:106CE000CD15D741027E0CA5EE643051C0A1250A16 +:106CF0000002A538033A020505426450922BC67E75 +:106D00000E35129510255C10054536D30F6D5A05CA +:106D100000A08800208CC0A1A3E2C05023FA800309 +:106D2000730C03A538AF730505426450722BC67E01 +:106D3000851005450C6D5A0500208800308CD280E6 +:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D +:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2 +:106D6000D2302E8D08C0F1C0500EF538050542CB4B +:106D7000592BC67E0A3F14C1600F660C064636D3F7 +:106D80000F6D6A0500208800E08C22721D63FF03EE 
+:106D9000C061C05003653875D80263FF6263FF5C51 +:106DA000C05002A53875D08763FF8100C06003F62C +:106DB0003876D0BF63FFB9006C10042A2015292053 +:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F +:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD +:106DE0000A472A2415CAAF8B438942B0A8009104F0 +:106DF00000881AA8FF0FBB029B278F260FB80C78BC +:106E00003B1AC020D10F0000292102C0A20A99021A +:106E1000292502C021D10F008B2763FFDC2BD12055 +:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4 +:106E30008B438C288F42B0AD00F10400DD1AADCC3D +:106E40000CBB029B27DA20B7EB580019C021D10FE9 +:106E50009F2763FFEF0000006C100428203C643083 +:106E60004705306000073E01053EB156076539050C +:106E70004928C77FA933030641076603B1660606A2 +:106E800041A6337E871E222125291AFC732B150269 +:106E9000380C09816000063E01023EB124064239E9 +:106EA00003220AD10FD230D10FC05163FFC00000BE +:106EB0006C100427221EC08008E4311DD6BF0002DA +:106EC000002CD2821BD6BF003104C06100661A2B91 +:106ED000B1020C6C022CD6820BE43119D7440C3A67 +:106EE00011AA932832829780253282243284B455A5 +:106EF00025368275410A292102096902292502D114 +:106F00000F2A21022B32830A6A022B36822A25029B +:106F1000D10F00006C100418D6A80C2711087708B0 +:106F2000267286253C04765B1315D6A405220A2218 +:106F300022A3682002742904227285D10FC020D1B7 +:106F40000F0000006C100419D6A727221EC080096C +:106F5000770208E4311DD6980002002CD2821BD69D +:106F600098003104C06100661A2BB1020C6C022C2F +:106F7000D6820BE43119D71D0C3A11AA932832821C +:106F80009780253282243284B45525368275410B90 +:106F90002A21020A6A022A2502D10F002B21022C83 +:106FA00032830B6B022C36822B2502D10F0000009E +:106FB0006C10041BD6810C2A11ABAA29A286B43806 +:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF +:106FD000290868B00274B90D299D0129901F6E928D +:106FE0000822A285D10FC020D10FC892C020D10F96 +:106FF000DA205BEE88C020D10F0000006C10041472 +:10700000D66E28429E19D66B6F88026000BA29920C +:10701000266890078A2009AA0C65A0AC2A429DC068 +:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA +:10703000C28609B90A7ED30260009A2992A3689099 +:10704000078D2009DD0C65D08C25C2856450862D06 +:107050002104C0306ED80D2C2066B8CC0C0C472C07 +:10706000246665C07B1CD6E218D66B1AD66219D688 +:10707000731DD667C0E49E519D508F209357935542 +:1070800099539A569A5408FF021AD6839F5288261B +:107090009F5A9E599D58935E9C5D935C9A5B08082D +:1070A00048058811985FC0D81FD64C0CB911A49917 +:1070B000289285AFBF23F4CF288C402896858E2652 +:1070C0002D24069E29C020D10FCA33DA20C0B65B1A +:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0 +:1070E0000FDBD05BFE072324662B200C63FF7500AB +:1070F000C72FD10FC72FD10F6C1004C85B292006F2 +:1071000068941C689607C020D10FC020D10FDA20E8 +:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF +:107120002E200C18D6250CEF11A8FF29F286C08856 +:10713000798B791AD6220AEA0A2AA2A368A0048BBC +:10714000207AB96823F2856430621BD62C290A8024 +:107150002C20682820672D21040B881104DD1108DC +:10716000DD020DCC02C0842D4A100DCC021DD624A8 +:1071700098319D308A2B99379C340BAA02C0C09C51 +:10718000359C369A322A2C74DB4028F285C0D328ED +:107190008C2028F6852C25042D24061FD60FDD40D3 +:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE +:1071B000E05BFF3FC020D10F6C100AD6302A2006BA +:1071C00024160128ACF86583862B2122C0F22A21DF +:1071D00024CC572AAC010A0A4F2A25247ABB026024 +:1071E000037F2C21020C0C4C65C3192E22158D3205 +:1071F000C0910EDD0C65D39088381ED5EF64836B8B +:107200008C37C0B8C0960CB9399914B49A9A120D3B +:10721000991199138F6718D5EAC9FB2880217F83BC +:10722000168B142C22002A200C5BFF61D4A064A3CF +:10723000B38F6760002800002B200C89120CBA1154 +:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B +:10725000A368D00488207D893024A28564436427F4 +:10726000212E07F73607F90C6F9D01D7F0DA20DBE6 +:1072700070C1C42D211F5BFEF889268827DDA00977 
+:10728000880C7A8B179A10600006C04063FFCC0010 +:1072900000DA208B105BFEC88D1065A267C0E09EEF +:1072A000488C649C498B658A669B4A9A4B97458FAC +:1072B000677F7302600120CD529D10DA20DB302CF5 +:1072C00012015BFE698D10C051D6A08FA7C0C08A85 +:1072D00068974D9A4C8869896A984E994F8E6A8A48 +:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5 +:1072F0000B8E1477B701C0A1C091C08493159D1760 +:107300009516C0D025203CC030085801089338C0DD +:1073100082083310085B010535400B9D3807DD10EE +:107320000BAB100E19402A211F07991003DD020D27 +:10733000BB020553100933020A55112921250A2AD7 +:10734000140929140499110A99020933028A2B2974 +:1073500021040BAA021BD6270899110955020855CA +:10736000020BAA029A408920881408991109880200 +:1073700019D5A61DD62109880298418B2A934695D6 +:107380004783150DBB0285168D179B448A65896658 +:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7 +:1073A00088268E29AD87972607EE0C0E0E482E25CF +:1073B000259B672B200C87131ED5800CB911AE9925 +:1073C000289285A78828968517D584C090A7BB29C1 +:1073D000B4CF871863FE3C008C60C0E0C091C0F061 +:1073E000C034C0B82A210428203C08AA110B8B0104 +:1073F000038301039F380B9B39C03208FF100388B9 +:1074000001089E380C881407EE100FEE0203880165 +:1074100008983905BF1029211F0ABB1107881008D9 +:10742000FF020BAA0218D57809291403AA022B21FE +:107430002583200B2B1404BB110833110FBB020B47 +:1074400099028B148F2A0B33020833028B2B647042 +:10745000868868974D984C8769886A9341994697C2 +:107460004E984FC07077C701C0719A4718D5E30B8B +:107470007C100CEC0208F802984418D5E00CBC0211 +:1074800008CC029C402A200C295CFEC0801FD54AF3 +:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81 +:1074A000132CE28528A4CFAFCC2CE6852A22152BFD +:1074B0002524B1AA2A26156490DBC9D28F262E2254 +:1074C000090DFF082F26060FEE0C0E0E482E25255F +:1074D0006550E4C020D10F00C07093419F4499468D +:1074E0009A4777C70A1CD5362CC022C0810C873832 +:1074F0001CD5C40B781008E80208B8020C88029862 +:107500004063FF8000CC57DA20DB608C115BFDD636 +:10751000292102689806689403C020D10F2B221EEF +:10752000C0A029221D2A25027B9901C0B064BFE8B2 +:1075300013D5212CB00728B000DA2003880A28824E +:107540004CC0D10B8000DBA065AFE763FFCA000031 +:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3 +:10756000D10FC16DC19D29252C60000429252CD681 +:10757000902624672F2468DA20DB308C11DD502E12 +:107580000A805BFD3FD2A0D10FC168C1A82A252C7B +:1075900063FFDD000000C8DF8C268B29ADCC9C2664 +:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2 +:1075B000015BFD87D2A0D10F2A2C748B115BF6B230 +:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088 +:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1 +:1075E000252463FF1FDA202B200C5BFE5663FF145B +:1075F00012D5858220028257C82163FFFC12D581F3 +:1076000003E83004EE3005B13093209421952263D5 +:10761000FFFC000010D57D910092019302940311AC +:10762000D554821001EA30A21101F031C04004E4C7 +:107630001600020011D5768210234A00032202921E +:107640001011D540C021921004E4318403830282DA +:1076500001810000D23001230000000010D56D919F +:107660000092019302940311D543821001EA30A2E3 +:107670001101F131C04004E41600020011D564820A +:107680001013D4E7032202921004E431840383022E +:107690008201810000D330013300000010D55E91DB +:1076A00000810165104981026510448103CF1F925A +:1076B000019302940311D531821001EA30A2110125 +:1076C000F231C04004E41600020011D550821013BC +:1076D000D4CF032202921004E43184038302820196 +:1076E000C010910391029101810000D43001430048 +:1076F00012D500C03028374028374428374828376B +:107700004C233D017233ED03020063FFFC000000D7 +:1077100010D542910092019302940311D54082103A +:10772000921011D4F28310032202921011D53D124F +:10773000D5049210C04004E41600020011D5348232 +:107740001013D4EB032202921004E4318403830269 +:107750008201810000D53001530000006C10026EE0 +:10776000322FD620056F04043F04745B2A05440CB5 
+:1077700000410400331A220A006D490D73630403AB +:10778000660CB1220F2211031314736302222C0121 +:10779000D10FC83BD10F000073630CC021D10F0083 +:1077A0000000000044495630C020D10F6C10020088 +:1077B00040046B4C07032318020219D10F0203196E +:1077C000C020D10F6C100202EA30D10F6C1002CC35 +:1077D0002503F03160000F006F220503F1316000D6 +:1077E000056F230503F231000200D10F6C1002CCAB +:1077F0002502F030D10F00006F220402F130D10FCA +:107800006F230402F230D10FC020D10F6C1002227E +:107810000A20230A006D280E2837402837442837CD +:107820004828374C233D01030200D10F6C1002029F +:10783000E431D10F0A0000004368656C73696F2062 +:1078400046572044454255473D3020284275696CD3 +:1078500074204D6F6E204D61722020382031373AF0 +:1078600032383A3135205053542032303130206F85 +:107870006E20636C656F70617472612E61736963F1 +:1078800064657369676E6572732E636F6D3A2F68F6 +:107890006F6D652F66656C69782F772F66775F3718 +:1078A0002E392D6977617270292C205665727369A3 +:1078B0006F6E2054337878203030372E30612E3080 +:1078C00030202D20313030373061303010070A0041 +:0478D0000BDFE8756D +:00000001FF diff --git a/firmware/cxgb3/t3fw-7.4.0.bin.ihex b/firmware/cxgb3/t3fw-7.4.0.bin.ihex deleted file mode 100644 index 38dda94bfa6f309643343e66f461095226905cc5..0000000000000000000000000000000000000000 --- a/firmware/cxgb3/t3fw-7.4.0.bin.ihex +++ /dev/null @@ -1,1917 +0,0 @@ -:1000000060007400200380002003700000001000D6 -:1000100000002000E100028400070000E1000288E7 -:1000200000010000E0000000E00000A0010000006E -:1000300044444440E3000183200200002001E0002A -:100040002001FF101FFFD0001FFFC000E300043C91 -:100050000200000020006B741FFFC29020006BBCE8 -:100060001FFFC29420006BFC1FFFC29820006C7021 -:100070001FFFC29C200003C0C00000E43100EA3131 -:1000800000A13100A03103020002ED306E2A05000C -:10009000ED3100020002160012FFDBC03014FFDA5F -:1000A000D30FD30FD30F03431F244C107249F0D347 -:1000B0000FD30FD30F12FFD5230A00240A00D30F4A -:1000C000D30FD30F03431F244C107249F0D30FD327 -:1000D0000FD30F14FFCE03421F14FFCB03421F1296 -:1000E000FFCCC0302D37302D37342D37382D373CED -:1000F000233D017233ED00020012FFC4C0302F37E0 -:10010000002F37102F37202F3730233D017233ED6A -:1001100000020012FFBEC0302737002737102737F4 -:1001200020273730233D017233ED03020012FFB95F -:1001300013FFBA0C0200932012FFB913FFB90C028F -:1001400000932012FFB8C0319320822012FFB71312 -:10015000FFB7932012FFB715FFB316FFB6C030D715 -:100160002005660160001B00000000000000000088 -:10017000043605000200D30FD30F05330C6E3B1479 -:100180000747140704437631E604360505330C6F40 -:100190003BED00020012FFA615FFA3230A00D720A3 -:1001A000070443043E0505330C0747146F3BF00377 -:1001B000020012FFA1C03014FFA1D30FD30FD30F41 -:1001C0009340B4447249F2D30FD30FD30F14FF9B63 -:1001D000834014FF9B834012FF9B230A0014FF9A65 -:1001E000D30FD30FD30F9340B4447249F2D30FD33C -:1001F0000FD30F14FF95834012FF95C92F832084DE -:10020000218522BC22743B0F8650B4559630B433FE -:100210007433F463FFE60000653FE1655FDE12FFC3 -:100220007C230A0028374028374428374828374C91 -:10023000233D017233ED03020000020012FF7AC079 -:1002400032032E0503020012FF7813FF819320C0B2 -:1002500011014931004831010200C00014FF7E0441 -:10026000D23115FF7D945014FF7D04D33115FF7CEE -:10027000945014FF7C04D43115FF7C24560014FFE5 -:100280007B04D53115FF7B24560010FF7A03000054 -:10029000000000000000000000000000000000005E -:1002A000000000000000000000000000000000004E -:1002B000000000000000000000000000000000003E -:1002C000000000000000000000000000000000002E -:1002D000000000000000000000000000000000001E -:1002E000000000000000000000000000000000000E -:1002F00000000000000000000000000000000000FE -:1003000000000000000000000000000000000000ED -:1003100000000000000000000000000000000000DD 
-:1003200000000000000000000000000000000000CD -:1003300000000000000000000000000000000000BD -:1003400000000000000000000000000000000000AD -:10035000000000000000000000000000000000009D -:10036000000000000000000000000000000000008D -:10037000000000000000000000000000000000007D -:10038000000000000000000000000000000000006D -:10039000000000000000000000000000000000005D -:1003A000000000000000000000000000000000004D -:1003B000000000000000000000000000000000003D -:1003C000000000000000000000000000000000002D -:1003D000000000000000000000000000000000001D -:1003E000000000000000000000000000000000000D -:1003F00000000000000000000000000000000000FD -:1004000000000000000000000000000000000000EC -:1004100000000000000000000000000000000000DC -:1004200063FFFC000000000000000000000000006E -:100430000000000000000000000000001FFC0000A1 -:100440001FFC0000E30005C81FFC00001FFC0000AB -:10045000E30005C81FFC00001FFC0000E30005C806 -:100460001FFFC0001FFFC000E30005C81FFFC00042 -:100470001FFFC018E30005C81FFFC0181FFFC018EA -:10048000E30005E01FFFC0181FFFC290E30005E076 -:100490001FFFC2901FFFC290E30008581FFFC290C9 -:1004A0001FFFC58CE3000858200000002000016AEF -:1004B000E3000B542000018020000180E3000CC009 -:1004C0002000020020000203E3000CC02000021CF8 -:1004D00020000220E3000CC420000220200002269D -:1004E000E3000CC82000023C20000240E3000CD0D6 -:1004F0002000024020000249E3000CD42000024CFE -:1005000020000250E3000CE02000025020000259BD -:10051000E3000CE42000025C20000260E3000CF029 -:100520002000026020000269E3000CF42000026C4D -:1005300020000270E3000D0020000270200002790C -:10054000E3000D042000028C2000028CE3000D105B -:100550002000029020000293E3000D10200002AC66 -:10056000200002B0E3000D14200002D0200002F2AF -:10057000E3000D18200003B0200003B0E3000D3CA1 -:10058000200003B0200003B0E3000D3C200003B0C6 -:10059000200003B0E3000D3C200003B0200003B0B6 -:1005A000E3000D3C200003B020006D94E3000D3CFF -:1005B00020006D9420006D94E3007720000000007F -:1005C00000000000000000001FFC00001FFC0000F5 -:1005D0001FFFC5901FFFC67020006D9820006D980A -:1005E000DEFFFE000000080CDEADBEEF1FFFC2A064 -:1005F0001FFCFE001FFFC0941FFFC5C0300000009D -:10060000003FFFFF8040000010000000080FFFFFC8 -:100610001FFFC26D000FFFFF804FFFFF8000000033 -:1006200000000880B000000560500000600000007D -:1006300040000011350000004100000010000001E2 -:100640002000000000001000400000000500000035 -:1006500080000019040000000000080010000005E0 -:10066000806000007000000020000009001FF800FA -:100670008000001EA0000000F800000007FFFFFF40 -:100680000800000018000000010080014200000086 -:100690001FFFC21D1FFFC0DC000100806040000082 -:1006A0001A0000000C0000001000000A00003000DA -:1006B000600008008000001C000100008000001A9B -:1006C00080000018FC0000008000000100004000D5 -:1006D000030000008000040050000003FFFFBFFF84 -:1006E0001FFFC3D400000FFFFFFFF000000016D073 -:1006F0000000FFF7A50000001FFFC4B01FFFC4618A -:100700000001000800000B20202FFF801FFFC455B0 -:1007100000002C00FFFEFFF800FFFFFF1FFFC57861 -:1007200000002000FFFFDFFF0000FFEF01001100CD -:100730001FFFC3D21FFFC590FFFFEFFF0000FFFBAD -:100740001FFFC6301FFFBEA0FFFFF7FF1FFFC064E3 -:100750000000FFFD1FFFC6200001FBD01FFFC5B03A -:100760001FFFC6601FFFC591E0FFFE001FFFC5A071 -:10077000000080001FFFC53C1FFFC5B41FFFC068FD -:100780001FFFC4D01FFCFFD8000100817FFFFFFFC7 -:10079000E1000600000027101FFCFE301FFCFE7069 -:1007A000E10002001FFFC5381FFFC5500003D090B5 -:1007B0001FFFC5642B5063802B5079802B50908095 -:1007C0002B50A6801FFFC4690100110F202FFE00CF -:1007D00020300080202FFF000000FFFF0001FFF805 -:1007E0002B50B2002B50B208000100102B50B180EA -:1007F0002B50B2802B50BA00000100112B50BD28A5 -:100800002B50BC802B50BDA020300000DFFFFE002D 
-:100810005000000200C0000002000000FFFFF7F4DB -:100820001FFFC06C000FF800044000000010000023 -:100830000C4000001C400000E00000A01FFFC5406D -:100840001FFD00081FFFC5541FFFC5681FFFC57CA3 -:10085000E1000690E10006EC00000000000000004E -:100860000000000000000000010000000000000087 -:100870000000000000000000201000402010004098 -:100880002010004020140080200C0000200C0000EC -:10089000200C000020100040201400802014008054 -:1008A00020140080201800C0201C0100201C010022 -:1008B000201C010020200140201800C0201800C08A -:1008C000201800C0201C0100201800C0201800C003 -:1008D000201800C0201C01002020014020200140E1 -:1008E00020200140202009402020094020200940EC -:1008F0002020094020240980FFFFFFFFFFFFFFFFAA -:10090000FFFFFFFF000000000000000000000000EB -:100910000000000000000000200054902000536000 -:1009200020005490200054902000529C2000529CA3 -:100930002000529C200050DC200050DC200050D4CD -:100940002000504020004EE820004CC820004A9C67 -:100950000000000000000000200054602000532C24 -:10096000200053D0200053D0200051842000518417 -:10097000200051842000518420005184200050CC5C -:100980002000518420004E0820004C7820004A4866 -:10099000000000000000000020000BE820003A30BA -:1009A000200004C02000463C20000BE0200041480D -:1009B000200003F0200045FC20004A2420003E5483 -:1009C00020003D70200039AC2000383C200035ACC0 -:1009D0002000310C20003BCC20002D6C2000280092 -:1009E000200067182000238C2000206C2000201895 -:1009F00020001D04200018182000154820000E2C8F -:100A000020000C2C2000110C200012F82000434084 -:100A100020003E0820000BF0200004C00000000071 -:100A200000000000000000000000000000000000C6 -:100A300000000000000000000000000000000000B6 -:100A400000000000000000000000000000000000A6 -:100A50000000000000000000000000000000000096 -:100A60000000000000000000000000000000000086 -:100A70000000000000000000000000000000000076 -:100A80000000000000000000000000000000000066 -:100A900000000000000000000000000032640000C0 -:100AA0000000000032640000640064006400640020 -:100AB00064006400640064000000000000000000A6 -:100AC0000000000000000000000000000000000026 -:100AD0000000000000000000000000000000000016 -:100AE0000000000000000000000000000000000006 -:100AF00000000000000000000000000000000000F6 -:100B000000001000000000000000000000000000D5 -:100B100000000000000000000000100000000000C5 -:100B200000000000000000000000000000432380DF -:100B300000000000000000000000000000000000B5 -:100B400000000000000000000000000000000000A5 -:100B500000000000005C94015D94025E94035F94C9 -:100B60000043000000000000000000000000000042 -:100B70000000000000000000000000000000000075 -:100B80000000000000000000000000000000000065 -:100B900000000000005C90015D90025E90035F9099 -:100BA00000530000000000000000000000000000F2 -:100BB0000000000000000000000000000000000035 -:100BC0000000000000000000000000000000000025 -:100BD00000000000009C94001D90019D94029E94D2 -:100BE000039F94040894050994060A94070B940043 -:100BF00043000000000000000000000000000000B2 -:100C000000000000000000000000000000000000E4 -:100C100000000000009C90019D90029E90071D9096 -:100C2000039F90047890057990067A90077B900056 -:100C30005300000000000000000000000000000061 -:100C400000000000000000000000000000000000A4 -:100C50000000000000DC94001D9001DD9402DE9491 -:100C600003DF94040494050594060694070794088A -:100C700008940909940A0A940B0B9400430000009D -:100C80000000000000000000000000000000000064 -:100C90000000000000DC9001DD9002DE900B1D9052 -:100CA00003DF9004B49005B59006B69007B790089E -:100CB000B89009B9900ABA900BBB9000530000009D -:100CC00063FFFC0020006B5010FFFF0A00000000D3 -:100CD00020006B7400D23110FFFE0A0000000000FB -:100CE00020006BBC00D33110FFFE0A0000000000A2 -:100CF00020006BFC00D43110FFFE0A000000000051 
-:100D000020006C7000D53110FFFE0A0000000000CA -:100D100063FFFC00E00000A012FFF7822002825770 -:100D2000C82163FFFC12FFF303E83004EE3005C076 -:100D30003093209421952263FFFC00001FFFD00018 -:100D4000000400201FFFC5901FFFC670200A00117D -:100D5000FFFB13FFFB03E63101020016FFFA17FF4A -:100D6000FAD30F776B069060B4667763F85415B5C5 -:100D7000541A610F140063FFF90000006C1004C0E6 -:100D800020D10F006C1004C0C71AEF06D830BC2B5E -:100D9000D72085720D4211837105450B9572023380 -:100DA0000C2376017B3B04233D089371A32D12EEA7 -:100DB000FE19EEFEA2767D632C2E0A000882022820 -:100DC0000A01038E380E0E42C8EE29A67E6D4A0532 -:100DD00000208800308C8271D10FC0F0028F387FE4 -:100DE000C0EA63FFE400C0F1C050037E0CA2EE0E27 -:100DF0003D1208820203F538050542CB5729A67E2D -:100E00002FDC100F4F366DFA0500208800308CBCA7 -:100E100075C03008E208280A01058338030342C977 -:100E20003E29A67E0D480CD30F6D8A050020880050 -:100E3000B08C8271D10FC05008F53875C0C163FF06 -:100E4000BBC06002863876C0DA63FFD46C1012161D -:100E5000EED8C1F9C1E8C1C72B221E28221DC0D07F -:100E60007B81312920060BB702299CFA655008289E -:100E70002072288CFF28247264915C2AB0000CA890 -:100E80000C6481670EA90C6492B37FA13769AC2F03 -:100E90006000340000282006D7D0288CFACC572ACE -:100EA00020722AACFF2A24726481352AD0000CA952 -:100EB0000C6491640EAC0C64C31B7FA10768AC0783 -:100EC000C020D10F002D25028A32C0900A6E5065D5 -:100ED000E5B5292467090F4765F5B12C200C1FEEF5 -:100EE000B50CCE11AFEE29E286B4487983026005D5 -:100EF0008219EEB109C90A2992A36890078F2009C7 -:100F0000FF0C65F56E2FE28564F56865559628221D -:100F10001D7B8105D9B060000200C0908B9417EE54 -:100F2000A70B881487740B0B47A87718EEA509BB8D -:100F30001008770297F018EEA317EEA408A8010B8B -:100F400088020747021BEEA097F10B880298F22750 -:100F500090232B902204781006BB1007471208BB81 -:100F6000022890210777100C88100788020B88024E -:100F700017EE988B3307BB0187340B880298F397E1 -:100F80009997F48B9587399BF588968B3898F688D6 -:100F90009797F99BF898F717EE8F28E28507C7080F -:100FA0002D74CF08480B28E68565550F2B221E2887 -:100FB000221D7B89022B0A0064BF042CB00728B0D5 -:100FC00000DA2006880A28824CC0D10B8000DBA002 -:100FD00065AFE763FEE90000292072659E9C60040E -:100FE000E72A207265AEC36004DE00002EB0032C39 -:100FF0002067D4E065C1058A328C330AFF500C4566 -:1010000054BC5564F4EB19EE74882A09A9010988C7 -:101010000C64821FC0926000DD2ED0032A2067D4AA -:10102000E065A0D88A328B330AFC500B4554BC557E -:1010300064C4BE19EE69882A09A9017989D50BEA29 -:101040005064A4E30CEE11C0F02F16132E16168A6E -:10105000E78CE82A16128EE9DFC0AAEA7EAB01B15E -:10106000CF0BA8506583468837DBC0AE89991E78C0 -:101070009B022BCC012B161B29120E2B0A002916C2 -:101080001A7FC3077FC9027EAB01C0B165B49D8BD7 -:10109000352F0A002A0A007AC30564C3CB2F0A0140 -:1010A00065F4892B12162B1619005104C0C100CC0F -:1010B0001A2CCCFF2C16170CFC132C16182B121AFA -:1010C0002A121BDC50581974C0D0C0902E5CF42C2E -:1010D00012172812182F121B2A121A08FF010CAA25 -:1010E000018834074C0AAB8B2812192BC6162F86A1 -:1010F000082A86092E74102924672E70038975B179 -:10110000EA2A7403B09909490C659DB32B20672D19 -:10111000250265B3FA2B221E2C221D7BC901C0B00B -:1011200064BD9C2CB00728B000DA2006880A28820B -:101130004CC0D10B8000DBA065AFE763FD8189BAAD -:10114000B19965909788341CEE2598BA8F331EEEBE -:101150001E0F4F542FB42C8D2A8A320EDD020CAC98 -:10116000017DC9660A49516F92608A3375A65B2C6E -:10117000B0130AED510DCD010D0D410C0C417DC98F -:10118000492EB012B0EE65E3C6C0D08E378CB88A57 -:10119000368FB97CA3077AC9027EFB01C0D1CED9B4 -:1011A00088350AAD020E8E0878EB022DAC0189B7A6 -:1011B000DAC0AF9B79BB01B1CADCB0C0B07DA30778 -:1011C0007AD9027CEB01C0B164B161C09129246776 -:1011D000C020D10F00008ADAB1AA64A0C02C206719 -:1011E0002D250265C3111DEDF88A321EEDFD0DADF2 
-:1011F000010EDD0C65D28A0A4E516FE20260028157 -:10120000C090292467090F4765F2F828221D7B89C1 -:10121000022B0A0064BCA82CB00728B000DA200614 -:10122000880A28824CC0D10B8000DBA065AFE76341 -:10123000FC8D00000CE9506492ED0CEF11C0802889 -:101240001611AFBF2F16198EF88BF7DAE08FF92B36 -:101250001610ABFB7FBB01B1EA0CA8506580D688A5 -:1012600037DCE0AF89991C789B022CEC012C161B13 -:1012700029120C2C0A0029161A7AE3077AE9027F50 -:10128000BB01C0C165C2A58B352C0A002A0A007AB1 -:10129000E30564E1CA2C0A0164CE0D60028E883435 -:1012A0001BEDCF98DA8F331EEDC80F4F542FD42C7F -:1012B0008C2A8A320ECC020BAB010CBB0C65BF0A28 -:1012C0000A49516E920263FF018A330AAB5064BE31 -:1012D000F92CD0130AEE510ECE010E0E410C0C412A -:1012E0000ECC0C65CEE42FD012B0FF65F26EC0B00C -:1012F0008E378CD88A362FD2097CA3077AC9027E12 -:10130000FB01C0B165BEC38835DBA0AE8E78EB01B2 -:10131000B1AB89D7DAC0AF9D79DB01B1CAC0C07B60 -:10132000A3077AB9027DEB01C0C165CE9DC09029AB -:101330002467C020D10F88378C3698140CE90C290B -:10134000161408F80C981D78FB07281214B088288A -:101350001614891D9F159B16C0F02B121429161AFE -:101360002B161B8B147AE30B7AE90688158E1678F8 -:10137000EB01C0F165F1BA29121A2F12118A352E2C -:10138000121B9A1AAFEE2F1210C0A0AF9F79FB016B -:10139000B1EE9F11881AC0F098107AE30A7EA90571 -:1013A0002A12017A8B01C0F164F0816001838936D1 -:1013B0008B3799170BE80C981F09C90C291615785B -:1013C000EB07281215B088281615D9C09A199E184F -:1013D0008A1F2E12152A161A2E161BDAC0C0E08C90 -:1013E000177F930B7FA90688188F1978FB01C0E13E -:1013F00065E13E29121A2F12138A352E121B9A1BF1 -:10140000AFEE2F1212C0A0AF9F79FB01B1EE9F1378 -:10141000881BC0F098127AE30A7EA9052A12037A83 -:101420008B01C0F165F10A2E12162E16192A121B15 -:10143000005104C0E100EE1AB0EE2E16170EFF1395 -:101440002F16180FCC01ACAA2F121A0EBC01ACFC3F -:101450007FCB01B1AA2A161B2C161A63FC5E000072 -:101460007FB30263FE3163FE2B7EB30263FC306305 -:10147000FC2A00006450C0DA20DBC0581648C020A7 -:10148000D10FC09163FD7A00C09163FA44DA20DB8A -:1014900070C0D12E0A80C09A2924682C7007581574 -:1014A00038D2A0D10F03470B18ED4FDB70A8287876 -:1014B00073022B7DF8D9B063FA6100002A2C74DB2B -:1014C00040580EB363FAE4000029221D2D25027B4B -:1014D0009901C0B0C9B62CB00728B000DA20068840 -:1014E0000A28824CC0D10B8000DBA065AFE7C0208A -:1014F000D10FC09163FBFF00022A025802440AA2E6 -:1015000002060000022A025802410AA20206000056 -:10151000DB70DA20C0D12E0A80C09E2924682C708E -:1015200007581517C020D10FC09463FBC9C096633C -:10153000FBC4C09663FBBF002A2C74DB30DC405B2D -:10154000FE11DBA0C2A02AB4002C200C63FF2700F0 -:101550008D358CB77DCB0263FDD263FC6D8F358EEC -:10156000D77FEB0263FDC563FC6000006C1004C014 -:1015700020D10F006C1004C020D10F006C10042B80 -:10158000221E28221DC0A0C0942924062A25027BE1 -:101590008901DBA0C9B913ED06DA2028B0002CB010 -:1015A0000703880A28824CC0D10B8000DBA065AFFE -:1015B000E7C020D10F0000006C10042C20062A2167 -:1015C0000268C80528CCF965812E0A094C6591048A -:1015D0008F30C1B80F8F147FB00528212365812774 -:1015E00016ECF529629E6F98026000F819ECF1295B -:1015F00092266890078A2009AA0C65A0E72A629DB6 -:1016000064A0E12B200C0CB911A6992D92866FD9FC -:10161000026000DB1DECE90DBD0A2DD2A368D007E6 -:101620008E200DEE0C65E0C7279285C0E06470BF88 -:101630001DECEE68434E1CECED8A2B0CAA029A704E -:1016400089200899110D99029971882A98748F320E -:101650009F75282104088811987718ECDE0CBF11BB -:10166000A6FF2DF285A8B82E84CF2DDC282DF68577 -:10167000C85A2A2C74DB40580E46D2A0D10FC02085 -:10168000D10F00000029CCF96490B12C206689317B -:10169000B1CC0C0C472C24666EC60260008509F89C -:1016A0005065807F1CECD38A2B0F08400B881008F4 -:1016B000AA020CAA029A7089200899110D99029920 -:1016C00071883398738C329C728A2A9A74893499FF -:1016D0007563FF7D00CC57DA20DB30DC4058151DE8 
-:1016E000C020D10F00DA20C0B65815AC63FFE5006A -:1016F000DA205815AA63FFDC00DA20DB30DC40DD9D -:1017000050581638D2A0D10FC858DA20DB30581400 -:101710008A2A210265AFBDC09409A9022925026366 -:10172000FFB200002B21045814351DECAFC0E02E91 -:1017300024668F302B200C0F8F1463FF662921380D -:10174000C08879830263FF5B2C20662B2104B1CC17 -:101750000C0C472C24665814291DECA3C0E02E2441 -:10176000668F302B200C0F8F1463FF376C1004C072 -:10177000B7C0A116ECA015EC92D720D840B822C073 -:10178000400535029671957002A438040442C94B95 -:101790001AEC8519EC8629A67EC140D30F6D4A0547 -:1017A00000808800208C220A88A272D10FC05008C5 -:1017B000A53875B0E363FFD76C1006931394112915 -:1017C0002006655288C0716898052A9CF965A29820 -:1017D00016EC792921028A1309094C6590CD8AA05B -:1017E0000A6A512AACFD65A0C2CC5FDB30DA208CDE -:1017F000115814D8C0519A13C7BF9BA98E132EE25B -:101800000968E0602F629E1DEC6A6FF80260008438 -:101810002DD22668D0052F22007DF9782C629DC735 -:101820009064C0709C108A132B200C2AA0200CBD41 -:1018300011A6DD0A4F14BFA809880129D286AF88F6 -:10184000288C09798B591FEC5C0FBF0A2FF2A36813 -:10185000F0052822007F894729D285D490659075AC -:1018600060004300002B200C1FEC540CBD11A6DDC2 -:1018700029D2860FBF0A6E96102FF2A368F0048853 -:10188000207F890529D285659165DA20581543C9DD -:101890005C6001FF00DA20C0B658154060000C0003 -:1018A000C09063FFB50000DA2058153C6551E48D07 -:1018B000138C11DBD08DD0022A020D6D515813AD5F -:1018C0009A1364A1CEC75F8FA195A9C0510F0F478E -:1018D0009F1163FEFD00C091C0F12820062C2066F8 -:1018E000288CF9A7CC0C0C472C24666FC6098D13E5 -:1018F0008DD170DE02290A00099D02648159C9D385 -:101900008A102B21045813BD8A13C0B02B24662ED5 -:10191000A2092AA0200E28141CEC338D1315EC27E5 -:10192000C1700A773685562DDC28AC2C9C12DED08F -:10193000A8557CD3022EDDF8D3E0DA40055B02DC4B -:10194000305BFF8AD4A028200CB455C0D02B0A8865 -:101950002F0A800C8C11A6CC29C285AF3FAB9929E8 -:10196000C6851CEC1CDEF0AC882D84CF2812022921 -:10197000120378F3022EFDF8289020D3E007880C9C -:10198000C170080847289420087736657FAB891313 -:1019900013EC1A8990C0F47797491BEC18C1CA2838 -:1019A00021048513099E4006EE1187530488118592 -:1019B000520E88020C88029BA09FA18F2B9DA59898 -:1019C000A497A795A603FF029FA22C200C1EEC0152 -:1019D000AECE0CCC1106CC082BC2852DE4CF2BBC8F -:1019E000202BC6852A2C748B11580D69D2A0D10FDB -:1019F00028203DC0E07C877F2E24670E0A4765A023 -:101A00007B1AEBFF88201EEBED8F138EE48FF4081A -:101A100088110A88020F8F14AFEE1FEBFA98910F0E -:101A2000EE029E901EEBF9C0801AEBEA2CD285AA3A -:101A3000BAB8CC28A4CF2CD6852C21022F20720E28 -:101A4000CC02B1FF2F24722C2502C020D10F8713A6 -:101A5000877007074763FD6E282138C099798B028C -:101A600063FE9ADDF063FE9500DA20DB308C11DD39 -:101A70005058155CD2A0D10FC0E163FF7A8B138C54 -:101A800011DD50C0AA2E0A802A2468DA205813BC1F -:101A9000D2A0D10FC020D10F6C1006292102C0D0D6 -:101AA0007597102A32047FA70A8B357FBF052D2535 -:101AB000020DD902090C4C65C18216EBBE1EEBBCAF -:101AC00028629EC0FA78F30260018829E2266890B5 -:101AD000078A2009AA0C65A17A2A629DDFA064A169 -:101AE000772B200C0CBC11A6CC29C286C08C798324 -:101AF0000260015719EBB109B90A2992A36890074E -:101B0000882009880C65814327C2851CEBB364716A -:101B10003A8931098B140CBB016FB11D2C20669FD3 -:101B200010B1CC0C0C472C24666EC6026001400933 -:101B3000FF5065F13A8A102AAC188934C0C47F97E7 -:101B40003C18EBB31BEBB28F359C719B708B209DC7 -:101B50007408BB029B72C08298751BEBAE0F0840E5 -:101B60009B730F881198777FF70B2F2102284A006B -:101B700008FF022F2502C0B4600004000000C0B0BE -:101B80007E97048F362F25227D97048837282521BC -:101B90007C9736C0F1C0900AF9382F3C20090942E1 -:101BA00064908619EB8018EB8128967E00F08800FF -:101BB000A08C00F08800A08C00F08800A08C2A6225 -:101BC0009D2DE4A22AAC182A669D89307797388F1C 
-:101BD000338A3218EB8A07BE0B2C2104B4BB04CC29 -:101BE0001198E0C08498E1882B9DE59AE69FE71A5A -:101BF000EB82099F4006FF110FCC020A880298E28F -:101C0000C1FC0FCC022CE604C9B82C200C1EEB71D1 -:101C10000CCA11AECC06AA0829A2852DC4CF09B9D9 -:101C20000B29A685CF5CC020D10FC081C0900F8941 -:101C300038C08779880263FF7263FF6600CC57DA89 -:101C400020DB30DC405813C3C020D10FDA205814F9 -:101C50005363FFE8C0A063FE82DA20C0B658144F79 -:101C600063FFD900DB402A2C74580CC9D2A0D10FD5 -:101C70008A102B21045812E11EEB4EC0D02D246691 -:101C800063FEB1006C1006D62019EB491EEB4B2801 -:101C9000610217EB4808084C65805F8A300A6A5178 -:101CA00069A3572B729E6EB83F2A922668A0048CB7 -:101CB000607AC9342A729D2C4CFECAAB2B600CB6DC -:101CC0004F0CBD11A7DD28D2860EBE0A78FB269CDC -:101CD000112EE2A32C160068E0052F62007EF91594 -:101CE00022D285CF2560000D00DA60C0B658142BD3 -:101CF000C85A60010F00DA60581428655106DC40AC -:101D0000DB308D30DA600D6D5158129AD3A064A08B -:101D1000F384A1C05104044763FF6D00C0B02C6080 -:101D2000668931B1CC0C0C472C64666FC602709684 -:101D30000A2B61045812B1C0B02B64666550B42AF6 -:101D40003C10C0E7DC20C0D1C0F002DF380F0F42EA -:101D500064F09019EB1418EB1528967E8D106DDA4F -:101D60000500A08800C08CC0A089301DEB247797A7 -:101D70005388328C108F3302CE0BC02492E1226143 -:101D8000049DE00422118D6B9BE59FE798E61FEB15 -:101D90001A0998400688110822020FDD02C18D9DA4 -:101DA000E208220292E4B4C22E600C1FEB0A0CE897 -:101DB00011A7882C8285AFEE0C220B2BE4CF228654 -:101DC00085D2A0D10F28600CD2A08C1119EB020C87 -:101DD0008D11A988A7DD2ED2852B84CF0ECC0B2C9C -:101DE000D685D10FC0F00ADF387FE80263FF6C634D -:101DF000FF6000002A6C74C0B2DC20DD4058128FF6 -:101E0000C0B063FF63C020D10F0000006C10042C31 -:101E1000221D2A221EC049D320293006243468C03E -:101E2000407AC105DDA060000200C0D06E9738C0C6 -:101E30008F2E0A802B3014C0962934060EBB022E3A -:101E400031022B34147E8004243502DE407AC10E28 -:101E5000C8ABDBD0DA302C0A00580AE52E31020E6E -:101E60000F4CC8FEC020D10F6895F8283102080831 -:101E70004C658FEF1AEAD01CEACE2BA29EC09A7B4B -:101E80009B462BC22668B0048D307BD93B29A29D8E -:101E9000C0E3CB9394901BEAE02D31049B9608DDC0 -:101EA000110EDD029D979D9112EADDC0E524C4A2CA -:101EB0002E34062F310228A29D02FF02288C3028E2 -:101EC000A69D2F3502C020D10FDA30C0B65813B30B -:101ED000C020D10F6C1006292006689805289CF9AF -:101EE00065825D29210209094C659210CD51DB30D4 -:101EF000DA20044C02581317C051D3A0C7AF2A36BA -:101F00000AC0E019EAAD1DEAB31FEAAC8A3A16EA44 -:101F1000A9B1AC64C13528629E6F88026001F129C5 -:101F2000DC332992266890078B2009BB0C65B1E051 -:101F300027629DC08E6471D82B200C0CBC11A6CCDE -:101F400029C2867983026001D219EA9B09B90A295C -:101F500092A3971068900828220009880C6581BB1D -:101F600027C2856471B5292006299CF96491EC2C5F -:101F700020668931B1CC0C0C472C24666EC60260F9 -:101F800001A109F85065819B883689F4088C14AC4E -:101F9000991CEA8B0C99022C2104997019EAA1086A -:101FA00008479971892A09881008990218EA9E0839 -:101FB000990299722830132930120488100699105A -:101FC00008990228302C9A740C881008C8020988D5 -:101FD00002987389379975883898768A39C0819ABA -:101FE000771AEA918935987B99780989140A9902B8 -:101FF000997A8A30893277A73618EA808F33987CAD -:10200000C084987D882B2E76112976122F7613198D -:10201000EA7A0A9F4006FF1104CA110988020FAA32 -:1020200002987EC1F90FAA022A7610C0AA600001A8 -:10203000C0A6ADBF0CBC11A6CC29C2852EF4CF0919 -:10204000A90B29C685655107C020D10F2B200C0C88 -:10205000BC1106CC0828C28609B90A6F8902600142 -:102060002E2992A36890082A220009AA0C65A11FB4 -:102070002AC28564A11928203D08284064808C84E8 -:102080003504841464408485F574537F8436048455 -:1020900014644077745374293013C08C79886CC0F1 -:1020A000902924670908476580ED882089F48435E4 -:1020B0001FEA55048414A4940F440294A014EA5017 
-:1020C00008881104880298A1843698A3048414A473 -:1020D000990F990299A219EA4CADB428C2852E44F1 -:1020E000CF288C1028C6852821022F20720988024B -:1020F000B2FF2F2472282502C020D10F00CC57DA5E -:1021000020DB30DC40581293C020D10FC09163FF18 -:102110008FDA20C0B658132163FFE100DA2058138C -:102120001F63FFD88A102B21045811B41DEA2A1FFF -:10213000EA232B200CC0E02E24668A3A63FE480076 -:1021400000DA20DB30DC40DD505813A6D2A0D10FDE -:102150002A2C74DB40580B8ED2A0D10F292138C015 -:102160008879830263FE202A12002C20662B21042A -:102170002CCC010C0C472C24665811A01DEA161F0C -:10218000EA0F2B200CC0E02E24668A3A63FDF8008B -:10219000DA2058130263FF64DA205BFF1CD2A0D15F -:1021A0000F0000006C10089515C061C1B0D9402A1D -:1021B000203DC0400BAA010A64382A2006291606D1 -:1021C00068A8052CACF965C33B1DE9FC6440052FEC -:1021D000120564F29C2621021EE9F806064C65628F -:1021E000E315E9F46440D98A352930039A140A9931 -:1021F0000C6490CC2C200C8B149C110CCC11A5CC15 -:102200009C122CC286B4BB7CB3026002D38F110E29 -:10221000FE0A2EE2A368E0098620D30F0E660C6545 -:1022200062BE88122882856482B6891464905EDA60 -:1022300080D9308C201EE9F21FE9F31DE9E08B14F0 -:102240008DD4D4B07FB718B88A293C10853608C61B -:10225000110E66029681058514A5D50F550295804D -:102260000418146D8927889608CB110888140EBBB2 -:1022700002A8D8299C200F88029BA198A088929B35 -:10228000A3088814A8D80F880298A22AAC1019E9CC -:10229000DEC0C08F141EE9CF86128D11286285AE74 -:1022A000DD08FF0B2CD4CF2821022F66858B352A21 -:1022B0002072098802ABAA2825022A2472C020D1E4 -:1022C0000F29529E18E9BB6F9802600208288226E7 -:1022D00068800829220008990C6591F92A529DC14D -:1022E000CA9A1364A1EF2B200C2620060CB811A566 -:1022F000882D82860EBE0A7DC3026002022EE2A3F2 -:1023000068E0082F22000EFF0C65F1F3288285DEBD -:10231000806481FF9810266CF96461FF2C20668828 -:1023200031B1CC0C0C472C24666EC6026001BC088F -:10233000FD5065D1B617E9BD19E9A21AE9A92C210A -:10234000048B2D2830102F211D0C88100BFB090C3D -:1023500088020A880209BB026441528910C04D9B61 -:1023600090979198928D35D9E064D06CD730DBD0BE -:10237000D8307FD713273C10BCE92632168C39960B -:10238000E69CE78A37B4389AE80B13146430492A7C -:10239000821686799A9696978C778A7D9C982B825E -:1023A000172C7C209A9A2A9C189B99867BB03BB864 -:1023B000896DB9218BC996A52692162AAC18B899B1 -:1023C0009BA196A08BC786CD9BA22B921596A49B12 -:1023D000A386CB2CCC2026A605C0346BD4200D3B85 -:1023E0000C0DD8090E880A7FB705C0909988BC8863 -:1023F000C0900B1A126DAA069988998B288C18C068 -:10240000D01BE98C1CE98B16E981B1FF2A211C2322 -:10241000E6130F0F4F26E6122F251D7FA906C0F0E9 -:10242000C08028251D05F6111AE97A8F202BE615A4 -:102430002CE6162DE61726E6180AFA022AE61429D3 -:102440002006299CF96490FF29200C8D15C0801A64 -:10245000E9610C9C11AA99A5CCDA202BC28528949D -:10246000CF0B4B0B2BC685C0B08C1658118AD2A04F -:10247000D10F8A356FA548D8308BD56DA90C8A86C7 -:102480000A8A14CBA97AB337288C10C08028246715 -:10249000080B4765B112DA20DB302C12065811AD5B -:1024A000D3A0C0C1C0D02DA4039C1563FD268636E1 -:1024B00064610C8910C04D9B909791989263FEA423 -:1024C000C08163FFC78A15CCA7DA20DB308C165891 -:1024D00011A1C020D10FDA20C0B658123063FFE43A -:1024E00000DA208B1158122D63FFD9009E178A1332 -:1024F0002B21045810C28E17C0B02B246663FE3403 -:10250000C08063FE09DA20DB308C16DD505812B52E -:10251000D2A0D10FDA2058122163FFA82D2138C094 -:10252000C87DC30263FE0D8A132B21042C206698FC -:1025300017B1CC0C0C472C24665810B08E17C0D0A5 -:102540002D246663FDEE0000262138B06606064F96 -:10255000262538656EF128206A7F870508294164A1 -:1025600090A5C0D01BE92619E93426200723E61BD5 -:10257000B16609FA022BE61A28200A2DE61D2AE682 -:102580001E09880228E61C882606064728E6202B16 -:10259000220826E53E2BE6212D24072C20062A20A2 -:1025A0006468C347B44463FE9EDB30DA208D15C0F7 
-:1025B000CE2E0A802C24688C165810F1D2A0D10F90 -:1025C0008E102A321616E8FD0A2A1486662BE612A9 -:1025D00097E127E61328E614AA6609660296E02E1C -:1025E000EC4869ED50C14663FD7A000064AFB41950 -:1025F000E8F328201689920A880C00910400881AB2 -:10260000A8B8982963FF9C002B21046EB81E2C20CB -:1026100066B8CC0C0C472C2466C9C09E178A135888 -:1026200010778E17C0348F20C0D02D2466C0682646 -:10263000240663FF2C008D35C08064D04AD9E0DCCD -:1026400030DBE0DF301AE8FDB188B4FF17E8FD8623 -:10265000C9249DFF8DC82CCC102D46300767012D55 -:1026600046320A66011DE8F7264631AD6D2D463328 -:1026700026F21597B796B684C3BCBB94B58D3529A1 -:102680009C107D83C22F211DC14663FD4B000000BD -:102690006C1006292006289CF86582BF2921022B90 -:1026A000200C09094C6590E116E8C30CBA11A6AAE2 -:1026B0002DA2862C0A127DC30260028C19E8BF0984 -:1026C000B90A2992A36890078C2009CC0C65C278BE -:1026D00029A2856492722D629E1AE8B56FD80260B5 -:1026E000026E2AA22629160168A0082B22000ABB26 -:1026F0000C65B25C29629DC18C6492542A21200A27 -:10270000806099102C203CC7EF000F3E010B3EB1BA -:10271000BD0FDB390BBB098F260DBD112DDC1C0D48 -:102720000D410EDD038E27B1DD0D0D410FEE0C0DB9 -:10273000BB0B2BBC1C0BB7027EC71C2C21257BCBF3 -:10274000162D1AFC0CBA0C0DA16000093E01073EC3 -:10275000B1780987390B770A77EB0260020A2C21DE -:1027600023282121B1CC0C0C4F2C25237C8B29B0A4 -:10277000CD2D2523C855DA20DB3058106F292102D2 -:10278000CC96C0E80E9E022E2502CC57DA20DB3014 -:10279000DC405810F0C020D10F2C20668931B1CC1C -:1027A0000C0C472C24666EC6026001D309FD5065EF -:1027B000D1CD2F0A012E301129221464E0112822D4 -:1027C0001B090C4400C10400FA1A0A880228261BBF -:1027D0002E3010C0A0C0B0941295131CE878883039 -:1027E0002CC022088D14778704C0F10CFA38C04140 -:1027F000C0F225203CC0840858010F5F010F4B3800 -:1028000005354007BB10C0F0084F3808FF100FBB5C -:102810000228ECFEC0F0084F38842B0BA8100AFFEA -:10282000102A21200F88020B880208440218E8862B -:102830008F110844022821250A2A14082814048824 -:10284000110A88022A210494F08B2004E41008BBAA -:102850001104BB02C04A04BB029BF1842A08AB11DD -:102860000BEB0294F40A54110B44020555100D1B96 -:102870004094F707BB100B5502085502C08195F62E -:102880008433C05094F3B1948B3295F898F99BF24D -:10289000C080C1BC24261499FA9BF598FB85389515 -:1028A000FC843A94FD8B3B9BFE883998FF85352547 -:1028B000F6108436851324F6118B3784122BF6120A -:1028C000C0B064C07E89307797438D3288332E3014 -:1028D000108F111CE84A0999400699112CF614C072 -:1028E000C42CF6158C2B2DF61A28F61B2BF6190482 -:1028F000A81109880208EE0219E840C18008EE021A -:1029000009C90229F6162EF618C09E600001C09A69 -:102910002F200C18E8300CFE11A8FFA6EE2DE28542 -:102920002BF4CF0D9D0B2DE685C87F8A268929A71C -:10293000AA9A260A990C090948292525655050C0EC -:1029400020D10F00C09A63FFC6DA2058111463FE2D -:1029500038DA20C0B658111163FE2E0068973C2B60 -:102960009CFD64BE24C020D10FDA20DB705810CD4E -:10297000C0C0C0D10ADA390ADC3865CDE063FE098F -:102980008A102B2104580F9DC0B02B246663FE21B2 -:10299000DB402A2C7458097ED2A0D10FDA20580FC0 -:1029A000A263FCF76C1004C020D10F006C10042946 -:1029B0000A801EE8261FE8261CE7FF0C2B11ACBB83 -:1029C0002C2CFC2DB2850FCC029ED19CD0C051C0C6 -:1029D0007013E82214E82118E81F2AB285A82804F9 -:1029E000240A234691A986B8AA2AB685A9882784ED -:1029F0009F25649FD10F00006C100AD6302830103C -:102A0000292006288CF964829B68980B2A9CF9651A -:102A1000A1B2022A02580F8489371BE7E8C89164E3 -:102A2000520E2A21020A0C4C65C2588D3019E7E17A -:102A300074D7052E212365E29E2F929E1AE7DD6F43 -:102A4000F8026002532AA22668A0082C22000ACCB1 -:102A50000C65C2442A929D64A23E9A151FE7D78D49 -:102A600067C1E6C8DD2B620618E7D564B00528808B -:102A7000217B8B432B200C18E7CF0CBC11A8CC2951 -:102A8000C28679EB460FBE0A2EE2A368E0052F222C -:102A9000007EF9372CC2859C1864C2332B212F8706 
-:102AA000660B7B360B790C6F9D266ED2462C203D33 -:102AB0007BC740CE5560001E2A200CC1B28C205826 -:102AC00010F79A1864A2458D6763FFCFC0C063FFFB -:102AD000C5D7B063FFD300C0E06000022E60030ED4 -:102AE000DB0C6EB20EDC700CEA11AA6A2AAC20581C -:102AF0000199D7A0DA20DB70C1C82D212058109190 -:102B00008C268B279A160CBB0C7AB3348F188963EA -:102B100099F3886298F28E659EF82D60108A189D50 -:102B20001768D729C0D09DA92C22182B22139CAB43 -:102B30009BAA97A58E667E7302600097CF586000AF -:102B40001FDA208B1658105765A13863FFBDC0816E -:102B5000C0908F18C0A29AF999FB98FA97F563FF75 -:102B6000D2DB30DA20DC40580FFBC051D6A0C0C009 -:102B70002BA0102CA4039B172C1208022A02066B10 -:102B800002DF702D60038E179D149E100CDD11C0A6 -:102B9000E0AD6D2DDC205801188C148B16ACAC2CDC -:102BA00064038A268929ABAA0A990C9A26886609A1 -:102BB000094829252507880C98662F2218A7FF2F7A -:102BC000261863FE96DA20DB30DC40DD5058110514 -:102BD000D2A0D10FC0302C20668961B1CC0C0C473B -:102BE0002C24666EC6026000D2C03009FD5065D04C -:102BF000CA8E6764E069647066DB608C18DF70DA27 -:102C0000202D60038E170CDD119E10AD6D2DDC2084 -:102C10001EE78D5800F9232618DA208B16DC402F8A -:102C20002213DD50B1FF2F2613580F9AD2A0D10FD7 -:102C30000028203D084840658DE76F953EDA308DCD -:102C4000B56D990C8CA80C8C14CACF7CD32D2AACF2 -:102C500010C090292467090D4764DDC5600092000B -:102C60002C1208066B022D6C20077F028E17DA20CB -:102C70009E101EE77458007D63FF9A00C09163FFA9 -:102C8000D1000000655081DA20DB60DC40580FB1D4 -:102C9000C020C0F02FA403D10FDA20C0B658103FD7 -:102CA00063FFE000006F950263FD6CDA20DB30DC2F -:102CB00040DD50C4E0580F32D2A0D10F8A152B212D -:102CC00004580ECE232466286010981763FF210055 -:102CD000DA2058103263FFABC858DB30DA20580FC7 -:102CE000162A210265AF9CC09409A9022925026316 -:102CF000FF91DB30DC40DD50C0A32E0A802A24681F -:102D0000DA20580F1FD2A0D10FC020D10FDA202B0C -:102D1000200C58104763FF6B6C1004282006C0621B -:102D2000288CF8658125C050C7DF2B221BC0E12A03 -:102D3000206B29212300A104B099292523B1AA00E1 -:102D4000EC1A0BC4010A0A442A246B04E4390DCCA2 -:102D5000030CBB012B261B64406929200C1BE715C3 -:102D60000C9A110BAA082FA2861BE7136FF90260B9 -:102D700000B60B9B0A2BB2A368B0082C22000BCC28 -:102D80000C65C0A42BA2851DE73664B09B8C2B2458 -:102D900021040DCC029CB08820C0C50888110C8885 -:102DA0000298B1882A08441198B48F3494B79FB51B -:102DB000C0401EE7082DA2850E9E0825E4CF2DDC1D -:102DC000282DA68529210209094C68941A689820A3 -:102DD000C9402A210265A00B2A221E2B221D7AB18E -:102DE0000265A079C020D10F2C212365CFDE6000C1 -:102DF000082E21212D21237EDBD52B221E2F221DE3 -:102E00002525027BF901C0B064BFC413E6E92CB0EC -:102E10000728B000DA2003880A28824CC0D10B8032 -:102E200000DBA065AFE763FFA62A2C74C0B02C0AB4 -:102E300002580E081CE70C9CA08B2008BB1106BB97 -:102E4000029BA1893499A263FF790000262468DAE5 -:102E500020DB30DC40DD50581063D2A0D10FDA20E7 -:102E60002B200C580FCEC020D10F00006C1006078D -:102E70003D14C080DC30DB40DA20C047C02123BCD9 -:102E800030032838080842774001B1DD64815A1EBA -:102E9000E6C519E6C629E67ED30F6DDA050050882F -:102EA00000308CC0E0C02025A03C14E6C4B6D38F0F -:102EB000C0C0D00F87142440220F8940941077F7A8 -:102EC00004C081048238C0F10B2810C044C0220421 -:102ED000540104FD3802520102FE3808DD10821C44 -:102EE00007EE100E6E020EDD02242CFEC0E004FE82 -:102EF000380AEE100E88020D88028DAB1EE6B4086B -:102F0000D8020E880298B0C0E80428100E5E018432 -:102F1000A025A125084411084402052514045511D3 -:102F2000043402C0810E8E3994B18FAA84109FB4EC -:102F300075660C26A11FC0F2062614600009000069 -:102F400026A120C0F20626140565020F7701078727 -:102F50003905E61007781008660206550295B62571 -:102F6000A1040AE61108581108280208660296B75B -:102F7000C060644056649053067E11C0F489C288D4 -:102F8000C30B340B96459847994618E69B9F41041E 
-:102F900059110E99021FE699020E4708D80298426D -:102FA0000E99029F40C1E00E990299442FA00CB4E3 -:102FB000380CF91114E6881EE67FA4FFAE992E9214 -:102FC0008526F4CF0E880B289685D10F2BA00C1FD9 -:102FD000E6791CE6800CBE11ACBBAFEE2DE2852677 -:102FE000B4CF0D3D0B2DE685D10FC0800528387874 -:102FF000480263FEA263FE966C1006C0C06570F1C5 -:103000008830C030088714778712C0B0C0A619E690 -:103010006B299022C030CC97C031600003C0B0C093 -:10302000A6C0E0C091C0D4C08225203C0B3F1097C1 -:1030300012831CC0700858010D5D01089738C080CC -:103040000B9838077710048810086802087702C0C8 -:10305000800D98382D3CFE0888100D9E388D2B0A67 -:10306000EE1008EE0207EE020CB8100FDD02053B71 -:10307000400EDD029D408920043D100899110D99F4 -:10308000022D210409A90208DD119941872A05B9F9 -:10309000100D3D020ABB110DBB02087702974428B0 -:1030A00021258712082814048811071E4007EE10F6 -:1030B0000E990275660926211F0626146000060077 -:1030C0002621200626140868029B47098802984694 -:1030D00029200CD2C0C0800C9E111BE63E1FE63595 -:1030E000AB99AFEE2DE2852894CF0DAD0B2DE68583 -:1030F000D10FDD40C0A6C0B08E51CAE0B2AAB1BBAC -:103100002DDC108F500E7836981008770C9FD898C9 -:10311000D989538F52991199DB9FDA7E8309B1CCFB -:10312000255C10C97763FFCF88108D1108E70C97D5 -:1031300051AD8DD7F078DB01B1F79D5397528830B0 -:10314000C030088714088840648ED565BEC963FE08 -:10315000BC0000006C1004D720B03A8820C0308238 -:1031600021CAA0742B1E2972046D080FC980C99151 -:103170008575B133A2527A3B0B742B0863FFE900CB -:10318000649FECD10FD240D10F0000006C100AD622 -:10319000302E3027D950DA4015E6092430269A150A -:1031A00029160464E0026493732920062A9CF865BA -:1031B000A3CE2A2102270A040A0B4C65B3978C3050 -:1031C00074C7052D212365D4A0C0A62B0A032C2289 -:1031D00000580F0B64A3B917E5F78E389A1664E30D -:1031E000BA2F6027285021C9F37E8311C2B08C20EA -:1031F0002A200C580F2AD7A0CDA16004A200C2B08B -:103200008C202A200C580EFED7A064A4862F212ED5 -:103210008B680FBF360FB90C6F9D54296027D5B04E -:103220006E920528203D7B8F4CDA20DB50C1C42DE7 -:10323000211F580EC48B269A189A1989272AAC3850 -:103240000B990C7A93538963C08099738F62987835 -:103250009F728E659E798D679D7B8C6695759C7A35 -:103260008E687E53026000B18B1465B050600038E8 -:10327000DBF063FFA5008A14C9A92E60030E9B0C26 -:103280006EB2A5DC500CEA11AA6A2AAC285BFFB129 -:10329000D5A063FF93C0E063FFE2DA208B18580EDD -:1032A0008165A2B163FF9E0000DA20DB308C1558E7 -:1032B0000E29D6A0C0C0C0D12D16042CA403DC70EA -:1032C000DA20DB60DF502D6003C0E09E109D171EEA -:1032D000E5D20CDD110D6D082DDC285BFF478E66F5 -:1032E0008F678817AF5FA8A828640375FB01B1EE4C -:1032F0008A189E669F6789268829AA9909880C9949 -:10330000268E6808084805EE0C28252515E5AC9E94 -:103310006865EECC63FEE6000000C9432F21232B35 -:1033200021212FFC010F0F4F2F25237FBB026003AC -:10333000142C20668961B1CC0C0C472C24666EC617 -:103340000260022809FD5065D22264E1B62E602792 -:1033500064E1B0DC70DF50DA20DB601EE5C32D6075 -:1033600003C08098100CDD11AD6D2DDC285BFF22B1 -:10337000644181C0442B0A008C202A200C580EA0E6 -:103380000AA70265A00FC0B02C22002A200C580EFC -:103390009CD7A064AFEFDA20C1BCC1C82D21208F1B -:1033A000188E268929AFEE9E260E990C0909482908 -:1033B0002525580E64C090C050C0C288609A191E5E -:1033C000E57FC0A12EE022088F14778704C0810E0C -:1033D0008938C0800B93102D203C2921200CDC0162 -:1033E00004DB010929140BA8380CA5380D3D401C3D -:1033F000E5968B2B088810075510085502053302F7 -:103400002821250F154003BB020CBB0207551005F0 -:10341000D3100828140ADD11048811098802053325 -:10342000022921040833029B70C0808A201BE58F8B -:1034300008AA110BAA029A71C0A1852A93769574E5 -:1034400008931103DD020ADD029D778C63C1DC9CC9 -:10345000738B6298789A799B72232214C0C0B1351D -:103460002526149C7B9D75937A2B621A9B7C2A627D -:103470001C9A7D28621D987E25621B957F2362170A 
-:103480002376102D62182D76112C62192C76126479 -:10349000E0B98E6077E73DC0FE13E5571DE558C1E2 -:1034A000818A628B630495110E9C4006CC110C55E9 -:1034B00002247615085502C0802D76148D2B2B76AC -:1034C0001B2A761A28761925761803DD022D761622 -:1034D0006000030000C0FA2E200C19E53E18E53507 -:1034E000A9E90CEE11A8EEC0802DE2852894CF0D3D -:1034F000FD0B2DE685DA208B198C158D14580D6582 -:10350000D2A0D10FDC70DF50DB602D6C28C0A01E74 -:10351000E5569A10DA205BFE5563FE53002B203DE2 -:103520000B4B4065BC826FE527DA308F556DE90C97 -:103530008EAA0E8E14C9E87EF3162AAC10C090290C -:103540002467090F4764FC6060015F00C0FA63FFF5 -:1035500085C09163FFE88814658168DA20DB608CA0 -:1035600015580D7CC020C09029A403D10F8A162BBA -:103570002104580CA2C0A02A24668E6863FDCA00EC -:10358000002B9CF965B0FDDA20580CA763FC2200E3 -:1035900000DA20C0B6580E0163FFBA002B200C0CD5 -:1035A000BE11A7EE2DE286C1C27DC30260011819CB -:1035B000E50209B90A2992A36890082A220009AAFB -:1035C0000C65A10326E2856460FD2C20668931B17B -:1035D000CC0C0C472C24666FC60270960C8A162BF6 -:1035E0002104580C86C0D02D24668E3077E74D1C00 -:1035F000E5021BE5028F328833C0A42D21040E9909 -:103600004006991104DD1109DD029A61C19009DDBE -:10361000029B60C0908B2B9D649F66986799650C98 -:10362000BB029B6228200C1AE4EBAA8A0C8811A723 -:10363000882F828529A4CF2FFC202F86858A1465A8 -:10364000A0A6C020D10FB0FC8B142C2523C8B70234 -:103650002A02066B02580CB82A210265AEF7C0D8C0 -:103660000DAD022D250263FEEC008E14C8E8DA20B1 -:10367000DB30580CB12A210265AEDA07AF022F25E4 -:103680000263FED100DA20DB308C158D14580E5504 -:10369000D2A0D10FDA202B200C580DC063FEB6004B -:1036A000DA202B200C580DE263FEAADA20DB308CE6 -:1036B000152D12042E0A80280A00282468580CB000 -:1036C00063FAE500C020D10FDA20580DB48914CD7B -:1036D00092DA20DB308C15580D1FDBA0C020C0A073 -:1036E0002AB403D10FC020D10F2A2C748B15580691 -:1036F00028D2A0D10F0000006C100C2821029410D9 -:1037000008084C6583621FE4AB29F29E6F98026043 -:1037100003661DE4A729D2266890082A220009AA78 -:103720000C65A3542CF29D64C34E2B200C0CB611D7 -:10373000AF66286286C1EC78E30260034619E49E16 -:1037400009B90A2992A36890078A2009AA0C65A3DF -:103750003224628564432CC0E12A3109C0702724D9 -:103760006689359A11992A88369912982B89379843 -:1037700013992C883899140858149815982D89395C -:103780002A25042E251D29251C283028C0922824EE -:103790003C2A302908084798160989012A243D2A1D -:1037A000311599170A094109A90C299CEC29251FF3 -:1037B0007E87192D2A000DA06000083E010A3EB147 -:1037C000AD08DA390EAA110A990C29251F2A211FE2 -:1037D00018E4A80A8160C1D0941A951B01083E0024 -:1037E000053EB184054839843C259CFC0D8836296A -:1037F000201408AA1C8D3D2726182E26132E2614C9 -:103800002E261527261B2E246B27246727246808BD -:10381000581C0909432924142932112A252E282548 -:103820002F27252427252527252C27252325252037 -:103830002425212D2522841A2D211C851B6FD202BF -:10384000600209C0A099186D080AB1AA00A104007D -:10385000E91A7D9B0263FFEE8918C080C0E1C07049 -:10386000C0D29B1D951B961C9C1E16E4722C203DFD -:1038700015E4820C0B400DCC010BE7381DE4640A03 -:1038800077100CE8380B8810C0C49C410877029D63 -:1038900040B0A80988118B209C499D48954B9643C0 -:1038A000087702861418E47315E45A08770205BBFA -:1038B000029B4A9B4297468812871108DA149A4E57 -:1038C0000D88100D77110877021AE44E06D8140DF2 -:1038D0006610087702974FC78F984D984C98458788 -:1038E0001598440715140D55110A5502954715E40E -:1038F000638A262D46102D46182D46202C46112C65 -:1039000046192C46212B46122B461A2846142846C7 -:10391000152B462288162546242546268B170A0C89 -:1039200048090D4885130EDD1105CC110839400BEF -:10393000EB390299101EE4520DCC020D5511082DE1 -:10394000400655022E461316E41D0FDD11254616BE -:10395000080840851B0188100DBB0286671DE449DD -:103960000988020CBB0219E4191CE4472B46172DE9 
-:10397000461BA7661BE446C0702C461C0988028CB7 -:103980001E28461E2B4623C0908B1D29461D294606 -:103990001F18E43F2946272846252931162E2006E0 -:1039A00029246A243117962D242538861CCCE1273A -:1039B0002407C0D7090E4064E0829A29092841648F -:1039C000809164409B2D2406C098094936280AA09E -:1039D00024628501C404A84428210424668508883B -:1039E000118E3F8A3E2D32100EA41800C4040EAE74 -:1039F0001800EE110ACA530EDD02C0E30E880298C9 -:103A0000C11EE42409084E9EC08E2094C398C59D13 -:103A1000C418E3F01DE42105EE110EAA020DAA025E -:103A2000A8B82784CF9AC21EE3E224F29D27E4A21D -:103A3000244C1824F69D655052C020D10F2D240629 -:103A4000C0A0C09809493604A93863FF7FC0A063AD -:103A5000FE070000654F6DC098C0A82A240663FFCA -:103A60006B2D2406C09063FF63CC57DA20DB308CCB -:103A700010580C38C020D10F00DA20C0B6580CC73F -:103A800063FFE500DA20580CC563FFDC2A2C748B39 -:103A90001058053FD2A0D10F6C10062820068A339B -:103AA0006F8202600161C05013E3C229210216E354 -:103AB000C1699204252502D9502C20159A2814E3B7 -:103AC000BF8F2627200B0AFE0C0477092B711C647C -:103AD000E1398E428D436FBC0260016F00E104B09A -:103AE000C800881A08A80808D80298272B2006685A -:103AF000B32ECE972B221E2C221D0111027BC90151 -:103B0000C0B064B0172CB00728B000DA2003880AD0 -:103B100028824CC0D10B8000DBA065AFE7C020D16C -:103B20000F2D206464DFCA8B29C0F10BAB0C66BF7C -:103B3000C02B200C0CBC11A6CC28C2862E0A0878FB -:103B4000EB611EE39D0EBE0A2EE2A368E00528226B -:103B5000007E894F29C2851EE3A96490461FE3B603 -:103B60009E90C084989128200A95930F880298927D -:103B70008E200FEE029E942F200788262F950A98FC -:103B8000969A972E200625240768E3432921022AC6 -:103B9000C2851DE3902AAC20ADBD25D4CF2AC685B1 -:103BA00063FF4E002E2065CBEDC082282465C9F648 -:103BB00005E4310002002A62821BE3982941020BCE -:103BC000AA022A668209E43129210263FF23000048 -:103BD00064DFB88F422E201600F1040DEE0C00EECB -:103BE0001AAEAE9E2963FFA38A202B3221B1AA9A76 -:103BF000B0293221283223B4992936217989A92B79 -:103C000032222B362163FFA0C020D10F9F2725240D -:103C100015ACB828751C2B2006C0C12EBCFE64E074 -:103C2000AB68B7772DBCFD65DEC72D2064C0F0649E -:103C3000D0868E290EAE0C66E089C0F128205A2865 -:103C40008CFE08CF3865FEE863FF580000E004935F -:103C500010C0810AF30C038339C78F08D80308A862 -:103C60000108F80C080819A83303C80CA8B828756F -:103C70001C030B472B24158310CBB700E104B0BC09 -:103C800000CC1AACAC0CDC029C27659E5EC0B20B6B -:103C9000990209094F29250263FE50002D206A0D63 -:103CA0002D4165DF7EDA20C0B0580C8F64AF18C09C -:103CB000F163FEEF9F2763FFD02E221F65EE326374 -:103CC000FF79000028221F658E2763FF6E252406DA -:103CD00029210263FE1B00006C10066571332B4C1A -:103CE00018C0C7293C18C0A1C08009A838080842DC -:103CF0006481101CE32C1AE32D2AC67E2A5CFDD3B6 -:103D00000F6DAA0500B08800908C8940C0A009887A -:103D1000471FE355080B47094C50090D5304DD10AC -:103D2000B4CC04CC100D5D029D310CBB029B3088DD -:103D3000438E2098350FEE029E328D26D850A6DD98 -:103D40009D268E40C0900E5E5064E0971CE33B1EA3 -:103D5000E32B038B0BC0F49FB19EB02D200A99B3C7 -:103D60000CDD029DB28F200CFF029FB48E262D2009 -:103D7000079EB68C282DB50A9CB72924072F20064C -:103D80002B206469F339CBB61DE30D2320168DD2A9 -:103D90000B330C00D10400331AB48DA3C393292232 -:103DA000200C13E30C1FE3030C2E11AFEEA322290A -:103DB00024CF2FE285D2A00FDD0B2DE685D10F0099 -:103DC0002E200CB48C0CEB111FE3031DE2FAAFEEB6 -:103DD000ADBB22B28529E4CF02C20B22B685D2A0A8 -:103DE000D10F00002E200C1CE2F31FE2FA0CEB11A5 -:103DF000AFEEACBB22B28529E4CF02820B22B6859E -:103E0000D2A0D10FC0D00BAD387DC80263FEEC63E9 -:103E1000FEE08E40272C747BEE12DA70C0B32C3C8F -:103E200018DD50580A868940C08063FEE3066E02A2 -:103E3000022A02DB30DC40DD505800049A10DB50CF -:103E4000DA70580453881063FEF700006C10069275 -:103E5000121EE2E48C40AE2D0C8C472E3C1804CA96 
-:103E60000BD9A07DA30229ADF875C302600084C000 -:103E7000B0C023C0A09D106D0844B89F0EB80A8D35 -:103E8000900EB70BB8770D6D36ADAA9D800D660C00 -:103E9000D8F000808800708C879068B124B22277B7 -:103EA000D3278891C0D0CB879890279C100070882A -:103EB00000F08C9D91CB6FC08108BB0375CB36633E -:103EC000FFB4B1222EEC1863FFD485920D770C86D7 -:103ED000939790A6D67D6B01B1559693959260000D -:103EE00016B3CC2D9C188810D9D078D3C729DDF80B -:103EF00063FFC100C0238A421BE2E900CD322D449A -:103F0000029B3092318942854379A1051EE2E50E7C -:103F1000550187121BE2D5897095350B99029932AC -:103F200088420A880C98428676A6A696768F44AF79 -:103F3000AF9F44D10F0000006C10089311D6308859 -:103F400030C0910863510808470598389812282115 -:103F500002293CFD08084C6581656591628A630A07 -:103F60002B5065B18B0A6F142E0AFF7CA60A2C20F9 -:103F70005ACCC42D0A022D245A7FE0026002158912 -:103F80002888261FE2C809880C65820F2E200B0F97 -:103F9000EE0B2DE0FE2EE0FF08DD110EDD021EE22D -:103FA000C2AEDD1EE2C21CE2C20EDD010DCC37C185 -:103FB00080084837B88DB488981089601AE2807BF1 -:103FC00096218B622AA0219C147BA3179D132A2083 -:103FD0000C8B108C20580BB18C148D13DBA0CEAC45 -:103FE0006001C4002E200C1BE2730CEA110BAA081E -:103FF0002BA2861FE2717BDB3B0FEF0A2FF2A36837 -:10400000F0052822007F892C2BA28564B0AA876244 -:104010008826DE700C7936097A0C6FAD1C8F279BD1 -:104020001508FF0C77F3197E7B729D139C149B156A -:10403000CF56600025C0B063FFD0D79063FFDD008E -:10404000009D139C14DA20DB70580B168B158C1412 -:104050008D1365A06A8E6263FFCC00DA208B11DCC1 -:1040600040580ABCD6A08B15C051DE70DA20DC6047 -:10407000DD405BFF768D138C14D9A02E200C1BE243 -:104080004D1FE2540CEA11AFEFC0E0ABAA2BA285A2 -:104090002EF4CF0B990B29A68563FF1D00DA20DCD7 -:1040A00060DD40DE708912282007DF50A9882824AF -:1040B000075BFF09D2A0D10F00DBE0DA20580B37F5 -:1040C0006550EF2A20140A3A4065A0EBDB60DC4023 -:1040D000DD30022A025809A7D6A064A0D584A183A6 -:1040E000A00404470305479512036351C05163FEC2 -:1040F0005C2C2006D30F28CCFD6480A568C704C0C3 -:10410000932924062C2006C0B18D641FE22C9D2724 -:104110009D289D298FF29D2600F10400BB1A00F016 -:1041200004B0BE0EDD01C0F0ADBB8D652F24070DC0 -:104130000E5E01EE11AEBB2E0AFEB0BB0B0B190ECC -:10414000BB36C0E20B0B470EBB372B241618E224FC -:104150000A09450D0B422B240B29240AB4BE2E2438 -:104160000C7D88572920162FCCFDB09D0A5C520D7E -:10417000CC362C246465FDEC0C0C4764CDE618E2CB -:104180000F8E2888820C9F0C00810400FF1AAFEE6E -:104190009E2963FDCF1CE23E63FE13001CE23563E3 -:1041A000FE0C8D6563FFA500DA202B200C580B2038 -:1041B000645F0FC020D10F00C020D10FC09329240D -:1041C00016C09363FFA000006C1004C06017E1F8F4 -:1041D0001DE1FBC3812931012A300829240A78A175 -:1041E00008C3B27BA172D260D10FC0C16550512605 -:1041F00025022AD0202F200B290AFB2B20142E2049 -:104200001526241509BB010DFF0928F11C2B2414C8 -:10421000A8EE2EF51C64A0A92B221E28221D011138 -:10422000027B8901DB6064B0172CB00728B000DA8C -:104230002007880A28824CC0D10B8000DBA065AF24 -:10424000E7DB30DC40DD50DA205800DE29210209AE -:104250000B4CCAB2D2A0D10F00CC5A2C30087BC173 -:10426000372ED02064E02D022A02033B02DC40DD21 -:10427000505800D4D2A0D10F2B2014B0BB2B241443 -:104280000B0F4164F0797CB7CAC0C10C9C022C258D -:1042900002D2A0D10FC020D10F2E200669E2C12684 -:1042A00024062B221E2F221D29200B2820150D99B4 -:1042B000092A911C262415AA8828951C7BF149609F -:1042C0000048B0BB2B24140B0A4164A0627CB702E7 -:1042D0002C25022B221E2C221DD30F7BC901C0B01E -:1042E000C9B62CB00728B000DA2007880A28824C0B -:1042F000C0D10B8000DBA065AFE7C020D10F00006C -:10430000262406D2A0D10F0000DB601DE1AC64BF03 -:104310004F2CB00728B000DA2007880A28824CC04A -:10432000D10B8000DBA065AFE71DE1A463FF310086 -:1043300026240663FF9C00006C1004282006260A31 -:10434000046F856364502A2920147D9724022A0271 
-:10435000DB30DC40DD50580019292102090A4CC825 -:10436000A2C020D10FC0B10B9B022B2502C020D1CF -:104370000F00022A02033B022C0A015800D1C9AAED -:10438000DA20DB30DC405809F329A011D3A07E9756 -:10439000082C0AFD0C9C012CA411C0512D201406E0 -:1043A000DD022D241463FFA4DA20DB30DC40DD5075 -:1043B000C0E0580973D2A0D10F0000006C1006169F -:1043C000E17D1CE17D655157C0E117E179282102AB -:1043D0002D220008084C6580932B32000B6951296F -:1043E0009CFD6590872A629E6EA84C2A722668A062 -:1043F000027AD9432A629DCBAD7CBE502B200C0C97 -:10440000BD11A6DD28D2862F4C0478FB160CBF0AFE -:104410002FF2A368F0052822007F89072DD285D3CB -:104420000F65D0742A210419E1A3D30F7A9B2EDAE9 -:104430002058086E600035002D21041BE19E7DBBD5 -:1044400024DA20C0B6580869CA546001030B2B5007 -:104450002B240BB4BB0B0B472B240C63FFA0DA20DF -:10446000580A4E600006DA20C0B6580A4C6550E083 -:10447000DC40DB302D3200022A020D6D515808BDA0 -:104480001CE14ED3A064A0C8C05184A18EA0040436 -:10449000470E0E4763FF3500002B2104C08C893185 -:1044A000C070DF7009F950098F386EB8172C20667C -:1044B000AECC0C0C472C24667CFB099D105808CF11 -:1044C0008D1027246694D11EE151B8DC9ED0655032 -:1044D00056C0D7B83AC0B1C0F00CBF380F0F42CBAE -:1044E000F119E13018E13228967EB04BD30F6DBA46 -:1044F0000500A08800C08C2C200CC0201DE1360CCB -:10450000CF11A6FF2EF285ADCC27C4CF0E4E0B2EB9 -:10451000F685D10FC0800AB83878D0CD63FFC100CE -:104520008E300E0E4763FEA12A2C742B0A01044D17 -:10453000025808C22F200C12E1270CF911A699A2EB -:10454000FF27F4CF289285D2A008480B289685D162 -:104550000FC020D10F0000006C1004C060CB55DBF1 -:1045600030DC40055D02022A025BFF94292102092A -:10457000084CC882D2A0D10F2B2014B0BB2B24141E -:104580000B0C41CBC57DB7EBC0C10C9C022C2502A6 -:10459000D2A0D10F0000022A02033B02066C02C027 -:1045A000D0C7F72E201428310126250228240A0F0F -:1045B000EE012E241458010E63FFA300262406D218 -:1045C000A0D10F006C1006282102D62008084C65E7 -:1045D000809D2B200C12E0F70CB811A2882A82864D -:1045E000B5497A930260009719E0F409B90A299253 -:1045F000A36890082A620009AA0C65A08228828517 -:104600001CE0FF6480799C80B887B14B9B819B1034 -:10461000655074C0A7D970280A01C0D0078D380D25 -:104620000D42CBDE1FE0E01EE0E12EF67ED830D357 -:104630000F6D4A0500808800908C2E3008C0A000C5 -:10464000EE322E740028600C19E0E30C8D11A2DD0F -:10465000A988C0202CD2852284CFD2A00CBC0B2CE0 -:10466000D685D10FC0F0038F387FA0C063FFB400A0 -:10467000CC582A6C74DB30DC405807F6C020D10FD0 -:10468000DA605809C663FFE7DD402A6C74C0B0DC0D -:104690007058086A2E30088B1000EE322E740028F5 -:1046A000600C19E0CC0C8D11A2DDA988C0202CD2A1 -:1046B000852284CFD2A00CBC0B2CD685D10F000054 -:1046C0006C1004292014282006B19929241468812B -:1046D00024C0AF2C0A012B21022C24067BA004C08D -:1046E000D02D2502022A02033B02044C02C0D058FE -:1046F00000C0D2A0D10FC020D10F00006C1004293F -:104700003101C2B429240A2A3011C28378A16C7BFA -:10471000A1696450472C2006C0686FC562CA572D36 -:1047200020147CD722DA20DB30DC40DD505BFFA593 -:10473000292102090E4CC8E2C020D10FC0F10F9F01 -:10474000022F2502C020D10FDA20DB30C0C05BFF72 -:10475000DC28201406880228241463FFC7292015AA -:104760001BE0972A200BC0C09C240BAA092BA11C7C -:104770002C2415AB9929A51C63FF9900C020D10FEB -:10478000DA20DB30DC40DD50C0E058087DD2A0D11B -:104790000F0000006C1004CB5513E09225221F0D72 -:1047A000461106550CA32326221E25261F06440B60 -:1047B00024261E734B1DC852D240D10F280A80C038 -:1047C0004024261FA82828261E28261DD240D10FA7 -:1047D000C020D10F244DF824261E63FFD80000000E -:1047E0006C1004D620282006C0706E85026000D4AC -:1047F0001DE07919E07112E06F2A8CFC64A1302B66 -:104800006102B44C0B0B4C65B0A22B600C8A600C9F -:10481000B8110288082E828609B90A7EC302600098 -:104820009A2992A368900509AA0C65A08E28828512 -:10483000648088B8891BE07594819B80655155C060 
-:10484000B7B8382A0A01C0C009AC380C0C4264C0A1 -:10485000421FE0541EE0562EF67EB04AD30F6DAADA -:104860000500808800908CC0A029600C0C9C11A2CF -:10487000CC2BC285AD990B4B0B2BC6852860062728 -:1048800094CF6881222D6015D2A0C9D2C0E22E64D7 -:1048900006D10F00C0F008AF387FB0BD63FFB10094 -:1048A000276406D2A0D10F00D2A0D10F00CC57DAD6 -:1048B00060DB30DC405808A7C020D10FDA6058090F -:1048C0003763FFE80028221E29221DD30F789901A3 -:1048D000C080C1D6C1C11BE043C122AB6B64804222 -:1048E00078913F2A80000CAE0C64E0BB02AF0C64F0 -:1048F000F0B52EACEC64E0AF0DAF0C64F0A92EACBB -:10490000E864E0A32FACE764F09D2EACE664E0978A -:104910002F800708F80BDA807B83022A8DF8D8A055 -:1049200065AFBC28612308D739D97060007B0000CF -:104930002B600C0CB811A2882C82862A0A087CAB4A -:104940007E09BA0A2AA2A368A0052C62007AC96F60 -:104950002A828564A0691FE029276504C0E3C0C4DA -:104960002E64069CA11CE0549FA02E600A97A30C05 -:10497000EE029EA28F600CFF029FA42E60147AEFBD -:104980004627A417ADBC2F828527C4CF2FFC202F2C -:10499000868563FE692A6C74C0B1DC90DD405807DF -:1049A000A71DE00C63FEC100D9A0DA60DB30C2D0E5 -:1049B000C1E0DC4009DE39DD505807F1D2A0D10F4B -:1049C000DA605808F663FEE4290A0129A4170DBF2E -:1049D000082E828527F4CF2EEC202E868564500B7E -:1049E0002A6C74DB4058016AD2A0D10FC020D10FCD -:1049F0006C10062B221E28221D93107B8901C0B04B -:104A0000C0C9C03BC1F20406401DDFF6C0E2C0745D -:104A10000747010E4E01AD2D9E11C0402E0A1464B1 -:104A2000B06E6D084428221D7B81652AB0007EA1EE -:104A30003B7FA1477B51207CA14968A91768AA1434 -:104A400073A111C09F79A10CC18B78A107C1AE29B8 -:104A50000A1E29B4007CA12B2AB0070BAB0BDAB0DD -:104A60007DB3022ABDF8DBA0CAA563FFB428B0104D -:104A700089116987BB649FB863FFDC00647FB463FE -:104A8000FFD50000646FD0C041C1AE2AB40063FFFF -:104A9000C62B2102CEBE2A221D2B221E7AB12A8CC1 -:104AA000107CB1217AB901C0B0C9B913DFC1DA20D5 -:104AB00028B0002CB00703880A28824CC0D10B8094 -:104AC00000DBA065AFE7D240D10F8910659FD463AA -:104AD000FFF300006C1008C0D0C8598C30292102A7 -:104AE0000C0C4760000C8E300E1E5065E19E292193 -:104AF00002C0C116DFB0090B4C65B0908A300A6E57 -:104B00005168E3026000852F629E1BDFA96EF85397 -:104B10002BB22668B0052E22007BE94727629DB79D -:104B200048CB7F97102B200CB04E0CBF11A6FF294D -:104B3000F2869E12798B4117DFA007B70A2772A36E -:104B4000687004882077893029F285DF90D79065D6 -:104B500090652A210419DFD77A9B22DA205806A310 -:104B6000600029002C21041BDFD37CBB18DA20C095 -:104B7000B658069EC95860014CC09063FFCCDA203D -:104B8000580886600006DA20C0B65808846551359A -:104B9000DC40DB308D30DA200D6D515806F6C0D088 -:104BA000D3A064A120292102C05184A18CA00404B7 -:104BB000470C0C4763FF3E00C09C8831DBD008F8EF -:104BC00050089B3828210498116E8823282066AC51 -:104BD0008C0C0C472C24667CBB159F139E148A10EA -:104BE0008B115807068E148F13C0D02D24668A307F -:104BF000C092C1C81BDF867FA6099BF099F12CF4F7 -:104C00000827FC106550A4B83ADF70C051C0800777 -:104C1000583808084264806718DF6319DF64298602 -:104C20007E6A420AD30F6DE90500A08800F08CC0AF -:104C3000A08930B4E37F9628C0F207E90B2C9408D2 -:104C40009B909F912F200C12DF630CF811A68829EE -:104C50008285A2FF2DF4CFD2A009330B238685D104 -:104C60000F22200C891218DF5B0C2B11A6BBA82287 -:104C70002D24CF2CB285D2A00C990B29B685D10F4B -:104C8000C087C0900A593879809663FF8ADB30DA92 -:104C900020C0C1C0D05BFF56292102C0D02A9CFE93 -:104CA00065AE4D2D2502C09063FE45009E142A2C52 -:104CB00074C0B1DC70DD405806E18E14C0D01BDF3B -:104CC00053C1C863FF6AC020D10F00006C100628D2 -:104CD000210216DF3808084C65821929629E6F98F8 -:104CE0000260022019DF332992266890078A200982 -:104CF000AA0C65A20F27629DC0CC6472072B210409 -:104D00008E31C0A0DDA00EFE500ECD386EB8102C36 -:104D10002066B1CC0C0C472C24667CDB026001EFD2 -:104D2000C0C12930081BDF2564909C2F0AFFC0D327 
-:104D3000B09E64E1026892136450882A2C74044B7C -:104D4000025800930AA20206000000002B200C2744 -:104D500021040CBC11A6CC29C286280A087983023A -:104D60006001B919DF1509B90A2992A36890082EC4 -:104D7000220009EE0C65E1A42EC28564E19E262086 -:104D80000713DF1E6E7B0260019A17DF151FDF1EFF -:104D900019DF4BC0D228200A93E09DE1A9690F8852 -:104DA0000298E22F90802A9480B1FF07FF029FE3D0 -:104DB0002EC2851FDF080EDE0BAFBF2AF4CF2EC632 -:104DC00085655F76C020D10F2830102930112E3034 -:104DD0001300993200ED326480EE2A30141FDF3860 -:104DE00000AA3278EF050F9E092DE47F1EDF36669C -:104DF000A0050F98092A8480B4A718DF33C76F0075 -:104E00009104AE9EDDE000AF1A00C31A6EE1052DDD -:104E1000B2000DED0C1EDF2D08D81C063303AE8842 -:104E20002A848B2EB02E27848C03EE010FEE022EE7 -:104E3000B42E58018F63FEFF29310829250428303C -:104E4000142E3109B0886480A32E240AC0812E302C -:104E5000162CB4232E240BB4EF2F240C8C378B3656 -:104E6000292504DEB0DFC00C8F390B8E390FEE021E -:104E700064EEC4089F1101C4048D380CB81800C436 -:104E8000040CBE1800EE110EDD02C0E30EFF021E80 -:104E9000DF019F719E701EDF008F2098739D740547 -:104EA000FF110BCD53C18098750FDD020EDD029D01 -:104EB000721EDEBF2A24662F629D2AE4A22FFC18F0 -:104EC0002F669D63FE710000002F30121BDF010072 -:104ED000FA3278FF050B980B2A847F66D0050B9A6F -:104EE0000B2DA4802A301100AA3263FF442F240A1C -:104EF0009E2B63FF56CC57DA20DB30DC4058071579 -:104F0000C020D10F00DA20C0B65807A463FFE50027 -:104F1000DA7058063AC0A02A246663FE02DA2058E6 -:104F2000079F63FFCFB16928200A862009094799A6 -:104F30001129240798107F812693E027E50A9AE338 -:104F400088109DE119DEDD8D11096F029FE42DE4CB -:104F500016098802C0D398E22A240763FE51000094 -:104F60001DDEA60868118F11892B93E008FF02C08F -:104F70008F9FE50D990299E2047F11C0D49DE1084D -:104F8000FF029FE463FFD0006C1004C020D10F002B -:104F90006C100485210D381114DE848622A42408A7 -:104FA000660C962205330B9321743B13C862D230F2 -:104FB000D10FC030BC29992199209322D230D10F32 -:104FC000233DF8932163FFE36C100AD62094181751 -:104FD000DE79D930B83898199914655252C0E1D2A7 -:104FE000E02E61021DDE760E0E4C65E1628F308E82 -:104FF000190F6F512FFCFD65F1558EE129D0230E5D -:105000008F5077E66B8F181EDEB3B0FF0FF4110FD1 -:105010001F146590CE18DEB08C60A8CCC0B119DE2C -:105020006428600B09CC0B0D880929811C28811A82 -:105030002A0A0009880C08BA381BDEA60CA90A291E -:1050400092947B9B0260008C2B600C94160CBD111B -:10505000A7DD29D286B8487983026000D219DE56CE -:1050600009B80A2882A398176880026000A360002C -:10507000A51ADE9A84180AEE01CA981BDE4D8C1917 -:105080002BB0008CC06EB3131DDE4A0C1C520DCC2D -:105090000B2DC295C0A17EDBAE6000380C0C5360B6 -:1050A000000900000018DE8C8C60A8CCC0B119DEAD -:1050B0004028600B09CC0B0D880929811C28811A16 -:1050C0002A0A0009880C08BA380CA90A2992947E89 -:1050D000930263FF72DA60C0BA580730645073609D -:1050E000026600001ADE338C192AA0008CC06EA361 -:1050F0001A18DE2F0C1C5208CC0B18DE762BC2952A -:10510000C0A178B30263FF3F63FFC9000C0C536377 -:10511000FF09896078991829D285C9922B729E1D42 -:10512000DE246EB8232DD226991369D00B60000DB2 -:10513000DA6058071A6000170088607D890A9A1A99 -:1051400029729D9C129915CF95DA60C0B658071345 -:105150006551F58D148C18DBD08DD0066A020D6D6B -:1051600051580584D3A09A1464A1DD82A085A1B80A -:10517000AF9F190505470202479518C05163FE60AD -:105180002B6104C08C8931C0A009F950098A386E9E -:10519000B81F2C6066A2CC0C0C472C64667CAB114B -:1051A0009F119E1B8A155805958E1B8F11C0A02A32 -:1051B00064669F1164F0E12912032812096DF91742 -:1051C0002F810300908DAEFE0080889F9200908C0E -:1051D000008088B89900908C65514E8A10851A8B92 -:1051E000301FDE06881229600708580A2C82942D89 -:1051F00061040ECC0C2C86946FDB3C1CDE30AC9C26 -:1052000029C0800B5D50A29909094729C48065D047 -:10521000DA2E600CC0D01FDDEF0CE811AFEEA788CE 
-:105220002282852DE4CF02420B228685D2A0D10FA7 -:105230008E300E0E4763FDA6A29C0C0C472C640713 -:105240007AB6CD8B602E600A280AFF08E80C6481CC -:105250000E18DE1983168213B33902330B2C341661 -:105260002D350AC02392319F30C020923308B202FC -:1052700008E80292349832C0802864072B600CD270 -:10528000A01CDDD40CBE11A7EE2DE285ACBB28B46A -:10529000CF0D9D0B2DE685D10F8B1888138D30B85F -:1052A0008C0D8F470D4950B4990499100D0D5F0472 -:1052B000DD1009FF029F800DBB029B8165508D852B -:1052C0001AB83AC0F1C0800CF83808084264806B04 -:1052D0001BDDB519DDB629B67E8D18B0DD6DDA059A -:1052E00000A08800C08CC0A063FEF30082138B1660 -:1052F0001DDDC628600AC0E02EC4800D880202B2FF -:105300000B99239F20C0D298229D2122600CB2BB12 -:105310000C2D11A7DD28D28508BB0B18DDAE2BD6CE -:1053200085A8222E24CFD2A0D10F9E1B851A2A6CCD -:10533000748B185BFF178E1B63FEA300C087C090A1 -:105340000AF93879809263FF86C020D10F9E1B2A0C -:105350006C74C0B18D185805398E1B851A63FE7E9A -:10536000886B8213891608BE110ECE0202920B9E24 -:1053700025B4991EDDA19F200E88029822C0EF045B -:10538000D8110E88029824C0E49E21C080D2A02BA0 -:10539000600C2864071CDD8F0CBE11A7EE2DE28582 -:1053A000ACBB28B4CF0D9D0B2DE685D10F000000BE -:1053B0006C1004C020D10F006C10048633C071C083 -:1053C00030600001B13300310400741A04620174CA -:1053D00060F1D10F6C1004022A02033B025BFFF65E -:1053E0001CDD771BDDBFC79F88B009A903098A01AF -:1053F0009AB079801EC0F00FE4311DDD6E0002000E -:105400002BD2821EDDB82AC1020EBB022BD6820A25 -:10541000E431D10F28C102C19009880208084F2841 -:10542000C50208E431D10F006C1004C0C00CE43197 -:1054300012DD631ADD6000020029A28218DDAC1BB8 -:10544000DDAA2621020B990108660129A6822625DC -:105450000206E43114DDA715DDA2236A902326128B -:105460008550242611252613222C40D10F00000040 -:105470006C1008D6102B0A64291AB41ADD4D0D23BE -:10548000111CDD4E0F2511B81898130E551118DD9B -:1054900099AC55A838AA332C80FF2A80FEA933285E -:1054A0008D0129800108AA112880000CAA02088811 -:1054B0001109880208AA1C288C08281604580862BA -:1054C00014DD3F0AA7022441162A30802B1204075C -:1054D000AA2858085DB1338B13B4559A6004AC28E0 -:1054E000B4662C56277B69E016DD769412C050C056 -:1054F000D017DD329D15D370D4102F60802E6082BE -:105500009F169E17881672891A8D128C402A607F0A -:105510000DCC282B3A200CAA2858084BC0B10ABE43 -:10552000372E35408F1772F91A8D128C402A608100 -:105530000DCC282B3A200CAA28580843C0B10ABE2B -:10554000372E3542B233B444B1556952B6B466C051 -:10555000508F15B877D370B2FF9F156EF899D10FA1 -:105560006C1004C021D10F006C1004270A001CDD50 -:10557000111FDD221EDD251DDD0E1ADD501BDD5E37 -:10558000C02824B0006D2A75AA48288080C0916484 -:10559000806100410415DD09C03125502E00361A06 -:1055A0000655010595390C56110C66082962966E50 -:1055B000974D0D590A29922468900812DD42024243 -:1055C0000872993B23629512DD06CB349F3002822C -:1055D000020E4402C092993194329233AD52246249 -:1055E00095C090244C1024669524B0002924A0AACC -:1055F00042292480B177B14404044224B400D10F7D -:10560000D10FD10F6C10041ADCEA2AA00058021C3A -:105610005BFFD5022A02033B025BFFD11BDCE8C91A -:10562000A12CB102C0D40DCC020C0C4F2CB5020C35 -:10563000E431D10FC0A00AE43118DCDE0002002FF3 -:10564000828219DCF12EB10209FF022F86820EE45C -:1056500031D10F006C1004C02002E43114DCD816E4 -:10566000DCD5000200226282234102732F0603E48C -:1056700031C020D10F19DD221ADD212841020A2A6A -:10568000010988012A668228450208E43115DD18DF -:1056900012DD1D25461DD10F6C1004292006289C03 -:1056A000F96480A02A9CFD65A0968A288D262F0A81 -:1056B000087AD9042B221FC8BD2C206464C0812E17 -:1056C00022090EAE0C66E0782B200C1EDCBA0CBC56 -:1056D00011AECC28C28619DCB878F3026000AD099F -:1056E000B90A2992A36890082E220009EE0C65E001 -:1056F0009B29C2851FDCC26490929F90C0E41FDC8E -:10570000CE9E9128200AC0E09E930F88029892882E 
-:10571000200F880298942F20079A979D962F950A1C -:105720002E240728200629206468833328C2851286 -:10573000DCA9288C20A2B22E24CF28C685C020D177 -:105740000FC020D10F2A206A0111020A2A4165AF39 -:1057500052DA20C0B05805E464AFE5C021D10F0093 -:10576000649FC81FDC962D20168FF209DD0C00F116 -:105770000400DD1AADAD9D2912DC9728C285A2B2C6 -:105780002E24CF288C2028C685C020D10FC021D13F -:105790000F0000006C1004260A001BDCDB15DC8700 -:1057A00028206517DC84288CFE6480940C4D110D34 -:1057B000BD082CD2F52BD2F42ED2F77CB13DB4BB70 -:1057C0002BD6F47BE9052BD2F62BD6F47CB92C2A08 -:1057D000D2F62AD6F52AD6F406E431000200287261 -:1057E000822AFAFF004104290A012F510200991A66 -:1057F0000A99030988012876820FE4312624652B53 -:10580000D2F48E5A2CD2F5B0EE9E5A7BCB1629D20A -:10581000F62FD2F70CB80C09FF0C08FF0C0F2F1451 -:10582000C8F96000320BCA0C0A2A14CEA92B510207 -:10583000C0C20CBB020B0B4F2B55020BE431D10F36 -:1058400000DB30DA205BFF941BDCB064AF5D0C4DF5 -:1058500011ADBD63FFA8000006E4310002002F7205 -:105860008218DC6E2E510208FF022F76820EE43180 -:10587000D10F00006C1004C03003E43116DC4E156B -:10588000DC4F00020024628274472118DCA0875A92 -:10589000084801286682CD7319DC9E0C2A11AA994A -:1058A0002292832992847291038220CC292B510267 -:1058B0000BE431C020D10F001FDC972E51020FEEF8 -:1058C000012E55020EE431B02DB17C9C5A12DC92AF -:1058D00008DD112D5619D10F6C10061BDC351EDCAE -:1058E0003722B0001ADC8E6F23721DDC75C0481899 -:1058F000DC8D1FDC8BDC10D5C083F000808600506F -:105900008A6D4A4F0F35110D34092440800B560A19 -:10591000296294B1330E55092251400F44110C44B1 -:105920000A874009A80C02883622514107883608A8 -:10593000770CA8992966949740296295874109A810 -:105940000C02883607883608770CA899296695973F -:1059500041030342B13808084298F0D10F1CDC72B1 -:1059600013DC7327B0002332B5647057C091C0D0E8 -:1059700016DC7115DC6FC0402AC00003884328C4C0 -:10598000006D793C004104B14400971A7780148E71 -:10599000502FB2952DB695AFEE2EED2006EE369E29 -:1059A0005060001877A00983509D5023B695600081 -:1059B0000223B295223D2006223622B695B455B870 -:1059C000BBD10F0003884328C400D10F6C1004C062 -:1059D0004004E43115DC59000200885013DC58CB38 -:1059E000815BFFBD1CDC570C2D11ADCC2BC2822A74 -:1059F000C28394507BAB142EC28429C2850ABD0C8D -:105A00000E990C0D990C0929146000050BA90C09BD -:105A10002914993015DBEA2A51020AE4312A2CFCB8 -:105A200058004B2B32000AA2022BBCFF9B30CCB695 -:105A3000C8A4D2A0D10F000004E4311EDBDE0002B6 -:105A4000002DE2822FBAFF2C51020FDD012DE682DC -:105A50000CE431D10F0000006C1004D10F000000E5 -:105A60006C1004C020D10F006C100413DC36C0D1C0 -:105A700003230923318DC0A06F340260008D19DB30 -:105A8000CD1BDBCE17DC2F0C2811A87726728325BF -:105A900072822CFAFF76514788502E7285255C045D -:105AA00025768275E9052572842576827659292E18 -:105AB00072842E76822E76830AE4310002002392CD -:105AC000820021042FB10200D61A0C6603063301AE -:105AD0002396820FE43126728325728260000200D1 -:105AE000D8A07659220AE4310002002392820021D4 -:105AF0000400D21A2FB1020C220302320122968234 -:105B00000FE431D280D10F00D280D10FC020D10F4D -:105B10006C1004DB30862015DBA6280A002825023D -:105B2000DA2028B0002CB00705880A28824C2D0AFC -:105B3000010B8000DBA065AFE61ADB9F0A4A0A2949 -:105B4000A2A3C7BF769101D10F2BA6A3D10F00004E -:105B50006C1004C0D1C7CF1BDB9919DB9617DB94FF -:105B60000C2811A87786758574C0A076516288507C -:105B70008E77B455957475E90385769574765927B3 -:105B80008F769F759F740AE431000200239282B4DD -:105B90002E2FB10200E10400D61A0C660306330171 -:105BA0002396820FE431867583747639280AE431AE -:105BB0000002002E9282B42200210424B10200DFF0 -:105BC0001A0CFF030FEE012E968204E431D280D12D -:105BD0000FD8A07651D6D280D10F00006C100429C6 -:105BE0000A801EDB9A1FDB9A1CDB730C2B11ACBBEB -:105BF0002C2CFC2DB2850FCC029ED19CD0C051C064 
-:105C00007013DB9614DB9518DB932AB285A8280461 -:105C1000240A234691A986B8AA2AB685A98827848A -:105C20009F25649FD10F00006C100419DBC70C2A5C -:105C300011A9A98990C484798B761BDBB5ABAC2AFA -:105C4000C2832CC2847AC1688AA02BBC30D3A064E2 -:105C5000A05E0B2B0A2CB2A319DB7F68C0071DDBEB -:105C6000BBD30F7DC94AA929299D0129901F68919D -:105C70003270A603D3A0CA9E689210C7AF2AB6A3FB -:105C80002A2CFC5BFFB3D230D10F000013DBB10331 -:105C9000A3018C311DDB510C8C140DCC012CB6A34F -:105CA00063FFDC00C020D10FDA205BFFCCC020D125 -:105CB0000FC020D10F0000006C1004DB30C0D019E1 -:105CC000DB3CDA2028300022300708481209880A15 -:105CD00028824CDC200B80001BDB370C4A11ABAA5E -:105CE00029A28409290B29A684D10F006C1004C0B5 -:105CF0004118DB3017DB320C2611A727277030A89C -:105D000066256286007104A35500441A7541482235 -:105D1000628415DB5202320BC922882117DB2F085F -:105D20008414074401754905C834C020D10FD10F30 -:105D30000809471DDB86C0B28E201FDB1D0E0E43F7 -:105D4000AFEC2BC4A00FEE0A2DE6242A6284C020FB -:105D50000A990B296684D10FC020D10F6C1004DB87 -:105D600030C0D018DB13DA20253000223007085865 -:105D70000A28824CDC200B80008931709E121BDBCC -:105D80000D0C4A11ABAA29A28409290B29A684D19A -:105D90000F09C95268532600910418DB08C0A12FCF -:105DA000811200AA1A0AFF022F85121EDB020C4D77 -:105DB00011AEDD2CD2840C2C0B2CD684D10FC081DB -:105DC0001FDAFFB89A0A0A472EF11200A1040088D0 -:105DD0001A08EE022EF5121DDAF70C4C11ADCC2B81 -:105DE000C2840B2B0B2BC684D10F00006C1004DB7C -:105DF00030C0D019DAEFDA202830002230070988C5 -:105E00000A28824CDC200B80001CDAEA0C4B11AC17 -:105E1000BB2AB2840A2A0B2AB684D10F6C1004C0A4 -:105E20004118DAE416DAE60C2711A626266030A817 -:105E300072252286006104A35500441A7541082288 -:105E4000228402320BD10F00C020D10F6C10041538 -:105E5000DB410249142956112452120208430F88CB -:105E600011C07300810400361A008104C78F0077C7 -:105E70001A087703074401064402245612D10F0082 -:105E80006C10066E23026000AC6420A7C0A08510D1 -:105E900013DB1916DB30C040A6AA2BA2AE0B1941AA -:105EA00064906668915D68925268933C2AA2AA2821 -:105EB0003C7F288C7F0A0A4D2980012880002AAC6B -:105EC000F20888110988027589462B3D0129B00026 -:105ED0002BB0010899110B99027A9934B8332A2A08 -:105EE00000B1447249B160004A7FBF0715DB1B63F4 -:105EF000FFB90000253AE863FFB10000253AE863E6 -:105F0000FFA90000250A6463FFA1C05A63FF9C003B -:105F100000705F082534FF058C142C34FE70AF0B25 -:105F20000A8D142E3D012AE4012DE400DA405BFDC8 -:105F30005063FFA7D10FD10F6C10041ADAA019DA41 -:105F40009D1CDB061BDB07C080C07160000D0000DC -:105F50000022A430B1AA299C107B915F26928679F9 -:105F6000C2156E6262C0206D080AB12200210400D1 -:105F7000741A764BDB63FFEE2292850D63110325C5 -:105F800014645FCFD650032D436DD9039820B422FB -:105F90000644146D49229820982198229823982429 -:105FA00098259826982798289829982A982B982CED -:105FB000982D982E982F222C4063FF971EDA7E276B -:105FC000E68027E681D10F00C02063FF8300000038 -:105FD0006C1004C062C04112DA791ADA7513DAE182 -:105FE0002AA00023322D19DADB2BACFE2992AE6EEB -:105FF000A30260008E090E402D1AC2C2CD0EDC39FC -:106000002C251664B0895BFF9E15DAD71ADAD12BDE -:106010003AE80A3A0158058C2B21160ABB28D3A06E -:106020009B505805A32B52000ABB082A0A005805AA -:10603000A215DACE2D21022C3AE80C3C2804DD0210 -:106040002D25029C5058059A8B50AABBC0A158051B -:106050009A1CDAC72D21020C3C2806DD0213DAC592 -:106060002D25029C305805928B30AABBC0A2580542 -:10607000922A2102C0B40BAA020A0A4F2A2502580A -:1060800005A6D10F242423C3CC2C251663FF76004C -:1060900018DABD1CDAB919DABA1BDAB817DA8B8547 -:1060A000202E0AFD1FDAB92D202E24F47A24F47E46 -:1060B00024F4820EDD0124F4862E0AF70755280603 -:1060C000DD02C0750EDD01050506AB5BA959C0E810 -:1060D000AC5C24C4AB0EDD0227C4AC2E0ADFA8558D -:1060E00027B4EC0EDD0124B4EBC2E027942C0EDDC6 
-:1060F0000224942B2E0A800D0D4627546C24546BD9 -:106100000EDD022D242E63FEFC0000006C10042A1C -:106110000A302B0A035BFF4D12DA8FC39029261633 -:10612000C3A1C0B3C08A2826175BFF48C03CC3B1D7 -:106130002B26161ADA222AA02023261764A079C358 -:10614000A2C0B15BFF42C3A2C0B15BFF40C3C22C7F -:106150002616C2AFC0B12326175BFF3CC28F28268C -:1061600016C0FE2F2617C2E22E26162A0AA1C0B19B -:10617000C0D82D26175BFF352A0AA12A2616C3A6EA -:10618000C0B3C1922926175BFF31C3C62C2616C1A6 -:10619000B32A0AA22B2617C0B35BFF2C290AA22917 -:1061A0002616C185282617C2FB2F2616C0E72E26E5 -:1061B000171DDA762D2610D10FC3A2C0B35BFF23C3 -:1061C00063FF82006C10041CDA3F1BDA2C18DA70B3 -:1061D00017DA7116DA7115DA71C0E0C0D414DA3B3F -:1061E0001FD9F7C0288FF06D2A36DAC0D9C07C5B82 -:1061F000020FC90C1CDA350C9C28A8C3A6C22A368B -:10620000802A2584A4C2A7CC2D248C2B248A2B245D -:10621000872E248BB1BB2E369F2C369E2C369DB1FB -:10622000AC1CDA161BDA5FC0286D2A33DAC0D9C07D -:106230007C5B020FC90C1CDA240C9C28A8C3A6C2E4 -:106240002A36802B2584A4C2B1BBA7CC2D248C2E4A -:10625000248B2A248A2E369F2C369E2C369DB1AC58 -:10626000C07919DA141BDA5113DA4F1ADA4F18DA37 -:106270005014DA1516DA5004F42812DA4F04660CBA -:10628000040506A252A858AA5AA3539B3029A50078 -:1062900027848AC091C0A52A848C29848B17DA4868 -:1062A00018DA47A75726361D26361E2E361F16DA51 -:1062B0004513DA45A65504330C2826C82E75002D43 -:1062C00054AC2E54AB2E54AA2326E62326E52E26C4 -:1062D000E7D10F006C100613DA2317DA1E24723D83 -:1062E0002232937F2F0B6D08052832937F8F026334 -:1062F000FFF3C0C4C0B01AD9B1C051D94004593954 -:1063000029A4206E44020BB502C3281ED9ACDDB00F -:1063100025E422052D392DE421C0501EDA2C19DA8E -:106320001C18DA1C16DA1E1DDA2A94102A72451778 -:10633000D9E76DA94BD450B3557A5B17DF50756B15 -:10634000071FD99E8FF00F5F0C12D9DF02F228AE23 -:106350002222D681D54013D9DC746B0715D99885D4 -:106360005005450C035328B145A73FA832A9332255 -:10637000369D22369E2436802B369F2BF48B2CF4B0 -:106380008C14D9F824424DC030041414C84C6D0844 -:1063900006B133041414C84263FFF20015D985C452 -:1063A000400031041AD986C0D193A200DD1AC13849 -:1063B000B0DD9DA318D9EC2B824D29824E29A51C56 -:1063C0002882537A871E2C54008E106FE45D12D9F8 -:1063D0007B2F211D23211C2F251B04330C23251C5F -:1063E00023251AD10FC06218D9DB88807E87D9890E -:1063F000102654006F94191BD9712AB11C0A1A1463 -:1064000004AA0C2AB51C2AB51D2AB51A2AB51BD117 -:106410000F1BD96A2AB11C0A1A1403AA0C2AB51C2C -:106420002AB51D2AB51A2AB51BD10F001CD9642B19 -:10643000C11D2DC11C2BC51B03DD0C2DC51C2DC57D -:106440001AD10F006C100619D95D14D9C212D9C522 -:1064500015D9E0C73FC0E02E56A82E56A92E56AA41 -:106460002E56AB23262918D985DB101CD9DAC0D4C7 -:106470002A42452D16019C1000B0890A880C2896E6 -:10648000005BFF942B22E318D94D0B5B149B842AED -:1064900022E48B84B1AA0A5A140BAA0C9A852922E9 -:1064A000E509591499862F22CD0F5F149F875BFF52 -:1064B000455BFF1623463BC1B01DD9401CD99E2A1F -:1064C000D1022C463A0BAA020A0A4F2AD5025804D6 -:1064D000925BFEBF5BFE98C050C0B016D93614D98F -:1064E0003E17D9AEC0C0C73E93122C262DC03060D7 -:1064F00000440000007F9F0FB155091914659FF4F7 -:10650000C0500AA9027FA7EF18D92ADA5008580A02 -:1065100028822C2B0A000B8000005104D2A0C091CD -:10652000C7AF00991A0A99039912CE3864206BD329 -:10653000202B20072516032C12022A62827CA863D6 -:1065400018D91C01110208580A28822CDA500B8035 -:1065500000D2A0643FD58A310A8A1404AA01C82A4D -:106560002B22010B8B1404BB017BA945DDA07A7B98 -:10657000081DD9122DD2000DAD0CDB3019D90D1A22 -:10658000D95288130ADA28DC801DD99009880A2894 -:10659000823C0DAA080B8000652F93D320C0B06306 -:1065A000FF9400007FAF34B1550050040A0919630D -:1065B000FF42DAB07B7B081AD9012AA2000ABA0C82 -:1065C0001BD9428C310BAB280C8A141CD980ACBB74 -:1065D0001CD98004AA012BC68163FF8F645F60C051 
-:1065E00050C0B0C7CE9C1263FF5500006C1004274A -:1065F000221EC08008E4311BD8EF0002002AB282BC -:1066000019D8EF003104C06100661A2991020A6AA4 -:10661000022AB68209E43115D94A0C3811A8532848 -:1066200032822432842A8CFC7841102921022A36B5 -:106630008297A0096902292502D10F002B21022C83 -:1066400032850B6B022CCCFC2C368297C02B25029A -:10665000D10F00006C1004C0E71DD8D21CD8D40D97 -:106660004911D7208B228A200B4B0BD2A007A80CF4 -:106670009B72288CF4C8346F8E026000A31FD8CAA6 -:10668000A298AF7B78B334C93DC081C0F0028F3887 -:106690000F0F42C9FA2CD67ED5206D4A05003088EE -:1066A00000508C887008980878B16DD2A09870D18D -:1066B0000FC0F0038F387FE0DE63FFD8027B0CAFA2 -:1066C000BB0B990C643047D830C0F1C05002F5388C -:1066D0000505426450792CD67E0B36122F6C100FB4 -:1066E0004F366DFA0500808800208C06440CC0816E -:1066F000C05003B208237C0C038538050542645062 -:106700005A2CD67ED30F6D4A0500208800308CD2DB -:10671000A0A798BC889870D10FD2A0BC799970D1ED -:106720000FD2302BAD08C0F1C0500BF53805054233 -:10673000CB542CD67E083F14260A100F660C064652 -:10674000366D6A0500208800B08C827063FF2D00D2 -:10675000C05003F53875E08063FF7A00C0600286A0 -:106760003876E09F63FF9900C05003F53875E0C4A8 -:1067700063FFBE006C1004D62068520F695324DA00 -:1067800020DB30DC405800F3D2A0D10FDA20DB3020 -:10679000DC405800F09A2424240EC02122640FC04B -:1067A00020D10F00B83BB04C2A2C7489242D200E28 -:1067B0002E200FA4DDB1EE2E240FB0DD2D240E28E7 -:1067C00090072D9003A488B088B1DD2D9403289400 -:1067D000075BFFA069511DC0E082242A600F18D812 -:1067E000FE2A240329600E8F2029240708FF029F18 -:1067F000209E64D10FC020D10F0000006C100494C3 -:106800002319D8F6C0B3083A110BAA02992019D857 -:10681000699A2116D867C05028929D2564A2288CB9 -:106820001828969DD10F00006C1004282066C038EF -:10683000232406B788282466D10F00006C100603B5 -:106840005A0C0D36110D5C11D8208B2282210CBB05 -:106850000C06550F9B8202320B928113D853D9201C -:10686000A38F6450561CD84FC0D71BD850A256C017 -:10687000E1C09004E93809094276F34F044302CAA3 -:10688000912BC67ED30F6DAA0500208800308C891D -:1068900081A95909FA0C64A07D99818A8264A00FAC -:1068A000D290D10FC06002E63876D0D763FFD10016 -:1068B000C020BC89998199809282D10F7F230429BD -:1068C0002DF8998165BFD863FFE50000028F0CA306 -:1068D000FF0F3312931003AA0CD3406490402BC6D1 -:1068E0007E8610D30F6D6A0500208800308CBC8234 -:1068F000C090A4F3C041034938090942CA9B2BC682 -:106900007E6DAA0500208800308C0F590CA989BC27 -:1069100099998163FF8400BC89998163FF7C00C0E1 -:106920006002E63876D0B963FFB300C07002473822 -:1069300077D0CD63FFC700006C100414D82AC15271 -:10694000A424CA3128221D73811C292102659016B6 -:106950002A300075A912022A02033B022C3007C01C -:10696000D25801D0653FDCD10F2B300703BB0B0B96 -:10697000BA0274B3022ABDF8D3A063FFC4000000BA -:106980006C1004292006C0706E9741292102C08F27 -:106990002A2014C0B62B240606AA022A24147980C1 -:1069A000022725022A221E2C221D7AC10EC8ABDA2C -:1069B00020DB302C0A00033D025BF80D6450742D7F -:1069C00021020D0D4CC9D3C020D10F00002E9CFB1D -:1069D00064E0822F21020F0F4C65F0911AD7F61C4C -:1069E000D7F429A29EC08A798B5D2BC22668B00499 -:1069F0008D207BD95229A29DC0F364904A97901DA7 -:106A0000D8062E21049D9608EE110FEE029E979E49 -:106A10009118D802C0E527C4A22E24062BA29D2FD0 -:106A200021022BBC3008FF022F25022BA69DC0207F -:106A3000D10F00002F300068F939DA20DB30044C28 -:106A40000258004463FF7700022A022B0A0658000E -:106A5000D3220A00D10F6550102830006889240223 -:106A60002A02033B02DC4058003BC020D10FD27009 -:106A7000D10F00002A2C74033B02044C025BFEF58C -:106A800063FF3B00DB30DC402A2C745BFEF2C0204D -:106A9000D10F00006C1004C83F89268829A399995A -:106AA0002609880C080848282525CC52C020D10F7B -:106AB000DB402A2C745BF936D2A0D10F6C1004D8BD -:106AC00020D73082220D451105220C928264207459 
-:106AD00007420B13D7B5D420A383732302242DF8C8 -:106AE000858074514CBC82C0906D081600408800AF -:106AF000708C773903D720C0918680743901D420F7 -:106B000074610263FFE2CA98C097C0411BD835C0C8 -:106B1000A00B8B0C0B4A380A0A42C9AA1DD7A21C2B -:106B2000D7A32CD67EC140D30F6D4A050020880024 -:106B3000308C9780D270D10FBC8FC0E00F4E387E62 -:106B400090E263FFD6BC8292819280C0209282D173 -:106B50000F0000006C1006C0D71CD7921BD7940DF5 -:106B60004911D7202E221F28221D0E4E0BD280073E -:106B70008A0C2E761F2AAC80C8346FAE026000CB20 -:106B80002F0A801AD798A29EAA7A7EA33FC93FC037 -:106B9000E1C05002E538050542CA552BC67EDB2010 -:106BA000D30F6D4A0500308800B08C2E721DAE9E4A -:106BB0000EA50C645086D2802E761DC091298403C8 -:106BC000D10FC05003E53875D0D363FFCD15D785FD -:106BD000027E0CA5EE643051C0A1250A0002A53842 -:106BE000033A020505426450922BC67E0E3512957B -:106BF00010255C10054536D30F6D5A0500A088009E -:106C0000208CC0A1A3E2C05023FA8003730C03A51B -:106C100038AF730505426450722BC67E851005455A -:106C20000C6D5A0500208800308CD280C0A10E9BCC -:106C30000CAB7BAFBB2B761D2A8403D10FD280C057 -:106C4000C1AF7D2D761D2C8403D10F00D2302E8D47 -:106C500008C0F1C0500EF538050542CB592BC67E51 -:106C60000A3F14C1600F660C064636D30F6D6A05E5 -:106C700000208800E08C22721D63FF03C061C050B9 -:106C800003653875D80263FF6263FF5CC05002A5DC -:106C90003875D08763FF8100C06003F63876D0BFB7 -:106CA00063FFB9006C10042A201529201614D7435D -:106CB0000A990CCB9D2E200B04ED092BD11C8F289B -:106CC00009BC36ACAA0CBB0C2BD51C0A0A472A24DB -:106CD00015CAAF8B438942B0A800910400881AA856 -:106CE000FF0FBB029B278F260FB80C783B1AC020E2 -:106CF000D10F0000292102C0A20A9902292502C051 -:106D000021D10F008B2763FFDC2BD11C0CAA0C0AAE -:106D10000A472A2415ACBB2BD51CC9AE8B438C2843 -:106D20008F42B0AD00F10400DD1AADCC0CBB029B6C -:106D300027DA20B7EB580019C021D10F9F2763FF36 -:106D4000EF0000006C100428203C643047053060E0 -:106D500000073E01053EB156076539054928C77F42 -:106D6000A933030641076603B166060641A6337ED2 -:106D7000871E222125291AFC732B1502380C098144 -:106D80006000063E01023EB12406423903220AD1C8 -:106D90000FD230D10FC05163FFC000006C10042728 -:106DA000221EC08008E4311DD7030002002CD282CD -:106DB0001BD703003104C06100661A2BB1020C6CB2 -:106DC000022CD6820BE43119D7870C3A11AA9328EA -:106DD00032829780253282243284B45525368275DA -:106DE000410A292102096902292502D10F2A21021B -:106DF0002B32830A6A022B36822A2502D10F000029 -:106E00006C100418D6EC0C2711087708267286251A -:106E10003C04765B1315D6E805220A2222A36820DB -:106E200002742904227285D10FC020D10F00000006 -:106E30006C100419D6EB27221EC08009770208E4E3 -:106E4000311DD6DC0002002CD2821BD6DC003104BE -:106E5000C06100661A2BB1020C6C022CD6820BE4C6 -:106E60003119D7600C3A11AA9328328297802532C3 -:106E700082243284B45525368275410B2A21020AB8 -:106E80006A022A2502D10F002B21022C32830B6BC0 -:106E9000022C36822B2502D10F0000006C10041B3F -:106EA000D6C50C2A11ABAA29A286B438798B221B2D -:106EB000D6C219D6E80B2B0A2BB2A309290868B051 -:106EC0000274B90D299D0129901F6E920822A28596 -:106ED000D10FC020D10FC892C020D10FDA205BEEB5 -:106EE000B3C020D10F0000006C100414D6B22842A9 -:106EF0009E19D6AF6F88026000BA29922668900763 -:106F00008A2009AA0C65A0AC2A429DC0DC64A0A41A -:106F10002B200C19D6A90CBC11A4CC2EC28609B901 -:106F20000A7ED30260009A2992A36890078D2009F7 -:106F3000DD0C65D08C25C2856450862D2104C030BF -:106F40006ED80D2C2066B8CC0C0C472C246665C07E -:106F50007B1CD72518D6AF1AD6A619D6B61DD6AB28 -:106F6000C0E49E519D508F209357935599539A5644 -:106F70009A5408FF021AD6C29F5288269F5A9E59D9 -:106F80009D58935E9C5D935C9A5B08084805881148 -:106F9000985FC0D81FD6900CB911A499289285AFDC -:106FA000BF23F4CF288C402896858E262D24069E5C -:106FB00029C020D10FCA33DA20C0B65BFF78C72FB3 
-:106FC000D10FC93ADA205BFF75C72FD10FDBD05B39 -:106FD000FE0B2324662B200C63FF7500C72FD10FF7 -:106FE000C72FD10F6C1004C85B29200668941C6859 -:106FF0009607C020D10FC020D10FDA20DB30DC4053 -:10700000DD502E0A005BFE5ED2A0D10F2E200C18A0 -:10701000D6690CEF11A8FF29F286C088798B791AFE -:10702000D6660AEA0A2AA2A368A0048B207AB96865 -:1070300023F2856430621BD670290A802C206828D0 -:1070400020672D21040B881104DD1108DD020DCC11 -:1070500002C0842D4A100DCC021DD66898319D3097 -:107060008A2B99379C340BAA02C0C09C359C369A57 -:10707000322A2C74DB4028F285C0D3288C2028F6D5 -:10708000852C25042D24061FD653DD40AFEE2CE4BD -:10709000CF5BFDEAD2A0D10F00DA20DBE05BFF3F3F -:1070A000C020D10F6C100AD6302A200624160128E1 -:1070B000ACF86583862B2122C0F22A2124CC572AE2 -:1070C000AC010A0A4F2A25247ABB0260037F2C21D7 -:1070D000020C0C4C65C3192E22158D32C0910EDDA9 -:1070E0000C65D39088381ED63364836B8C37C0B858 -:1070F000C0960CB9399914B49A9A120D9911991332 -:107100008F6718D62EC9FB2880217F83168B142CFD -:1071100022002A200C5BFF61D4A064A3B38F6760B8 -:10712000002800002B200C89120CBA11AEAA2CA248 -:10713000861DD6217C9B3E0DBD0A2DD2A368D004AE -:1071400088207D893024A28564436427212E07F797 -:107150003607F90C6F9D01D7F0DA20DB70C1C42D22 -:10716000211F5BFEF889268827DDA009880C7A8B11 -:10717000179A10600006C04063FFCC0000DA208B35 -:10718000105BFEC88D1065A267C0E09E488C649CB1 -:10719000498B658A669B4A9A4B97458F677F730236 -:1071A000600120CD529D10DA20DB302C12015BFEF5 -:1071B000698D10C051D6A08FA7C0C08A68974D9A1C -:1071C0004C8869896A984E994F8E6A8A69AE7E7733 -:1071D000EB01B1AA9E6A9A698B60C0A00B8E1477EE -:1071E000B701C0A1C091C08493159D179516C0D05A -:1071F00025203CC030085801089338C0820833105D -:10720000085B010535400B9D3807DD100BAB100EF8 -:1072100019402A211F07991003DD020DBB020553F7 -:10722000100933020A55112921250A2A14092914A3 -:107230000499110A99020933028A2B2921040BAA05 -:10724000021BD66A0899110955020855020BAA02B9 -:107250009A408920881408991109880219D5EA1DD5 -:10726000D66409880298418B2A9346954783150D69 -:10727000BB0285168D179B448A658966AACAA97CBC -:1072800077CB01B1AA07FB0C9C669A6588268E29EC -:10729000AD87972607EE0C0E0E482E25259B672BF3 -:1072A000200C87131ED5C40CB911AE99289285A75E -:1072B0008828968517D5C8C090A7BB29B4CF871852 -:1072C00063FE3C008C60C0E0C091C0F0C034C0B828 -:1072D0002A210428203C08AA110B8B0103830103F7 -:1072E0009F380B9B39C03208FF10038801089E3875 -:1072F0000C881407EE100FEE020388010898390578 -:10730000BF1029211F0ABB1107881008FF020BAA12 -:107310000218D5BC09291403AA022B212583200BAE -:107320002B1404BB110833110FBB020B99028B14F1 -:107330008F2A0B33020833028B2B64708688689780 -:107340004D984C8769886A93419946974E984FC0EB -:107350007077C701C0719A4718D6260B7C100CECC9 -:107360000208F802984418D6230CBC0208CC029CF0 -:10737000402A200C295CFEC0801FD58E1CD5960C9F -:10738000AE112B2124ACAAAFEEB0BB8F132CE2853B -:1073900028A4CFAFCC2CE6852A22152B2524B1AA10 -:1073A0002A26156490DBC9D28F262E22090DFF08EC -:1073B0002F26060FEE0C0E0E482E25256550E4C034 -:1073C00020D10F00C07093419F4499469A4777C7D8 -:1073D0000A1CD57A2CC022C0810C87381CD6070B1A -:1073E000781008E80208B8020C8802984063FF8011 -:1073F00000CC57DA20DB608C115BFDD629210268B6 -:107400009806689403C020D10F2B221EC0A0292209 -:107410001D2A25027B9901C0B064BFE813D5652CF5 -:10742000B00728B000DA2003880A28824CC0D10BAC -:107430008000DBA065AFE763FFCA000068A779DAC8 -:1074400020DB30DC40DD505BFEE7D2A0D10FC16D08 -:10745000C19D29252C60000429252CD6902624675F -:107460002F2468DA20DB308C11DD502E0A805BFD82 -:1074700044D2A0D10FC168C1A82A252C63FFDD002A -:107480000000C8DF8C268B29ADCC9C260CBB0C0BD6 -:107490000B482B25252A2C74DB602C12015BFD8701 -:1074A000D2A0D10F2A2C748B115BF6B9D2A0D10FC8 
-:1074B000DA205BFE3A63FF3800DA20C0B15BFE8A57 -:1074C00064ABF1655F352D2124B1DD2D252463FFEB -:1074D0001FDA202B200C5BFE5663FF1412D5C882E6 -:1074E00020028257C82163FFFC12D5C403E8300490 -:1074F000EE3005B13093209421952263FFFC00000B -:1075000010D5C0910092019302940311D597821077 -:1075100001EA30A21101F031C04004E4160002007B -:1075200011D5B98210234A00032202921011D5828C -:10753000C021921004E4318403830282018100009F -:10754000D23001230000000010D5B09100920193C9 -:1075500002940311D586821001EA30A21101F131A3 -:10756000C04004E41600020011D5A7821013D52BE9 -:10757000032202921004E431840383028201810019 -:1075800000D330013300000010D5A19100810165C6 -:10759000104981026510448103CF1F920193029428 -:1075A0000311D574821001EA30A21101F231C040FA -:1075B00004E41600020011D593821013D5130322A0 -:1075C00002921004E431840383028201C01091030B -:1075D00091029101810000D43001430012D542C0D4 -:1075E0003028374028374428374828374C233D0176 -:1075F0007233ED03020063FFFC00000010D585919B -:107600000092019302940311D5838210921011D538 -:10761000348310032202921011D58012D5469210A5 -:10762000C04004E41600020011D577821013D52D56 -:10763000032202921004E431840383028201810058 -:1076400000D53001530000006C10026E322FD6209E -:10765000056F04043F04745B2A05440C00410400D8 -:10766000331A220A006D490D73630403660CB122BC -:107670000F2211031314736302222C01D10FC83B94 -:10768000D10F000073630CC021D10F000000000077 -:1076900044495630C020D10F6C10020040046B4C9E -:1076A00007032318020219D10F020319C020D10FBA -:1076B0006C100202EA30D10F6C1002CC2503F031BD -:1076C00060000F006F220503F1316000056F230594 -:1076D00003F231000200D10F6C1002CC2502F03011 -:1076E000D10F00006F220402F130D10F6F2304028A -:1076F000F230D10FC020D10F6C1002220A20230AD1 -:10770000006D280E28374028374428374828374C42 -:10771000233D01030200D10F6C100202E431D10FAE -:107720000A004368656C73696F20465720444542E0 -:1077300055473D3020284275696C74204672692097 -:107740004D61792020382031363A30373A333620AF -:107750005044542032303039206F6E20636C656F96 -:1077600070617472612E6173696364657369676EB9 -:107770006572732E636F6D3A2F686F6D652F666546 -:107780006C69782F772F66775F372E31292C20563A -:10779000657273696F6E2054337878203030372EDD -:1077A00030342E3030202D203130303730343030EE -:0877B000100704000071489469 -:00000001FF diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 641640dc7ae54e7d55a86d4d149ac1ec54d3f92e..8ea5e337450752ce4ed182672e27e8b4186280c0 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -601,8 +601,11 @@ static int ioc_settimeout(unsigned int fd, unsigned int cmd, } /* Bluetooth ioctls */ -#define HCIUARTSETPROTO _IOW('U', 200, int) -#define HCIUARTGETPROTO _IOR('U', 201, int) +#define HCIUARTSETPROTO _IOW('U', 200, int) +#define HCIUARTGETPROTO _IOR('U', 201, int) +#define HCIUARTGETDEVICE _IOR('U', 202, int) +#define HCIUARTSETFLAGS _IOW('U', 203, int) +#define HCIUARTGETFLAGS _IOR('U', 204, int) #define BNEPCONNADD _IOW('B', 200, int) #define BNEPCONNDEL _IOW('B', 201, int) @@ -1328,6 +1331,8 @@ COMPATIBLE_IOCTL(HCISETLINKPOL) COMPATIBLE_IOCTL(HCISETLINKMODE) COMPATIBLE_IOCTL(HCISETACLMTU) COMPATIBLE_IOCTL(HCISETSCOMTU) +COMPATIBLE_IOCTL(HCIBLOCKADDR) +COMPATIBLE_IOCTL(HCIUNBLOCKADDR) COMPATIBLE_IOCTL(HCIINQUIRY) COMPATIBLE_IOCTL(HCIUARTSETPROTO) COMPATIBLE_IOCTL(HCIUARTGETPROTO) diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 2fc8e14cc24a8b6ce36a39d4d929539d38a5710e..9aa9bcadf869ac5948e712532de794a52f52f529 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -276,6 +276,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \ 
$(srctree)/include/asm-$(SRCARCH)/kvm_para.h),) unifdef-y += kvm_para.h endif +unifdef-y += l2tp.h unifdef-y += llc.h unifdef-y += loop.h unifdef-y += lp.h diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 817b23705c91f62d1ed3a066eddcffc724da77c4..f6481daf6e52b2cc6387952120bdcd05bc26f051 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -431,6 +431,14 @@ struct atm_dev *atm_dev_register(const char *type,const struct atmdev_ops *ops, int number,unsigned long *flags); /* number == -1: pick first available */ struct atm_dev *atm_dev_lookup(int number); void atm_dev_deregister(struct atm_dev *dev); + +/* atm_dev_signal_change + * + * Propagate lower layer signal change in atm_dev->signal to netdevice. + * The event will be sent via a notifier call chain. + */ +void atm_dev_signal_change(struct atm_dev *dev, char signal); + void vcc_insert_socket(struct sock *sk); @@ -510,6 +518,15 @@ void register_atm_ioctl(struct atm_ioctl *); */ void deregister_atm_ioctl(struct atm_ioctl *); + +/* register_atmdevice_notifier - register atm_dev notify events + * + * Clients like br2684 will register notify events + * Currently we notify of signal found/lost + */ +int register_atmdevice_notifier(struct notifier_block *nb); +void unregister_atmdevice_notifier(struct notifier_block *nb); + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 7f437ca1ed4487652a7a1fa35b8684200912e117..b840a496028259076caff141de3a6a92ed177a3a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -1,6 +1,13 @@ #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 +#define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5482 0x0143bcb0 +#define PHY_ID_BCM5411 0x00206070 +#define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM5464 0x002060b0 +#define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM57780 0x03625d90 #define PHY_BCM_OUI_MASK 0xfffffc00 diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h index 2a61eb1beb85318bcd1972014a52fd8e3dff6519..d9cb19b7cff79c03feaed0421169ada719fd2b2b 100644 --- a/include/linux/caif/caif_socket.h +++ b/include/linux/caif/caif_socket.h @@ -62,6 +62,7 @@ enum caif_channel_priority { * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing. * @CAIFPROTO_UTIL: Utility (Psock) channel. * @CAIFPROTO_RFM: Remote File Manager + * @CAIFPROTO_DEBUG: Debug link * * This enum defines the CAIF Channel type to be used. This defines * the service to connect to on the modem. @@ -72,6 +73,7 @@ enum caif_protocol_type { CAIFPROTO_DATAGRAM_LOOP, CAIFPROTO_UTIL, CAIFPROTO_RFM, + CAIFPROTO_DEBUG, _CAIFPROTO_MAX }; #define CAIFPROTO_MAX _CAIFPROTO_MAX @@ -83,6 +85,28 @@ enum caif_protocol_type { enum caif_at_type { CAIF_ATTYPE_PLAIN = 2 }; + /** + * enum caif_debug_type - Content selection for debug connection + * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain + * both trace and interactive debug. + * @CAIF_DEBUG_TRACE: Connection contains trace only. + * @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug. 
+ */ +enum caif_debug_type { + CAIF_DEBUG_TRACE_INTERACTIVE = 0, + CAIF_DEBUG_TRACE, + CAIF_DEBUG_INTERACTIVE, +}; + +/** + * enum caif_debug_service - Debug Service Endpoint + * @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system + * @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system + */ +enum caif_debug_service { + CAIF_RADIO_DEBUG_SERVICE = 1, + CAIF_APP_DEBUG_SERVICE +}; /** * struct sockaddr_caif - the sockaddr structure for CAIF sockets. @@ -109,6 +133,12 @@ enum caif_at_type { * * @u.rfm.volume: Volume to mount. * + * @u.dbg: Applies when family = CAIFPROTO_DEBUG. + * + * @u.dbg.type: Type of debug connection to set up + * (caif_debug_type). + * + * @u.dbg.service: Service sub-system to connect (caif_debug_service * Description: * This structure holds the connect parameters used for setting up a * CAIF Channel. It defines the service to connect to on the modem. @@ -130,6 +160,10 @@ struct sockaddr_caif { __u32 connection_id; char volume[16]; } rfm; /* CAIFPROTO_RFM */ + struct { + __u8 type; /* type:enum caif_debug_type */ + __u8 service; /* service:caif_debug_service */ + } dbg; /* CAIFPROTO_DEBUG */ } u; }; diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 0000000000000000000000000000000000000000..72b713ab57e9af600ab37de0f665bc4f5dcf248a --- /dev/null +++ b/include/linux/can/platform/flexcan.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2010 Marc Kleine-Budde + * + * This file is released under the GPLv2 + * + */ + +#ifndef __CAN_PLATFORM_FLEXCAN_H +#define __CAN_PLATFORM_FLEXCAN_H + +/** + * struct flexcan_platform_data - flex CAN controller platform data + * @transceiver_enable: - called to power on/off the transceiver + * + */ +struct flexcan_platform_data { + void (*transceiver_switch)(int enable); +}; + +#endif /* __CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e3d00fdb858dc3cbf7e35b7d75a93b9c4b7321f3..ed3e92e41c6e5683ad3dbb823e48259f5150ac33 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -578,6 +578,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); int cgroup_scan_tasks(struct cgroup_scanner *scan); int cgroup_attach_task(struct cgroup *, struct task_struct *); +int cgroup_attach_task_current_cg(struct task_struct *); /* * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works @@ -634,6 +635,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats, return -EINVAL; } +/* No cgroups - nothing to do */ +static inline int cgroup_attach_task_current_cg(struct task_struct *t) +{ + return 0; +} + #endif /* !CONFIG_CGROUPS */ #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 0713e10571dd61eb0abf1388e0c415ba6d143b6d..6a8276f683b6fe8ca5d5a770cd0b1207db676924 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -638,43 +638,103 @@ extern void sysdev_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); -#define dev_printk(level, dev, format, arg...) \ - printk(level "%s %s: " format , dev_driver_string(dev) , \ - dev_name(dev) , ## arg) - -#define dev_emerg(dev, format, arg...) \ - dev_printk(KERN_EMERG , dev , format , ## arg) -#define dev_alert(dev, format, arg...) \ - dev_printk(KERN_ALERT , dev , format , ## arg) -#define dev_crit(dev, format, arg...) 
\ - dev_printk(KERN_CRIT , dev , format , ## arg) -#define dev_err(dev, format, arg...) \ - dev_printk(KERN_ERR , dev , format , ## arg) -#define dev_warn(dev, format, arg...) \ - dev_printk(KERN_WARNING , dev , format , ## arg) -#define dev_notice(dev, format, arg...) \ - dev_printk(KERN_NOTICE , dev , format , ## arg) -#define dev_info(dev, format, arg...) \ - dev_printk(KERN_INFO , dev , format , ## arg) + + +#ifdef CONFIG_PRINTK + +extern int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); +extern int dev_emerg(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_alert(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_crit(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_err(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_warn(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int dev_notice(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern int _dev_info(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +#else + +static inline int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); +static inline int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + { return 0; } + +static inline int dev_emerg(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_emerg(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_crit(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_crit(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_alert(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_alert(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_err(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_err(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_warn(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_warn(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int dev_notice(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int dev_notice(const struct device *dev, const char *fmt, ...) + { return 0; } +static inline int _dev_info(const struct device *dev, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +static inline int _dev_info(const struct device *dev, const char *fmt, ...) + { return 0; } + +#endif + +/* + * Stupid hackaround for existing uses of non-printk uses dev_info + * + * Note that the definition of dev_info below is actually _dev_info + * and a macro is used to avoid redefining dev_info + */ + +#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg) #if defined(DEBUG) #define dev_dbg(dev, format, arg...) 
\ - dev_printk(KERN_DEBUG , dev , format , ## arg) + dev_printk(KERN_DEBUG, dev, format, ##arg) #elif defined(CONFIG_DYNAMIC_DEBUG) -#define dev_dbg(dev, format, ...) do { \ +#define dev_dbg(dev, format, ...) \ +do { \ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ - } while (0) +} while (0) #else -#define dev_dbg(dev, format, arg...) \ - ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) +#define dev_dbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ + 0; \ +}) #endif #ifdef VERBOSE_DEBUG #define dev_vdbg dev_dbg #else - -#define dev_vdbg(dev, format, arg...) \ - ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) +#define dev_vdbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ + 0; \ +}) #endif /* diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index a55c873e8b6667bcd79796b9439743fad7b655c2..c4627cbdb8e038e4f61187db301c13a6acc12742 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -30,6 +30,7 @@ #define PCI_EEPROM_WIDTH_93C46 6 #define PCI_EEPROM_WIDTH_93C56 8 #define PCI_EEPROM_WIDTH_93C66 8 +#define PCI_EEPROM_WIDTH_93C86 8 #define PCI_EEPROM_WIDTH_OPCODE 3 #define PCI_EEPROM_WRITE_OPCODE 0x05 #define PCI_EEPROM_READ_OPCODE 0x06 diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 3d7a6687d247c32577cf6bad2f7cb483fd434c51..848480bc2bf93846b14e43c0c94493cb2109f889 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -126,6 +126,20 @@ static inline void random_ether_addr(u8 *addr) addr [0] |= 0x02; /* set local assignment bit (IEEE802) */ } +/** + * dev_hw_addr_random - Create random MAC and set device flag + * @dev: pointer to net_device structure + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Generate random MAC to be used by a device and set addr_assign_type + * so the state can be read by sysfs and be used by udev. + */ +static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr) +{ + dev->addr_assign_type |= NET_ADDR_RANDOM; + random_ether_addr(hwaddr); +} + /** * compare_ether_addr - Compare two Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index b4207ca3ad5277c5ae66ea2f2ccc41fd961a0426..991269e5b152f1179aefb68e947111104d25bce0 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -386,6 +386,15 @@ struct ethtool_rxnfc { __u32 rule_locs[0]; }; +struct ethtool_rxfh_indir { + __u32 cmd; + /* On entry, this is the array size of the user buffer. On + * return from ETHTOOL_GRXFHINDIR, this is the array size of + * the hardware indirection table. 
*/ + __u32 size; + __u32 ring_index[0]; /* ring/queue index for each hash value */ +}; + struct ethtool_rx_ntuple_flow_spec { __u32 flow_type; union { @@ -459,7 +468,7 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data); u32 ethtool_op_get_ufo(struct net_device *dev); int ethtool_op_set_ufo(struct net_device *dev, u32 data); u32 ethtool_op_get_flags(struct net_device *dev); -int ethtool_op_set_flags(struct net_device *dev, u32 data); +int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); void ethtool_ntuple_flush(struct net_device *dev); /** @@ -578,6 +587,10 @@ struct ethtool_ops { int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *); int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *); + int (*get_rxfh_indir)(struct net_device *, + struct ethtool_rxfh_indir *); + int (*set_rxfh_indir)(struct net_device *, + const struct ethtool_rxfh_indir *); }; #endif /* __KERNEL__ */ @@ -588,29 +601,29 @@ struct ethtool_ops { #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ #define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ #define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ -#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ -#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ #define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ #define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */ -#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ -#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ #define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ #define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ #define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ #define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ #define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ #define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ -#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ -#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ -#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ -#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ #define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable * (ethtool_value) */ #define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable * (ethtool_value). */ #define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. 
*/ #define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ -#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ #define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ #define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ #define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ @@ -621,8 +634,8 @@ struct ethtool_ops { #define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ #define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ #define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ -#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ -#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ #define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ #define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ @@ -639,6 +652,8 @@ struct ethtool_ops { #define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ #define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */ #define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ +#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ +#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET @@ -647,18 +662,18 @@ struct ethtool_ops { /* Indicates what features are supported by the interface. */ #define SUPPORTED_10baseT_Half (1 << 0) #define SUPPORTED_10baseT_Full (1 << 1) -#define SUPPORTED_100baseT_Half (1 << 2) -#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_100baseT_Half (1 << 2) +#define SUPPORTED_100baseT_Full (1 << 3) #define SUPPORTED_1000baseT_Half (1 << 4) #define SUPPORTED_1000baseT_Full (1 << 5) #define SUPPORTED_Autoneg (1 << 6) #define SUPPORTED_TP (1 << 7) #define SUPPORTED_AUI (1 << 8) #define SUPPORTED_MII (1 << 9) -#define SUPPORTED_FIBRE (1 << 10) +#define SUPPORTED_FIBRE (1 << 10) #define SUPPORTED_BNC (1 << 11) #define SUPPORTED_10000baseT_Full (1 << 12) -#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Pause (1 << 13) #define SUPPORTED_Asym_Pause (1 << 14) #define SUPPORTED_2500baseX_Full (1 << 15) #define SUPPORTED_Backplane (1 << 16) @@ -668,8 +683,8 @@ struct ethtool_ops { #define SUPPORTED_10000baseR_FEC (1 << 20) /* Indicates what features are advertised by the interface. */ -#define ADVERTISED_10baseT_Half (1 << 0) -#define ADVERTISED_10baseT_Full (1 << 1) +#define ADVERTISED_10baseT_Half (1 << 0) +#define ADVERTISED_10baseT_Full (1 << 1) #define ADVERTISED_100baseT_Half (1 << 2) #define ADVERTISED_100baseT_Full (1 << 3) #define ADVERTISED_1000baseT_Half (1 << 4) @@ -708,12 +723,12 @@ struct ethtool_ops { #define DUPLEX_FULL 0x01 /* Which connector port. */ -#define PORT_TP 0x00 +#define PORT_TP 0x00 #define PORT_AUI 0x01 #define PORT_MII 0x02 #define PORT_FIBRE 0x03 #define PORT_BNC 0x04 -#define PORT_DA 0x05 +#define PORT_DA 0x05 #define PORT_NONE 0xef #define PORT_OTHER 0xff @@ -727,7 +742,7 @@ struct ethtool_ops { /* Enable or disable autonegotiation. If this is set to enable, * the forced link modes above are completely ignored. 
*/ -#define AUTONEG_DISABLE 0x00 +#define AUTONEG_DISABLE 0x00 #define AUTONEG_ENABLE 0x01 /* Mode MDI or MDI-X */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 151f5d703b7e19d158e500d8ebdf688b3d3dd047..69b43dbea6c6ac444dfbf8c0daf1650942a9a9b1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -91,6 +91,54 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ #define BPF_TAX 0x00 #define BPF_TXA 0x80 +enum { + BPF_S_RET_K = 0, + BPF_S_RET_A, + BPF_S_ALU_ADD_K, + BPF_S_ALU_ADD_X, + BPF_S_ALU_SUB_K, + BPF_S_ALU_SUB_X, + BPF_S_ALU_MUL_K, + BPF_S_ALU_MUL_X, + BPF_S_ALU_DIV_X, + BPF_S_ALU_AND_K, + BPF_S_ALU_AND_X, + BPF_S_ALU_OR_K, + BPF_S_ALU_OR_X, + BPF_S_ALU_LSH_K, + BPF_S_ALU_LSH_X, + BPF_S_ALU_RSH_K, + BPF_S_ALU_RSH_X, + BPF_S_ALU_NEG, + BPF_S_LD_W_ABS, + BPF_S_LD_H_ABS, + BPF_S_LD_B_ABS, + BPF_S_LD_W_LEN, + BPF_S_LD_W_IND, + BPF_S_LD_H_IND, + BPF_S_LD_B_IND, + BPF_S_LD_IMM, + BPF_S_LDX_W_LEN, + BPF_S_LDX_B_MSH, + BPF_S_LDX_IMM, + BPF_S_MISC_TAX, + BPF_S_MISC_TXA, + BPF_S_ALU_DIV_K, + BPF_S_LD_MEM, + BPF_S_LDX_MEM, + BPF_S_ST, + BPF_S_STX, + BPF_S_JMP_JA, + BPF_S_JMP_JEQ_K, + BPF_S_JMP_JEQ_X, + BPF_S_JMP_JGE_K, + BPF_S_JMP_JGE_X, + BPF_S_JMP_JGT_K, + BPF_S_JMP_JGT_X, + BPF_S_JMP_JSET_K, + BPF_S_JMP_JSET_X, +}; + #ifndef BPF_MAXINSNS #define BPF_MAXINSNS 4096 #endif diff --git a/include/linux/if.h b/include/linux/if.h index be350e62a905438533b0231439d8d65fd53d7de4..53558ec59e1b16d4272f9cdd51e1a0ba4957facc 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -73,6 +73,8 @@ #define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */ #define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */ #define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ +#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ +#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h index cd525fae3c98f260453571c0d084d8e1c161f09f..2c7994372bde965fe5b37bd6335963ef3d490c3d 100644 --- a/include/linux/if_bonding.h +++ b/include/linux/if_bonding.h @@ -83,6 +83,7 @@ #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ +#define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ /* hashing types */ #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 938b7e81df9527a96a8fca8035d6a08dc1769550..0d241a5c4909b8bdc99f107b1ba4ac60de9996ae 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -102,8 +102,6 @@ struct __fdb_entry { #include extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); -extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, - struct sk_buff *skb); extern int (*br_should_route_hook)(struct sk_buff *skb); #endif diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index bed7a4682b90734935e3dd6680d5e646b0b895c0..c831467774d0e71963ccf774d310bd4f6dd91b8e 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -119,7 +119,7 @@ struct ethhdr { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_source[ETH_ALEN]; /* source ether addr */ __be16 h_proto; /* packet type ID field */ -} __attribute__((packed)); +} __packed; #ifdef __KERNEL__ #include 
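The if_ether.h hunk above, and the if_fddi.h, if_frad.h, if_hippi.h, if_pppox.h, ipv6.h, isdnif.h, mISDNif.h, nbd.h and ncp.h hunks that follow, only swap the open-coded GCC attribute for the kernel's __packed shorthand. A minimal reference sketch of the equivalence being relied on (assuming the usual definition in include/linux/compiler-gcc.h; the struct names below are hypothetical and not part of the patch):

#include <linux/types.h>
#include <linux/compiler.h>	/* provides: #define __packed __attribute__((packed)) */

struct demo_hdr_old {		/* hypothetical example, old spelling */
	__u8   type;
	__be16 len;
} __attribute__((packed));

struct demo_hdr_new {		/* identical layout, new spelling */
	__u8   type;
	__be16 len;
} __packed;

/* Both structs are laid out without padding, so sizeof() == 3 for each;
 * the conversion is purely cosmetic and changes no binary interface. */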
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h index 5459c5c0993035e66b3e27189e4d86dda165c14f..9947c39e62f6fa49ea95505d0afc8053a27fbebb 100644 --- a/include/linux/if_fddi.h +++ b/include/linux/if_fddi.h @@ -67,7 +67,7 @@ struct fddi_8022_1_hdr { __u8 dsap; /* destination service access point */ __u8 ssap; /* source service access point */ __u8 ctrl; /* control byte #1 */ -} __attribute__ ((packed)); +} __packed; /* Define 802.2 Type 2 header */ struct fddi_8022_2_hdr { @@ -75,7 +75,7 @@ struct fddi_8022_2_hdr { __u8 ssap; /* source service access point */ __u8 ctrl_1; /* control byte #1 */ __u8 ctrl_2; /* control byte #2 */ -} __attribute__ ((packed)); +} __packed; /* Define 802.2 SNAP header */ #define FDDI_K_OUI_LEN 3 @@ -85,7 +85,7 @@ struct fddi_snap_hdr { __u8 ctrl; /* always 0x03 */ __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ __be16 ethertype; /* packet type ID field */ -} __attribute__ ((packed)); +} __packed; /* Define FDDI LLC frame header */ struct fddihdr { @@ -98,7 +98,7 @@ struct fddihdr { struct fddi_8022_2_hdr llc_8022_2; struct fddi_snap_hdr llc_snap; } hdr; -} __attribute__ ((packed)); +} __packed; #ifdef __KERNEL__ #include diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 80b3a1056a5f8526c8604341f0a5ddce3f35a5b0..191ee0869bc16d7f1df9c42e2b8e4b881c17cd28 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -135,7 +135,7 @@ struct frhdr __be16 PID; #define IP_NLPID pad -} __attribute__((packed)); +} __packed; /* see RFC 1490 for the definition of the following */ #define FRAD_I_UI 0x03 diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h index 8d038eb8db5c1e3e4adf0dce715081a72e8fd449..5fe5f307c6f560f424f16252ad61a1a9bf4b7b45 100644 --- a/include/linux/if_hippi.h +++ b/include/linux/if_hippi.h @@ -104,7 +104,7 @@ struct hippi_fp_hdr { __be32 fixed; #endif __be32 d2_size; -} __attribute__ ((packed)); +} __packed; struct hippi_le_hdr { #if defined (__BIG_ENDIAN_BITFIELD) @@ -129,7 +129,7 @@ struct hippi_le_hdr { __u8 daddr[HIPPI_ALEN]; __u16 locally_administered; __u8 saddr[HIPPI_ALEN]; -} __attribute__ ((packed)); +} __packed; #define HIPPI_OUI_LEN 3 /* @@ -142,12 +142,12 @@ struct hippi_snap_hdr { __u8 ctrl; /* always 0x03 */ __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ __be16 ethertype; /* packet type ID field */ -} __attribute__ ((packed)); +} __packed; struct hippi_hdr { struct hippi_fp_hdr fp; struct hippi_le_hdr le; struct hippi_snap_hdr snap; -} __attribute__ ((packed)); +} __packed; #endif /* _LINUX_IF_HIPPI_H */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 85c812db5a3f09e75f6bebc646c86300d6621594..7fcad2e1be3df0491c20ecb5076a56d7bdfbb200 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -4,7 +4,7 @@ #include #include -/* The struct should be in sync with struct net_device_stats */ +/* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { __u32 rx_packets; /* total packets received */ __u32 tx_packets; /* total packets transmitted */ @@ -37,6 +37,7 @@ struct rtnl_link_stats { __u32 tx_compressed; }; +/* The main device statistics structure */ struct rtnl_link_stats64 { __u64 rx_packets; /* total packets received */ __u64 tx_packets; /* total packets transmitted */ diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 1ffaeffeff740f83e3e79b5d6e03b306410d6b3f..35280b302290459ff20293b2e4bcafd5aa6b9b2a 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h 
@@ -6,6 +6,7 @@ #include #include #include +#include #if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE) struct socket *macvtap_get_socket(struct file *); @@ -27,14 +28,16 @@ struct macvtap_queue; * struct macvlan_rx_stats - MACVLAN percpu rx stats * @rx_packets: number of received packets * @rx_bytes: number of received bytes - * @multicast: number of received multicast packets + * @rx_multicast: number of received multicast packets + * @syncp: synchronization point for 64bit counters * @rx_errors: number of errors */ struct macvlan_rx_stats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long multicast; - unsigned long rx_errors; + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + struct u64_stats_sync syncp; + unsigned long rx_errors; }; struct macvlan_dev { @@ -56,12 +59,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, { struct macvlan_rx_stats *rx_stats; - rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id()); + rx_stats = this_cpu_ptr(vlan->rx_stats); if (likely(success)) { + u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++;; rx_stats->rx_bytes += len; if (multicast) - rx_stats->multicast++; + rx_stats->rx_multicast++; + u64_stats_update_end(&rx_stats->syncp); } else { rx_stats->rx_errors++; } @@ -86,8 +91,4 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops); extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev); - -extern struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *, - struct sk_buff *); - #endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index 6ac23ef1801aaf9945dfdecfe386be51d7d9ac44..72bfa5a034dd430a1a79aa2ef1cb24a1b79836df 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h @@ -48,6 +48,7 @@ struct sockaddr_ll { #define PACKET_LOSS 14 #define PACKET_VNET_HDR 15 #define PACKET_TX_TIMESTAMP 16 +#define PACKET_TIMESTAMP 17 struct tpacket_stats { unsigned int tp_packets; diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index a6577af0c4e64de0f93e6c88e70264eab77c3fc5..1925e0c3f1623e0d515a22ac6cbc01074fd83e68 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -59,7 +59,7 @@ struct sockaddr_pppox { union{ struct pppoe_addr pppoe; }sa_addr; -}__attribute__ ((packed)); +} __packed; /* The use of the above union isn't viable because the size of this * struct must stay fixed over time -- applications use sizeof(struct @@ -70,7 +70,7 @@ struct sockaddr_pppol2tp { sa_family_t sa_family; /* address family, AF_PPPOX */ unsigned int sa_protocol; /* protocol identifier */ struct pppol2tp_addr pppol2tp; -}__attribute__ ((packed)); +} __packed; /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 * bits. So we need a different sockaddr structure. 
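For context on the sockaddr_pppol2tp structure shown above (the next hunk applies the same __packed conversion to its 32-bit L2TPv3 counterpart), here is a hedged sketch of how a userspace daemon typically fills it for connect(); the helper name and the id/fd parameters are purely illustrative, and the field names are assumed from <linux/if_pppol2tp.h>:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>

/* Illustrative only: attach a PPP-over-L2TP socket to an existing L2TP
 * tunnel/session pair.  tunnel_fd is the UDP socket carrying the tunnel. */
static int pppol2tp_connect(int ppp_fd, int tunnel_fd,
			    __u16 local_tunnel, __u16 local_session,
			    __u16 peer_tunnel, __u16 peer_session)
{
	struct sockaddr_pppol2tp sax;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = tunnel_fd;
	sax.pppol2tp.s_tunnel = local_tunnel;
	sax.pppol2tp.s_session = local_session;
	sax.pppol2tp.d_tunnel = peer_tunnel;
	sax.pppol2tp.d_session = peer_session;

	/* Applications pass sizeof(sax), which is why the struct is packed
	 * and must keep a fixed size over time, as the comment above notes. */
	return connect(ppp_fd, (struct sockaddr *)&sax, sizeof(sax));
}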
@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 { sa_family_t sa_family; /* address family, AF_PPPOX */ unsigned int sa_protocol; /* protocol identifier */ struct pppol2tpv3_addr pppol2tp; -} __attribute__ ((packed)); +} __packed; /********************************************************************* * @@ -129,7 +129,7 @@ struct pppoe_hdr { __be16 sid; __be16 length; struct pppoe_tag tag[0]; -} __attribute__ ((packed)); +} __packed; /* Length of entire PPPoE + PPP header */ #define PPPOE_SES_HLEN 8 diff --git a/include/linux/in.h b/include/linux/in.h index 583c76f9c30fbef3a7427e5acf1e3159dfe65318..41d88a4689af0a77a000cfc88437a70d8abdc1fb 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -85,6 +85,7 @@ struct in_addr { #define IP_RECVORIGDSTADDR IP_ORIGDSTADDR #define IP_MINTTL 21 +#define IP_NODEFRAG 22 /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index dfc17036284218c92d4c0ecf6cdfe5f0c4a66a55..9708de265bb1e24f1b6130c2f0ca3ace1b249dfc 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h @@ -19,6 +19,7 @@ */ #define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ #define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ +#define IP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ /* * Destination Server Flags @@ -85,6 +86,7 @@ #define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ #define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ +#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ #define IP_VS_SCHEDNAME_MAXLEN 16 #define IP_VS_IFNAME_MAXLEN 16 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 99e1ab7e3eec2639720858471af957e42bdb05e9..ab9e9e89e4074318405a595c1147642e344255c0 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -58,7 +58,7 @@ struct ipv6_opt_hdr { /* * TLV encoded option data follows. */ -} __attribute__ ((packed)); /* required for some archs */ +} __packed; /* required for some archs */ #define ipv6_destopt_hdr ipv6_opt_hdr #define ipv6_hopopt_hdr ipv6_opt_hdr @@ -99,7 +99,7 @@ struct ipv6_destopt_hao { __u8 type; __u8 length; struct in6_addr addr; -} __attribute__ ((__packed__)); +} __packed; /* * IPv6 fixed header @@ -246,7 +246,7 @@ struct inet6_skb_parm { __u16 srcrt; __u16 dst1; __u16 lastopt; - __u32 nhoff; + __u16 nhoff; __u16 flags; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) __u16 dsthao; diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h index b9b5a684ed6989be4095c74a4bcf0b355a9fd8b1..b8c23f88dd549dac4d9ca981ea90a16188fc1c8b 100644 --- a/include/linux/isdnif.h +++ b/include/linux/isdnif.h @@ -317,7 +317,7 @@ typedef struct T30_s { __u8 r_scantime; __u8 r_id[FAXIDLEN]; __u8 r_code; -} __attribute__((packed)) T30_s; +} __packed T30_s; #define ISDN_TTY_FAX_CONN_IN 0 #define ISDN_TTY_FAX_CONN_OUT 1 diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 8317ec4b9f3b4bbe615109073e7bd8ad9b946c1b..01dfc05ef4ac83eeb13eb0c7e4181c4e75fa32e7 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -171,6 +171,11 @@ static inline void might_fault(void) } #endif +struct va_format { + const char *fmt; + va_list *va; +}; + extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) 
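The struct va_format added to kernel.h above exists to support the %pV printk extension used by the new out-of-line dev_printk()/netdev_printk() helpers declared earlier in this patch (their implementations are not shown here). A minimal sketch of the intended usage pattern, with an invented wrapper name and prefix argument for illustration:

#include <stdarg.h>
#include <linux/kernel.h>

static void my_dev_warn(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV formats vaf.fmt with vaf.va, so the caller's arguments are
	 * forwarded to printk() without an intermediate vsnprintf() buffer. */
	printk(KERN_WARNING "%s: %pV", prefix, &vaf);
	va_end(args);
}

A caller would then write my_dev_warn("eth0", "link is %s\n", up ? "up" : "down"); and the wrapper adds its prefix exactly once, regardless of the caller's format string.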
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h index da0341b8ca0a3cf19e5b1ba6a9ea109cb9adbae9..14ba4452296e0bcdb3e896cb4145db3225a9455d 100644 --- a/include/linux/ks8842.h +++ b/include/linux/ks8842.h @@ -25,10 +25,14 @@ * struct ks8842_platform_data - Platform data of the KS8842 network driver * @macaddr: The MAC address of the device, set to all 0:s to use the on in * the chip. + * @rx_dma_channel: The DMA channel to use for RX, -1 for none. + * @tx_dma_channel: The DMA channel to use for TX, -1 for none. * */ struct ks8842_platform_data { u8 macaddr[ETH_ALEN]; + int rx_dma_channel; + int tx_dma_channel; }; #endif diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 78c3bed1c3f54f44557ad6c1285a85e28f4895c5..b5e7f22024848e956265426386b1f7999f87b7c5 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -251,7 +251,7 @@ struct mISDNhead { unsigned int prim; unsigned int id; -} __attribute__((packed)); +} __packed; #define MISDN_HEADER_LEN sizeof(struct mISDNhead) #define MAX_DATA_SIZE 2048 diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 2ed4fb8bbd517c606c459d29b2072ab49e6153e6..d0f08018335d4a8144e9b524e93484beab39ebf0 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -12,6 +12,7 @@ #define MARVELL_PHY_ID_88E1121R 0x01410cb0 #define MARVELL_PHY_ID_88E1145 0x01410cd0 #define MARVELL_PHY_ID_88E1240 0x01410e30 +#define MARVELL_PHY_ID_88EC048 0x01410e90 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 155719dab813e2fac8b52bc5a3ae3afdbcbbc8ce..bb58854a806196d8dea6ab9599daa244012ef5d8 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h @@ -88,7 +88,7 @@ struct nbd_request { char handle[8]; __be64 from; __be32 len; -} __attribute__ ((packed)); +} __packed; /* * This is the reply packet that nbd-server sends back to the client after diff --git a/include/linux/ncp.h b/include/linux/ncp.h index 99f0adeeb3f348e58c65312133217055a6ccf3c0..3ace8370e61e9855cfda5d7237362c1538c701a6 100644 --- a/include/linux/ncp.h +++ b/include/linux/ncp.h @@ -27,7 +27,7 @@ struct ncp_request_header { __u8 conn_high; __u8 function; __u8 data[0]; -} __attribute__((packed)); +} __packed; #define NCP_REPLY (0x3333) #define NCP_WATCHDOG (0x3E3E) @@ -42,7 +42,7 @@ struct ncp_reply_header { __u8 completion_code; __u8 connection_state; __u8 data[0]; -} __attribute__((packed)); +} __packed; #define NCP_VOLNAME_LEN (16) #define NCP_NUMBER_OF_VOLUMES (256) @@ -158,7 +158,7 @@ struct nw_info_struct { #ifdef __KERNEL__ struct nw_nfs_info nfs; #endif -} __attribute__((packed)); +} __packed; /* modify mask - use with MODIFY_DOS_INFO structure */ #define DM_ATTRIBUTES (cpu_to_le32(0x02)) @@ -190,12 +190,12 @@ struct nw_modify_dos_info { __u16 inheritanceGrantMask; __u16 inheritanceRevokeMask; __u32 maximumSpace; -} __attribute__((packed)); +} __packed; struct nw_search_sequence { __u8 volNumber; __u32 dirBase; __u32 sequence; -} __attribute__((packed)); +} __packed; #endif /* _LINUX_NCP_H */ diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index 5ec9ca671687e7c174b76b83995f209c0b0eacba..8da05bc098ca86b2ba5758af8b0b7885c7894dd3 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h @@ -104,13 +104,13 @@ struct ncp_server { unsigned int state; /* STREAM only: receiver state */ struct { - __u32 magic __attribute__((packed)); - __u32 len __attribute__((packed)); - __u16 type 
__attribute__((packed)); - __u16 p1 __attribute__((packed)); - __u16 p2 __attribute__((packed)); - __u16 p3 __attribute__((packed)); - __u16 type2 __attribute__((packed)); + __u32 magic __packed; + __u32 len __packed; + __u16 type __packed; + __u16 p1 __packed; + __u16 p2 __packed; + __u16 p3 __packed; + __u16 type2 __packed; } buf; /* STREAM only: temporary buffer */ unsigned char* ptr; /* STREAM only: pointer to data */ size_t len; /* STREAM only: length of data to receive */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2f22119b4b0835aff9f941655781feaac2d7bcdc..d52b570569be674c760bc1dc4c3d1754e802d668 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -54,6 +54,7 @@ struct vlan_group; struct netpoll_info; +struct phy_device; /* 802.11 specific */ struct wireless_dev; /* source back-compat hooks */ @@ -65,6 +66,11 @@ struct wireless_dev; #define HAVE_FREE_NETDEV /* free_netdev() */ #define HAVE_NETDEV_PRIV /* netdev_priv() */ +/* hardware address assignment types */ +#define NET_ADDR_PERM 0 /* address is permanent (default) */ +#define NET_ADDR_RANDOM 1 /* address is generated randomly */ +#define NET_ADDR_STOLEN 2 /* address is stolen from other device */ + /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ @@ -159,45 +165,39 @@ static inline bool dev_xmit_complete(int rc) #define MAX_HEADER (LL_MAX_HEADER + 48) #endif -#endif /* __KERNEL__ */ - /* - * Network device statistics. Akin to the 2.0 ether stats but - * with byte counters. + * Old network device statistics. Fields are native words + * (unsigned long) so they can be read and written atomically. */ struct net_device_stats { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; /* total bytes received */ - unsigned long tx_bytes; /* total bytes transmitted */ - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* no space in linux buffers */ - unsigned long tx_dropped; /* no space available in linux */ - unsigned long multicast; /* multicast packets received */ + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_errors; + unsigned long tx_errors; + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long multicast; unsigned long collisions; - - /* detailed rx_errors: */ unsigned long rx_length_errors; - unsigned long rx_over_errors; /* receiver ring buff overflow */ - unsigned long rx_crc_errors; /* recved pkt with crc error */ - unsigned long rx_frame_errors; /* recv'd frame alignment error */ - unsigned long rx_fifo_errors; /* recv'r fifo overrun */ - unsigned long rx_missed_errors; /* receiver missed packet */ - - /* detailed tx_errors */ + unsigned long rx_over_errors; + unsigned long rx_crc_errors; + unsigned long rx_frame_errors; + unsigned long rx_fifo_errors; + unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; - - /* for cslip etc */ unsigned long rx_compressed; unsigned long tx_compressed; }; +#endif /* __KERNEL__ */ + /* Media selection options. 
*/ enum { @@ -381,6 +381,8 @@ enum gro_result { }; typedef enum gro_result gro_result_t; +typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb); + extern void __napi_schedule(struct napi_struct *n); static inline int napi_disable_pending(struct napi_struct *n) @@ -504,9 +506,9 @@ struct netdev_queue { * please use this field instead of dev->trans_start */ unsigned long trans_start; - unsigned long tx_bytes; - unsigned long tx_packets; - unsigned long tx_dropped; + u64 tx_bytes; + u64 tx_packets; + u64 tx_dropped; } ____cacheline_aligned_in_smp; #ifdef CONFIG_RPS @@ -660,10 +662,19 @@ struct netdev_rx_queue { * Callback uses when the transmitter has not made any progress * for dev->watchdog ticks. * + * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage - * statistics. If not defined, the counters in dev->stats will - * be used. + * statistics. Drivers must do one of the following: + * 1. Define @ndo_get_stats64 to fill in a zero-initialised + * rtnl_link_stats64 structure passed by the caller. + * 2. Define @ndo_get_stats to update a net_device_stats structure + * (which should normally be dev->stats) and return a pointer to + * it. The structure may be changed asynchronously only if each + * field is written atomically. + * 3. Update dev->stats asynchronously and atomically, and define + * neither operation. * * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); * If device support VLAN receive accleration @@ -718,6 +729,8 @@ struct net_device_ops { struct neigh_parms *); void (*ndo_tx_timeout) (struct net_device *dev); + struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); void (*ndo_vlan_rx_register)(struct net_device *dev, @@ -728,6 +741,8 @@ struct net_device_ops { unsigned short vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); + int (*ndo_netpoll_setup)(struct net_device *dev, + struct netpoll_info *info); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -847,7 +862,8 @@ struct net_device { #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) /* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ + NETIF_F_TSO6 | NETIF_F_UFO) #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) @@ -908,6 +924,7 @@ struct net_device { /* Interface address info. 
*/ unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ + unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ @@ -957,6 +974,8 @@ struct net_device { #endif struct netdev_queue rx_queue; + rx_handler_func_t *rx_handler; + void *rx_handler_data; struct netdev_queue *_tx ____cacheline_aligned_in_smp; @@ -1024,10 +1043,6 @@ struct net_device { /* mid-layer private */ void *ml_priv; - /* bridge stuff */ - struct net_bridge_port *br_port; - /* macvlan */ - struct macvlan_port *macvlan_port; /* GARP */ struct garp_port *garp_port; @@ -1057,6 +1072,9 @@ struct net_device { #endif /* n-tuple filter list attached to this device */ struct ethtool_rx_ntuple_list ethtool_ntuple_list; + + /* phy device may attach itself for hardware timestamping */ + struct phy_device *phydev; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -1087,11 +1105,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, static inline struct net *dev_net(const struct net_device *dev) { -#ifdef CONFIG_NET_NS - return dev->nd_net; -#else - return &init_net; -#endif + return read_pnet(&dev->nd_net); } static inline @@ -1272,8 +1286,8 @@ extern void dev_add_pack(struct packet_type *pt); extern void dev_remove_pack(struct packet_type *pt); extern void __dev_remove_pack(struct packet_type *pt); -extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags, - unsigned short mask); +extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, + unsigned short mask); extern struct net_device *dev_get_by_name(struct net *net, const char *name); extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); extern struct net_device *__dev_get_by_name(struct net *net, const char *name); @@ -1696,6 +1710,11 @@ static inline void napi_free_frags(struct napi_struct *napi) napi->skb = NULL; } +extern int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data); +extern void netdev_rx_handler_unregister(struct net_device *dev); + extern void netif_nit_deliver(struct sk_buff *skb); extern int dev_valid_name(const char *name); extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); @@ -1775,6 +1794,8 @@ extern void netif_carrier_on(struct net_device *dev); extern void netif_carrier_off(struct net_device *dev); +extern void netif_notify_peers(struct net_device *dev); + /** * netif_dormant_on - mark device as dormant. * @dev: network device @@ -2119,8 +2140,10 @@ extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ extern void dev_load(struct net *net, const char *name); extern void dev_mcast_init(void); -extern const struct net_device_stats *dev_get_stats(struct net_device *dev); -extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats); +extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *storage); +extern void dev_txq_stats_fold(const struct net_device *dev, + struct rtnl_link_stats64 *stats); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; @@ -2230,25 +2253,23 @@ static inline const char *netdev_name(const struct net_device *dev) return dev->name; } -#define netdev_printk(level, netdev, format, args...) 
\ - dev_printk(level, (netdev)->dev.parent, \ - "%s: " format, \ - netdev_name(netdev), ##args) - -#define netdev_emerg(dev, format, args...) \ - netdev_printk(KERN_EMERG, dev, format, ##args) -#define netdev_alert(dev, format, args...) \ - netdev_printk(KERN_ALERT, dev, format, ##args) -#define netdev_crit(dev, format, args...) \ - netdev_printk(KERN_CRIT, dev, format, ##args) -#define netdev_err(dev, format, args...) \ - netdev_printk(KERN_ERR, dev, format, ##args) -#define netdev_warn(dev, format, args...) \ - netdev_printk(KERN_WARNING, dev, format, ##args) -#define netdev_notice(dev, format, args...) \ - netdev_printk(KERN_NOTICE, dev, format, ##args) -#define netdev_info(dev, format, args...) \ - netdev_printk(KERN_INFO, dev, format, ##args) +extern int netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...) + __attribute__ ((format (printf, 3, 4))); +extern int netdev_emerg(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_alert(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_crit(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_err(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_warn(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_notice(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); +extern int netdev_info(const struct net_device *dev, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); #if defined(DEBUG) #define netdev_dbg(__dev, format, args...) \ @@ -2296,20 +2317,26 @@ do { \ netdev_printk(level, (dev), fmt, ##args); \ } while (0) +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + #define netif_emerg(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args) + netif_level(emerg, priv, type, dev, fmt, ##args) #define netif_alert(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args) + netif_level(alert, priv, type, dev, fmt, ##args) #define netif_crit(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args) + netif_level(crit, priv, type, dev, fmt, ##args) #define netif_err(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_ERR, dev, fmt, ##args) + netif_level(err, priv, type, dev, fmt, ##args) #define netif_warn(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args) + netif_level(warn, priv, type, dev, fmt, ##args) #define netif_notice(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args) + netif_level(notice, priv, type, dev, fmt, ##args) #define netif_info(priv, type, dev, fmt, args...) \ - netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args) + netif_level(info, priv, type, dev, fmt, ##args) #if defined(DEBUG) #define netif_dbg(priv, type, dev, format, args...) 
\ diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 48767cd164537760d6e8bd99db88d971afbef88d..edeeabdc1500bb51c4487a4dc8b8f132ed2cf974 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -3,11 +3,13 @@ header-y += nf_conntrack_tuple_common.h header-y += nfnetlink_conntrack.h header-y += nfnetlink_log.h header-y += nfnetlink_queue.h +header-y += xt_CHECKSUM.h header-y += xt_CLASSIFY.h header-y += xt_CONNMARK.h header-y += xt_CONNSECMARK.h header-y += xt_CT.h header-y += xt_DSCP.h +header-y += xt_IDLETIMER.h header-y += xt_LED.h header-y += xt_MARK.h header-y += xt_NFLOG.h @@ -18,17 +20,19 @@ header-y += xt_TCPMSS.h header-y += xt_TCPOPTSTRIP.h header-y += xt_TEE.h header-y += xt_TPROXY.h +header-y += xt_cluster.h header-y += xt_comment.h header-y += xt_connbytes.h header-y += xt_connlimit.h header-y += xt_connmark.h header-y += xt_conntrack.h -header-y += xt_cluster.h +header-y += xt_cpu.h header-y += xt_dccp.h header-y += xt_dscp.h header-y += xt_esp.h header-y += xt_hashlimit.h header-y += xt_iprange.h +header-y += xt_ipvs.h header-y += xt_helper.h header-y += xt_length.h header-y += xt_limit.h diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 14e6d32002c43d5a104bd45878c7671f9cbc20bc..1afd18c855ec99d9cf8cb4b9570b06b6e28b74d2 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -76,6 +76,10 @@ enum ip_conntrack_status { /* Conntrack is a template */ IPS_TEMPLATE_BIT = 11, IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT), + + /* Conntrack is a fake untracked entry */ + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), }; /* Connection tracking event types */ diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h index d3bab7a2c9b710eeecaecd24f660697b14263196..ea9b8d3805272cdf0378374ef59b3ffaeea67876 100644 --- a/include/linux/netfilter/nfnetlink_log.h +++ b/include/linux/netfilter/nfnetlink_log.h @@ -89,6 +89,7 @@ enum nfulnl_attr_config { #define NFULNL_COPY_NONE 0x00 #define NFULNL_COPY_META 0x01 #define NFULNL_COPY_PACKET 0x02 +/* 0xff is reserved, don't use it for new copy modes. */ #define NFULNL_CFG_F_SEQ 0x0001 #define NFULNL_CFG_F_SEQ_GLOBAL 0x0002 diff --git a/include/linux/netfilter/xt_CHECKSUM.h b/include/linux/netfilter/xt_CHECKSUM.h new file mode 100644 index 0000000000000000000000000000000000000000..9a2e4661654ec4e915530129e9ec1ab7253ec36c --- /dev/null +++ b/include/linux/netfilter/xt_CHECKSUM.h @@ -0,0 +1,20 @@ +/* Header file for iptables ipt_CHECKSUM target + * + * (C) 2002 by Harald Welte + * (C) 2010 Red Hat Inc + * Author: Michael S. Tsirkin + * + * This software is distributed under GNU GPL v2, 1991 +*/ +#ifndef _XT_CHECKSUM_TARGET_H +#define _XT_CHECKSUM_TARGET_H + +#include + +#define XT_CHECKSUM_OP_FILL 0x01 /* fill in checksum in IP header */ + +struct xt_CHECKSUM_info { + __u8 operation; /* bitset of operations */ +}; + +#endif /* _XT_CHECKSUM_TARGET_H */ diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h new file mode 100644 index 0000000000000000000000000000000000000000..3e1aa1be942ef2297637d0bf4a778eec605d7040 --- /dev/null +++ b/include/linux/netfilter/xt_IDLETIMER.h @@ -0,0 +1,45 @@ +/* + * linux/include/linux/netfilter/xt_IDLETIMER.h + * + * Header file for Xtables timer target module. 
+ * + * Copyright (C) 2004, 2010 Nokia Corporation + * Written by Timo Teras + * + * Converted to x_tables and forward-ported to 2.6.34 + * by Luciano Coelho + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _XT_IDLETIMER_H +#define _XT_IDLETIMER_H + +#include + +#define MAX_IDLETIMER_LABEL_SIZE 28 + +struct idletimer_tg_info { + __u32 timeout; + + char label[MAX_IDLETIMER_LABEL_SIZE]; + + /* for kernel module internal use only */ + struct idletimer_tg *timer __attribute((aligned(8))); +}; + +#endif diff --git a/include/linux/netfilter/xt_cpu.h b/include/linux/netfilter/xt_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..93c7f11d8f421bf70b83a4ba65c8654bebedd898 --- /dev/null +++ b/include/linux/netfilter/xt_cpu.h @@ -0,0 +1,11 @@ +#ifndef _XT_CPU_H +#define _XT_CPU_H + +#include + +struct xt_cpu_info { + __u32 cpu; + __u32 invert; +}; + +#endif /*_XT_CPU_H*/ diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h new file mode 100644 index 0000000000000000000000000000000000000000..1167aeb7a34793ff288aff2b7190a966378e5080 --- /dev/null +++ b/include/linux/netfilter/xt_ipvs.h @@ -0,0 +1,27 @@ +#ifndef _XT_IPVS_H +#define _XT_IPVS_H + +enum { + XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ + XT_IPVS_PROTO = 1 << 1, + XT_IPVS_VADDR = 1 << 2, + XT_IPVS_VPORT = 1 << 3, + XT_IPVS_DIR = 1 << 4, + XT_IPVS_METHOD = 1 << 5, + XT_IPVS_VPORTCTL = 1 << 6, + XT_IPVS_MASK = (1 << 7) - 1, + XT_IPVS_ONCE_MASK = XT_IPVS_MASK & ~XT_IPVS_IPVS_PROPERTY +}; + +struct xt_ipvs_mtinfo { + union nf_inet_addr vaddr, vmask; + __be16 vport; + __u8 l4proto; + __u8 fwd_method; + __be16 vportctl; + + __u8 invert; + __u8 bitmask; +}; + +#endif /* _XT_IPVS_H */ diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h index 8dc89dfc1361761578ef2c73fd6dcdce74327f0f..b0d28c659ab75c7a87aa9e4cb956c516bf5aeaf1 100644 --- a/include/linux/netfilter/xt_quota.h +++ b/include/linux/netfilter/xt_quota.h @@ -11,9 +11,9 @@ struct xt_quota_priv; struct xt_quota_info { u_int32_t flags; u_int32_t pad; + aligned_u64 quota; /* Used internally by the kernel */ - aligned_u64 quota; struct xt_quota_priv *master; }; diff --git a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h index 90fa6525ef9c8bb731015bfd6e4f309a717a4c38..dcdbadf9fd4a94180ed0705db436e4470bf0e9dd 100644 --- a/include/linux/netfilter_ipv4/ipt_LOG.h +++ b/include/linux/netfilter_ipv4/ipt_LOG.h @@ -7,7 +7,8 @@ #define IPT_LOG_IPOPT 0x04 /* Log IP options */ #define IPT_LOG_UID 0x08 /* Log UID owning local socket */ #define IPT_LOG_NFLOG 0x10 /* Unsupported, don't reuse */ -#define IPT_LOG_MASK 0x1f +#define IPT_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define IPT_LOG_MASK 0x2f struct ipt_log_info { unsigned char level; diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h 
b/include/linux/netfilter_ipv6/ip6t_LOG.h index 0d0119b0458c7f6d23190118ac342005edd0d419..9dd5579e02ec75b82a503fc818d292e19b6b78c0 100644 --- a/include/linux/netfilter_ipv6/ip6t_LOG.h +++ b/include/linux/netfilter_ipv6/ip6t_LOG.h @@ -7,7 +7,8 @@ #define IP6T_LOG_IPOPT 0x04 /* Log IP options */ #define IP6T_LOG_UID 0x08 /* Log UID owning local socket */ #define IP6T_LOG_NFLOG 0x10 /* Unsupported, don't use */ -#define IP6T_LOG_MASK 0x1f +#define IP6T_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define IP6T_LOG_MASK 0x2f struct ip6t_log_info { unsigned char level; diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e9e231215865bd6218abd37c320784ab3976235e..413742c92d14efce0e98743a101a67fb3e8c2f97 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np); void netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); +int __netpoll_setup(struct netpoll *np); int netpoll_setup(struct netpoll *np); int netpoll_trap(void); void netpoll_set_trap(int trap); +void __netpoll_cleanup(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); int __netpoll_rx(struct sk_buff *skb); void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); @@ -57,12 +59,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); #ifdef CONFIG_NETPOLL static inline bool netpoll_rx(struct sk_buff *skb) { - struct netpoll_info *npinfo = skb->dev->npinfo; + struct netpoll_info *npinfo; unsigned long flags; bool ret = false; + rcu_read_lock_bh(); + npinfo = rcu_dereference_bh(skb->dev->npinfo); + if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) - return false; + goto out; spin_lock_irqsave(&npinfo->rx_lock, flags); /* check rx_flags again with the lock held */ @@ -70,12 +75,14 @@ static inline bool netpoll_rx(struct sk_buff *skb) ret = true; spin_unlock_irqrestore(&npinfo->rx_lock, flags); +out: + rcu_read_unlock_bh(); return ret; } static inline int netpoll_rx_on(struct sk_buff *skb) { - struct netpoll_info *npinfo = skb->dev->npinfo; + struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); } @@ -91,7 +98,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) { struct net_device *dev = napi->dev; - rcu_read_lock(); /* deal with race on ->npinfo */ if (dev && dev->npinfo) { spin_lock(&napi->poll_lock); napi->poll_owner = smp_processor_id(); @@ -108,7 +114,11 @@ static inline void netpoll_poll_unlock(void *have) napi->poll_owner = -1; spin_unlock(&napi->poll_lock); } - rcu_read_unlock(); +} + +static inline int netpoll_tx_running(struct net_device *dev) +{ + return irqs_disabled(); } #else @@ -134,6 +144,10 @@ static inline void netpoll_poll_unlock(void *have) static inline void netpoll_netdev_init(struct net_device *dev) { } +static inline int netpoll_tx_running(struct net_device *dev) +{ + return 0; +} #endif #endif diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index b7c77f9712f4bedd172ac9ef9ecdfd23a8ac084b..2c8701687336c94327297274e6bf0502c5b3633c 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -132,7 +132,7 @@ * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain - * to the the specified ISO/IEC 3166-1 alpha2 country code. 
The core will + * to the specified ISO/IEC 3166-1 alpha2 country code. The core will * store this as a valid request and then query userspace for it. * * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the @@ -725,6 +725,12 @@ enum nl80211_commands { * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations * connected to this BSS. * + * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See + * &enum nl80211_tx_power_setting for possible values. + * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units. + * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING + * for non-automatic settings. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -882,6 +888,9 @@ enum nl80211_attrs { NL80211_ATTR_AP_ISOLATE, + NL80211_ATTR_WIPHY_TX_POWER_SETTING, + NL80211_ATTR_WIPHY_TX_POWER_LEVEL, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -1659,4 +1668,17 @@ enum nl80211_cqm_rssi_threshold_event { NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, }; + +/** + * enum nl80211_tx_power_setting - TX power adjustment + * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power + * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter + * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter + */ +enum nl80211_tx_power_setting { + NL80211_TX_POWER_AUTOMATIC, + NL80211_TX_POWER_LIMITED, + NL80211_TX_POWER_FIXED, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 540703b555cb2be4bbea2d8e7f0b84d96ca3b474..b2f1a4d835506b7d0d8fdce8831d608fa28759bd 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret) #define NETDEV_POST_INIT 0x0010 #define NETDEV_UNREGISTER_BATCH 0x0011 #define NETDEV_BONDING_DESLAVE 0x0012 +#define NETDEV_NOTIFY_PEERS 0x0013 #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3bedcc149c843bfec71e29c2b6e319a4d052c70b..e69612cace616745d1fdd0b29139832b245ab9da 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1494,6 +1494,9 @@ #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104 +#define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902 #define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 @@ -2054,7 +2057,6 @@ #define PCI_DEVICE_ID_NX2_57711E 0x1650 #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 -#define PCI_DEVICE_ID_TIGON3_5720 0x1658 #define PCI_DEVICE_ID_TIGON3_5721 0x1659 #define PCI_DEVICE_ID_TIGON3_5722 0x165a #define PCI_DEVICE_ID_TIGON3_5723 0x165b @@ -2068,13 +2070,11 @@ #define PCI_DEVICE_ID_TIGON3_5754M 0x1672 #define PCI_DEVICE_ID_TIGON3_5755M 0x1673 #define PCI_DEVICE_ID_TIGON3_5756 0x1674 -#define PCI_DEVICE_ID_TIGON3_5750 0x1676 #define PCI_DEVICE_ID_TIGON3_5751 0x1677 #define PCI_DEVICE_ID_TIGON3_5715 0x1678 #define PCI_DEVICE_ID_TIGON3_5715S 0x1679 #define PCI_DEVICE_ID_TIGON3_5754 0x167a #define PCI_DEVICE_ID_TIGON3_5755 0x167b -#define PCI_DEVICE_ID_TIGON3_5750M 0x167c #define PCI_DEVICE_ID_TIGON3_5751M 0x167d #define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5787F 0x167f diff --git 
a/include/linux/phonet.h b/include/linux/phonet.h index e5126cff9b2a4f30e61aca41f5d922998302e621..24426c3d6b5ab34a463cf3ee650b497d9e545522 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -56,7 +56,7 @@ struct phonethdr { __be16 pn_length; __u8 pn_robj; __u8 pn_sobj; -} __attribute__((packed)); +} __packed; /* Common Phonet payload header */ struct phonetmsg { @@ -98,7 +98,7 @@ struct sockaddr_pn { __u8 spn_dev; __u8 spn_resource; __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; -} __attribute__ ((packed)); +} __packed; /* Well known address */ #define PN_DEV_PC 0x10 diff --git a/include/linux/phy.h b/include/linux/phy.h index 987e111f7b112ec1c72ab86243819666614c0676..6b0a782c6224f60d4530139a90ebc479ac6b3513 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -234,6 +234,8 @@ enum phy_state { PHY_RESUMING }; +struct sk_buff; + /* phy_device: An instance of a PHY * * drv: Pointer to the driver for this PHY instance @@ -402,6 +404,26 @@ struct phy_driver { /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); + /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ + int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); + + /* + * Requests a Rx timestamp for 'skb'. If the skb is accepted, + * the phy driver promises to deliver it using netif_rx() as + * soon as a timestamp becomes available. One of the + * PTP_CLASS_ values is passed in 'type'. The function must + * return true if the skb is accepted for delivery. + */ + bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* + * Requests a Tx timestamp for 'skb'. The phy driver promises + * to deliver it to the socket's error queue as soon as a + * timestamp becomes available. One of the PTP_CLASS_ values + * is passed in 'type'. + */ + void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + struct device_driver driver; }; #define to_phy_driver(d) container_of(d, struct phy_driver, driver) @@ -498,7 +520,7 @@ void phy_stop_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); int phy_mii_ioctl(struct phy_device *phydev, - struct mii_ioctl_data *mii_data, int cmd); + struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id); diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h new file mode 100644 index 0000000000000000000000000000000000000000..943a85ab0020a289895d6069650a6d94867deb06 --- /dev/null +++ b/include/linux/ptp_classify.h @@ -0,0 +1,126 @@ +/* + * PTP 1588 support + * + * This file implements a BPF that recognizes PTP event messages. + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
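The hwtstamp/rxtstamp/txtstamp hooks added to struct phy_driver above are easiest to read with a sketch of a caller. All foo_* names below are hypothetical, and keeping driver state in phydev->priv is an assumption of the sketch; the behaviour shown is the contract stated in the comments: an skb accepted by rxtstamp() is held back and delivered with netif_rx() once its hardware timestamp has been attached, and a txtstamp() implementation would finish by calling skb_complete_tx_timestamp().

struct foo_phy_priv {
        struct sk_buff_head rx_queue;   /* skbs waiting for their timestamp */
        struct work_struct rx_work;     /* reads the timestamp registers */
};

static bool foo_rxtstamp(struct phy_device *dev, struct sk_buff *skb, int type)
{
        struct foo_phy_priv *priv = dev->priv;

        if ((type & PTP_CLASS_PMASK) != PTP_CLASS_L2)
                return false;   /* not ours: the stack delivers the skb now */

        /*
         * Accepting the skb means the driver owns it and promises to pass
         * it to netif_rx() after filling skb_hwtstamps(skb) from hardware.
         */
        skb_queue_tail(&priv->rx_queue, skb);
        schedule_work(&priv->rx_work);
        return true;
}

static struct phy_driver foo_phy_driver = {
        .name           = "foo-phy",
        .rxtstamp       = foo_rxtstamp,
        /* .hwtstamp and .txtstamp omitted from the sketch */
};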
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PTP_CLASSIFY_H_ +#define _PTP_CLASSIFY_H_ + +#include +#include +#include +#ifdef __KERNEL__ +#include +#else +#include +#endif + +#define PTP_CLASS_NONE 0x00 /* not a PTP event message */ +#define PTP_CLASS_V1 0x01 /* protocol version 1 */ +#define PTP_CLASS_V2 0x02 /* protocol version 2 */ +#define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ +#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ +#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ +#define PTP_CLASS_L2 0x30 /* event in a L2 packet */ +#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged L2 packet */ +#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */ + +#define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) +#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/ +#define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4) +#define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) +#define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) +#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) + +#define PTP_EV_PORT 319 + +#define OFF_ETYPE 12 +#define OFF_IHL 14 +#define OFF_FRAG 20 +#define OFF_PROTO4 23 +#define OFF_NEXT 6 +#define OFF_UDP_DST 2 + +#define IP6_HLEN 40 +#define UDP_HLEN 8 + +#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST) +#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST) +#define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN) + +#define OP_AND (BPF_ALU | BPF_AND | BPF_K) +#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K) +#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K) +#define OP_LDB (BPF_LD | BPF_B | BPF_ABS) +#define OP_LDH (BPF_LD | BPF_H | BPF_ABS) +#define OP_LDHI (BPF_LD | BPF_H | BPF_IND) +#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH) +#define OP_OR (BPF_ALU | BPF_OR | BPF_K) +#define OP_RETA (BPF_RET | BPF_A) +#define OP_RETK (BPF_RET | BPF_K) + +static inline int ptp_filter_init(struct sock_filter *f, int len) +{ + if (OP_LDH == f[0].code) + return sk_chk_filter(f, len); + else + return 0; +} + +#define PTP_FILTER \ + {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \ + {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \ + {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \ + {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \ + {OP_LDH, 0, 0, OFF_FRAG }, /* */ \ + {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \ + {OP_LDX, 0, 0, OFF_IHL }, /* */ \ + {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \ + {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \ + {OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ +/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \ + {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \ + {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \ + {OP_LDH, 0, 0, OFF_DST6 }, /* */ \ + {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \ + {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ +/*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \ + {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ + {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \ + {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ + 
{OP_RETA, 0, 0, 0 }, /* */ \ +/*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \ + {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, + +#endif diff --git a/include/linux/rds.h b/include/linux/rds.h index cab4994c2f63062e1b4c26958a57bff2abe92205..24bce3ded9eade070774b40a517446b62df7fce9 100644 --- a/include/linux/rds.h +++ b/include/linux/rds.h @@ -100,7 +100,7 @@ struct rds_info_counter { u_int8_t name[32]; u_int64_t value; -} __attribute__((packed)); +} __packed; #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 @@ -115,7 +115,7 @@ struct rds_info_connection { __be32 faddr; u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ u_int8_t flags; -} __attribute__((packed)); +} __packed; struct rds_info_flow { __be32 laddr; @@ -123,7 +123,7 @@ struct rds_info_flow { u_int32_t bytes; __be16 lport; __be16 fport; -} __attribute__((packed)); +} __packed; #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 @@ -136,7 +136,7 @@ struct rds_info_message { __be16 lport; __be16 fport; u_int8_t flags; -} __attribute__((packed)); +} __packed; struct rds_info_socket { u_int32_t sndbuf; @@ -146,7 +146,7 @@ struct rds_info_socket { __be16 connected_port; u_int32_t rcvbuf; u_int64_t inum; -} __attribute__((packed)); +} __packed; struct rds_info_tcp_socket { __be32 local_addr; @@ -158,7 +158,7 @@ struct rds_info_tcp_socket { u_int32_t last_sent_nxt; u_int32_t last_expected_una; u_int32_t last_seen_una; -} __attribute__((packed)); +} __packed; #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index fbc8cb0d48c336d4e71f458ad3f8b8d842a1c918..58d44491880fa79ec30d7c863f609547c4ed7439 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -282,6 +282,7 @@ enum rtattr_type_t { RTA_SESSION, /* no longer used */ RTA_MP_ALGO, /* no longer used */ RTA_TABLE, + RTA_MARK, __RTA_MAX }; diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c20d3ce673c050e269027de701ef4821c8396d47..c11a28706fa499fd630c7662d6d177f91a074428 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -61,7 +61,7 @@ typedef struct sctphdr { __be16 dest; __be32 vtag; __le32 checksum; -} __attribute__((packed)) sctp_sctphdr_t; +} __packed sctp_sctphdr_t; #ifdef __KERNEL__ #include @@ -77,7 +77,7 @@ typedef struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; -} __attribute__((packed)) sctp_chunkhdr_t; +} __packed sctp_chunkhdr_t; /* Section 3.2. Chunk Type Values. 
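PTP_FILTER above is a classic BPF program meant to be run over a frame to obtain one of the PTP_CLASS_* codes, after being validated once with ptp_filter_init(). A minimal sketch of that usage follows; the foo_* wrappers are hypothetical, and sk_run_filter() is assumed to be the in-kernel BPF interpreter with its (skb, filter, flen) signature.

static struct sock_filter ptp_filter[] = { PTP_FILTER };

/* run once, e.g. from module init, before the filter is first used */
static int foo_filter_setup(void)
{
        return ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter));
}

/* returns PTP_CLASS_NONE or a version/transport code for this frame */
static unsigned int foo_classify(struct sk_buff *skb)
{
        return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter));
}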
@@ -167,7 +167,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 }; typedef struct sctp_paramhdr { __be16 type; __be16 length; -} __attribute__((packed)) sctp_paramhdr_t; +} __packed sctp_paramhdr_t; typedef enum { @@ -228,12 +228,12 @@ typedef struct sctp_datahdr { __be16 ssn; __be32 ppid; __u8 payload[0]; -} __attribute__((packed)) sctp_datahdr_t; +} __packed sctp_datahdr_t; typedef struct sctp_data_chunk { sctp_chunkhdr_t chunk_hdr; sctp_datahdr_t data_hdr; -} __attribute__((packed)) sctp_data_chunk_t; +} __packed sctp_data_chunk_t; /* DATA Chuck Specific Flags */ enum { @@ -259,78 +259,78 @@ typedef struct sctp_inithdr { __be16 num_inbound_streams; __be32 initial_tsn; __u8 params[0]; -} __attribute__((packed)) sctp_inithdr_t; +} __packed sctp_inithdr_t; typedef struct sctp_init_chunk { sctp_chunkhdr_t chunk_hdr; sctp_inithdr_t init_hdr; -} __attribute__((packed)) sctp_init_chunk_t; +} __packed sctp_init_chunk_t; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ typedef struct sctp_ipv4addr_param { sctp_paramhdr_t param_hdr; struct in_addr addr; -} __attribute__((packed)) sctp_ipv4addr_param_t; +} __packed sctp_ipv4addr_param_t; /* Section 3.3.2.1. IPv6 Address Parameter (6) */ typedef struct sctp_ipv6addr_param { sctp_paramhdr_t param_hdr; struct in6_addr addr; -} __attribute__((packed)) sctp_ipv6addr_param_t; +} __packed sctp_ipv6addr_param_t; /* Section 3.3.2.1 Cookie Preservative (9) */ typedef struct sctp_cookie_preserve_param { sctp_paramhdr_t param_hdr; __be32 lifespan_increment; -} __attribute__((packed)) sctp_cookie_preserve_param_t; +} __packed sctp_cookie_preserve_param_t; /* Section 3.3.2.1 Host Name Address (11) */ typedef struct sctp_hostname_param { sctp_paramhdr_t param_hdr; uint8_t hostname[0]; -} __attribute__((packed)) sctp_hostname_param_t; +} __packed sctp_hostname_param_t; /* Section 3.3.2.1 Supported Address Types (12) */ typedef struct sctp_supported_addrs_param { sctp_paramhdr_t param_hdr; __be16 types[0]; -} __attribute__((packed)) sctp_supported_addrs_param_t; +} __packed sctp_supported_addrs_param_t; /* Appendix A. ECN Capable (32768) */ typedef struct sctp_ecn_capable_param { sctp_paramhdr_t param_hdr; -} __attribute__((packed)) sctp_ecn_capable_param_t; +} __packed sctp_ecn_capable_param_t; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ typedef struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; -} __attribute__((packed)) sctp_adaptation_ind_param_t; +} __packed sctp_adaptation_ind_param_t; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ typedef struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; -} __attribute__((packed)) sctp_supported_ext_param_t; +} __packed sctp_supported_ext_param_t; /* AUTH Section 3.1 Random */ typedef struct sctp_random_param { sctp_paramhdr_t param_hdr; __u8 random_val[0]; -} __attribute__((packed)) sctp_random_param_t; +} __packed sctp_random_param_t; /* AUTH Section 3.2 Chunk List */ typedef struct sctp_chunks_param { sctp_paramhdr_t param_hdr; __u8 chunks[0]; -} __attribute__((packed)) sctp_chunks_param_t; +} __packed sctp_chunks_param_t; /* AUTH Section 3.3 HMAC Algorithm */ typedef struct sctp_hmac_algo_param { sctp_paramhdr_t param_hdr; __be16 hmac_ids[0]; -} __attribute__((packed)) sctp_hmac_algo_param_t; +} __packed sctp_hmac_algo_param_t; /* RFC 2960. 
Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP @@ -342,13 +342,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t; typedef struct sctp_cookie_param { sctp_paramhdr_t p; __u8 body[0]; -} __attribute__((packed)) sctp_cookie_param_t; +} __packed sctp_cookie_param_t; /* Section 3.3.3.1 Unrecognized Parameters (8) */ typedef struct sctp_unrecognized_param { sctp_paramhdr_t param_hdr; sctp_paramhdr_t unrecognized; -} __attribute__((packed)) sctp_unrecognized_param_t; +} __packed sctp_unrecognized_param_t; @@ -363,7 +363,7 @@ typedef struct sctp_unrecognized_param { typedef struct sctp_gap_ack_block { __be16 start; __be16 end; -} __attribute__((packed)) sctp_gap_ack_block_t; +} __packed sctp_gap_ack_block_t; typedef __be32 sctp_dup_tsn_t; @@ -378,12 +378,12 @@ typedef struct sctp_sackhdr { __be16 num_gap_ack_blocks; __be16 num_dup_tsns; sctp_sack_variable_t variable[0]; -} __attribute__((packed)) sctp_sackhdr_t; +} __packed sctp_sackhdr_t; typedef struct sctp_sack_chunk { sctp_chunkhdr_t chunk_hdr; sctp_sackhdr_t sack_hdr; -} __attribute__((packed)) sctp_sack_chunk_t; +} __packed sctp_sack_chunk_t; /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): @@ -395,12 +395,12 @@ typedef struct sctp_sack_chunk { typedef struct sctp_heartbeathdr { sctp_paramhdr_t info; -} __attribute__((packed)) sctp_heartbeathdr_t; +} __packed sctp_heartbeathdr_t; typedef struct sctp_heartbeat_chunk { sctp_chunkhdr_t chunk_hdr; sctp_heartbeathdr_t hb_hdr; -} __attribute__((packed)) sctp_heartbeat_chunk_t; +} __packed sctp_heartbeat_chunk_t; /* For the abort and shutdown ACK we must carry the init tag in the @@ -409,7 +409,7 @@ typedef struct sctp_heartbeat_chunk { */ typedef struct sctp_abort_chunk { sctp_chunkhdr_t uh; -} __attribute__((packed)) sctp_abort_chunk_t; +} __packed sctp_abort_chunk_t; /* For the graceful shutdown we must carry the tag (in common header) @@ -417,12 +417,12 @@ typedef struct sctp_abort_chunk { */ typedef struct sctp_shutdownhdr { __be32 cum_tsn_ack; -} __attribute__((packed)) sctp_shutdownhdr_t; +} __packed sctp_shutdownhdr_t; struct sctp_shutdown_chunk_t { sctp_chunkhdr_t chunk_hdr; sctp_shutdownhdr_t shutdown_hdr; -} __attribute__ ((packed)); +} __packed; /* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ @@ -430,12 +430,12 @@ typedef struct sctp_errhdr { __be16 cause; __be16 length; __u8 variable[0]; -} __attribute__((packed)) sctp_errhdr_t; +} __packed sctp_errhdr_t; typedef struct sctp_operr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_errhdr_t err_hdr; -} __attribute__((packed)) sctp_operr_chunk_t; +} __packed sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error * @@ -525,7 +525,7 @@ typedef struct sctp_ecnehdr { typedef struct sctp_ecne_chunk { sctp_chunkhdr_t chunk_hdr; sctp_ecnehdr_t ence_hdr; -} __attribute__((packed)) sctp_ecne_chunk_t; +} __packed sctp_ecne_chunk_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. 
* Congestion Window Reduced (CWR) (13) @@ -537,7 +537,7 @@ typedef struct sctp_cwrhdr { typedef struct sctp_cwr_chunk { sctp_chunkhdr_t chunk_hdr; sctp_cwrhdr_t cwr_hdr; -} __attribute__((packed)) sctp_cwr_chunk_t; +} __packed sctp_cwr_chunk_t; /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) @@ -588,17 +588,17 @@ typedef struct sctp_cwr_chunk { struct sctp_fwdtsn_skip { __be16 stream; __be16 ssn; -} __attribute__((packed)); +} __packed; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; struct sctp_fwdtsn_skip skip[0]; -} __attribute((packed)); +} __packed; struct sctp_fwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_fwdtsn_hdr fwdtsn_hdr; -} __attribute((packed)); +} __packed; /* ADDIP @@ -636,17 +636,17 @@ struct sctp_fwdtsn_chunk { typedef struct sctp_addip_param { sctp_paramhdr_t param_hdr; __be32 crr_id; -} __attribute__((packed)) sctp_addip_param_t; +} __packed sctp_addip_param_t; typedef struct sctp_addiphdr { __be32 serial; __u8 params[0]; -} __attribute__((packed)) sctp_addiphdr_t; +} __packed sctp_addiphdr_t; typedef struct sctp_addip_chunk { sctp_chunkhdr_t chunk_hdr; sctp_addiphdr_t addip_hdr; -} __attribute__((packed)) sctp_addip_chunk_t; +} __packed sctp_addip_chunk_t; /* AUTH * Section 4.1 Authentication Chunk (AUTH) @@ -701,11 +701,11 @@ typedef struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; __u8 hmac[0]; -} __attribute__((packed)) sctp_authhdr_t; +} __packed sctp_authhdr_t; typedef struct sctp_auth_chunk { sctp_chunkhdr_t chunk_hdr; sctp_authhdr_t auth_hdr; -} __attribute__((packed)) sctp_auth_chunk_t; +} __packed sctp_auth_chunk_t; #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f89e7fd59a4c4029a5e301b30c14df0ee7ae2699..d20d9e7a9bbda130e4499813c10c8c3a82c1bebc 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -169,6 +169,7 @@ struct skb_shared_hwtstamps { * @software: generate software time stamp * @in_progress: device driver is going to provide * hardware time stamp + * @prevent_sk_orphan: make sk reference available on driver level * @flags: all shared_tx flags * * These flags are attached to packets as part of the @@ -178,7 +179,8 @@ union skb_shared_tx { struct { __u8 hardware:1, software:1, - in_progress:1; + in_progress:1, + prevent_sk_orphan:1; }; __u8 flags; }; @@ -202,10 +204,11 @@ struct skb_shared_info { */ atomic_t dataref; - skb_frag_t frags[MAX_SKB_FRAGS]; /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ void * destructor_arg; + /* must be last field, see pskb_expand_head() */ + skb_frag_t frags[MAX_SKB_FRAGS]; }; /* We divide dataref into two halves. The higher 16 bits hold references @@ -1414,12 +1417,14 @@ static inline int skb_network_offset(const struct sk_buff *skb) * * Various parts of the networking layer expect at least 32 bytes of * headroom, you should not reduce this. - * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span - * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes + * + * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) + * to reduce average number of cache lines per packet. 
+ * get_rps_cpus() for example only access one 64 bytes aligned block : * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD -#define NET_SKB_PAD 64 +#define NET_SKB_PAD max(32, L1_CACHE_BYTES) #endif extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); @@ -1931,6 +1936,36 @@ static inline ktime_t net_invalid_timestamp(void) return ktime_set(0, 0); } +extern void skb_timestamping_init(void); + +#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING + +extern void skb_clone_tx_timestamp(struct sk_buff *skb); +extern bool skb_defer_rx_timestamp(struct sk_buff *skb); + +#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ + +static inline void skb_clone_tx_timestamp(struct sk_buff *skb) +{ +} + +static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) +{ + return false; +} + +#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ + +/** + * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps + * + * @skb: clone of the the original outgoing packet + * @hwtstamps: hardware time stamps + * + */ +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); + /** * skb_tstamp_tx - queue clone of skb with send time stamps * @orig_skb: the original outgoing packet @@ -1945,6 +1980,28 @@ static inline ktime_t net_invalid_timestamp(void) extern void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps); +static inline void sw_tx_timestamp(struct sk_buff *skb) +{ + union skb_shared_tx *shtx = skb_tx(skb); + if (shtx->software && !shtx->in_progress) + skb_tstamp_tx(skb, NULL); +} + +/** + * skb_tx_timestamp() - Driver hook for transmit timestamping + * + * Ethernet MAC Drivers should call this function in their hard_xmit() + * function as soon as possible after giving the sk_buff to the MAC + * hardware, but before freeing the sk_buff. + * + * @skb: A socket buffer. + */ +static inline void skb_tx_timestamp(struct sk_buff *skb) +{ + skb_clone_tx_timestamp(skb); + sw_tx_timestamp(skb); +} + extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); @@ -2132,7 +2189,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb) /* LRO sets gso_size but not gso_type, whereas if GSO is really * wanted then gso_type will be set. 
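The skb_tx_timestamp() helper introduced above is meant to be called from a MAC driver's transmit routine. A minimal sketch of such a call site, with a hypothetical foo_start_xmit() standing in for a real ndo_start_xmit implementation:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... map the skb and hand the descriptor to the MAC ... */

        /*
         * Called after the frame is given to the hardware but before the
         * skb may be freed: clones the skb for PHY timestamping and/or
         * emits the software timestamp requested via SO_TIMESTAMPING.
         */
        skb_tx_timestamp(skb);

        /* ... free or defer the skb as the driver normally would ... */
        return NETDEV_TX_OK;
}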
*/ struct skb_shared_info *shinfo = skb_shinfo(skb); - if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { + if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && + unlikely(shinfo->gso_type == 0)) { __skb_warn_lro_forwarding(skb); return true; } diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 52797714ade7a737b974b39fe2c2df9859318768..ebb0c80ffd6ebb53b02fb2263e0a6ff6a634eb6a 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -229,6 +229,7 @@ enum LINUX_MIB_TCPBACKLOGDROP, LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ LINUX_MIB_TCPDEFERACCEPTDROP, + LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ __LINUX_MIB_MAX }; diff --git a/include/linux/socket.h b/include/linux/socket.h index 032a19eb61b12077a3f8c8f5942d4ac3d4c52806..a2fada9becb60c47fcbf8143b3b3c67d291731b8 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -24,6 +24,9 @@ struct __kernel_sockaddr_storage { #include /* pid_t */ #include /* __user */ +struct pid; +struct cred; + #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -309,6 +312,8 @@ struct ucred { #define IPX_TYPE 1 #ifdef __KERNEL__ +extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred); + extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index a2608bff9c78a3a3c69544dbb6c187572535e902..623b704fdc42a371bfdf0c20d5a381d8598e1a21 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -167,7 +167,7 @@ struct ssb_device { * is an optimization. */ const struct ssb_bus_ops *ops; - struct device *dev; + struct device *dev, *dma_dev; struct ssb_bus *bus; struct ssb_device_id id; @@ -470,14 +470,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev); #define SSB_DMA_TRANSLATION_MASK 0xC0000000 #define SSB_DMA_TRANSLATION_SHIFT 30 -extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask); - -extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp_flags); -extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - gfp_t gfp_flags); - static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) { #ifdef CONFIG_SSB_DEBUG @@ -486,155 +478,6 @@ static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) #endif /* DEBUG */ } -static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - return pci_dma_mapping_error(dev->bus->host_pci, addr); -#endif - break; - case SSB_BUSTYPE_SSB: - return dma_mapping_error(dev->dev, addr); - default: - break; - } - __ssb_dma_not_implemented(dev); - return -ENOSYS; -} - -static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p, - size_t size, enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - return pci_map_single(dev->bus->host_pci, p, size, dir); -#endif - break; - case SSB_BUSTYPE_SSB: - return dma_map_single(dev->dev, p, size, dir); - default: - break; - } - __ssb_dma_not_implemented(dev); - return 0; -} - -static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir) -{ - switch 
(dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_unmap_single(dev->dev, dma_addr, size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev, - dma_addr_t dma_addr, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, - size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev, - dma_addr_t dma_addr, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, - size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_for_device(dev->dev, dma_addr, size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev, - dma_addr_t dma_addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - /* Just sync everything. That's all the PCI API can do. */ - pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, - offset + size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset, - size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - -static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev, - dma_addr_t dma_addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - switch (dev->bus->bustype) { - case SSB_BUSTYPE_PCI: -#ifdef CONFIG_SSB_PCIHOST - /* Just sync everything. That's all the PCI API can do. */ - pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, - offset + size, dir); - return; -#endif - break; - case SSB_BUSTYPE_SSB: - dma_sync_single_range_for_device(dev->dev, dma_addr, offset, - size, dir); - return; - default: - break; - } - __ssb_dma_not_implemented(dev); -} - - #ifdef CONFIG_SSB_PCIHOST /* PCI-host wrapper driver */ extern int ssb_pcihost_register(struct pci_driver *driver); diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h new file mode 100644 index 0000000000000000000000000000000000000000..fa261a0da280a5e87dd8aece56e66600911fbc1c --- /dev/null +++ b/include/linux/u64_stats_sync.h @@ -0,0 +1,140 @@ +#ifndef _LINUX_U64_STATS_SYNC_H +#define _LINUX_U64_STATS_SYNC_H + +/* + * To properly implement 64bits network statistics on 32bit and 64bit hosts, + * we provide a synchronization point, that is a noop on 64bit or UP kernels. + * + * Key points : + * 1) Use a seqcount on SMP 32bits, with low overhead. + * 2) Whole thing is a noop on 64bit arches or UP kernels. + * 3) Write side must ensure mutual exclusion or one seqcount update could + * be lost, thus blocking readers forever. + * If this synchronization point is not a mutex, but a spinlock or + * spinlock_bh() or disable_bh() : + * 3.1) Write side should not sleep. + * 3.2) Write side should not allow preemption. 
+ * 3.3) If applicable, interrupts should be disabled. + * + * 4) If reader fetches several counters, there is no guarantee the whole values + * are consistent (remember point 1) : this is a noop on 64bit arches anyway) + * + * 5) readers are allowed to sleep or be preempted/interrupted : They perform + * pure reads. But if they have to fetch many values, it's better to not allow + * preemptions/interruptions to avoid many retries. + * + * 6) If counter might be written by an interrupt, readers should block interrupts. + * (On UP, there is no seqcount_t protection, a reader allowing interrupts could + * read partial values) + * + * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and + * u64_stats_fetch_retry_bh() helpers + * + * Usage : + * + * Stats producer (writer) should use following template granted it already got + * an exclusive access to counters (a lock is already taken, or per cpu + * data is used [in a non preemptable context]) + * + * spin_lock_bh(...) or other synchronization to get exclusive access + * ... + * u64_stats_update_begin(&stats->syncp); + * stats->bytes64 += len; // non atomic operation + * stats->packets64++; // non atomic operation + * u64_stats_update_end(&stats->syncp); + * + * While a consumer (reader) should use following template to get consistent + * snapshot for each variable (but no guarantee on several ones) + * + * u64 tbytes, tpackets; + * unsigned int start; + * + * do { + * start = u64_stats_fetch_begin(&stats->syncp); + * tbytes = stats->bytes64; // non atomic operation + * tpackets = stats->packets64; // non atomic operation + * } while (u64_stats_fetch_retry(&stats->syncp, start)); + * + * + * Example of use in drivers/net/loopback.c, using per_cpu containers, + * in BH disabled context. + */ +#include + +struct u64_stats_sync { +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + seqcount_t seq; +#endif +}; + +static void inline u64_stats_update_begin(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_begin(&syncp->seq); +#endif +} + +static void inline u64_stats_update_end(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_end(&syncp->seq); +#endif +} + +static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_begin(&syncp->seq); +#else +#if BITS_PER_LONG==32 + preempt_disable(); +#endif + return 0; +#endif +} + +static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_retry(&syncp->seq, start); +#else +#if BITS_PER_LONG==32 + preempt_enable(); +#endif + return false; +#endif +} + +/* + * In case softirq handlers can update u64 counters, readers can use following helpers + * - SMP 32bit arches use seqcount protection, irq safe. + * - UP 32bit must disable BH. + * - 64bit have no problem atomically reading u64 values, irq safe. 
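Pulling the writer and reader templates from the comment above into one self-contained sketch (foo_* names are invented; the counters are assumed to be updated from softirq context with one instance per cpu, so the reader uses the _bh helpers defined below):

struct foo_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        struct u64_stats_sync   syncp;
};

/* writer: runs in the RX softirq, exclusive access per cpu */
static void foo_rx_account(struct foo_stats *stats, unsigned int len)
{
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}

/* reader: may run in process context and be preempted */
static void foo_fetch(const struct foo_stats *stats, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&stats->syncp);
                *packets = stats->rx_packets;
                *bytes   = stats->rx_bytes;
        } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
}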
+ */ +static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_begin(&syncp->seq); +#else +#if BITS_PER_LONG==32 + local_bh_disable(); +#endif + return 0; +#endif +} + +static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_retry(&syncp->seq, start); +#else +#if BITS_PER_LONG==32 + local_bh_enable(); +#endif + return false; +#endif +} + +#endif /* _LINUX_U64_STATS_SYNC_H */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index cc4f45361dbbc8a956f9a9fad2a865dc7f4add33..8178156711f9f254c77452d8e3b4d25ded814f5e 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -36,6 +36,9 @@ static inline void put_user_ns(struct user_namespace *ns) kref_put(&ns->kref, free_user_ns); } +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid); +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid); + #else static inline struct user_namespace *get_user_ns(struct user_namespace *ns) @@ -52,6 +55,17 @@ static inline void put_user_ns(struct user_namespace *ns) { } +static inline uid_t user_ns_map_uid(struct user_namespace *to, + const struct cred *cred, uid_t uid) +{ + return uid; +} +static inline gid_t user_ns_map_gid(struct user_namespace *to, + const struct cred *cred, gid_t gid) +{ + return gid; +} + #endif #endif /* _LINUX_USER_H */ diff --git a/include/linux/wlp.h b/include/linux/wlp.h index ac95ce6606ac1c0ac3ae35842deead4ad0cdb606..c76fe2392506e1b6ce02403d65ba942d0402c597 100644 --- a/include/linux/wlp.h +++ b/include/linux/wlp.h @@ -300,7 +300,7 @@ struct wlp_ie { __le16 cycle_param; __le16 acw_anchor_addr; u8 wssid_hash_list[]; -} __attribute__((packed)); +} __packed; static inline int wlp_ie_hash_length(struct wlp_ie *ie) { @@ -324,7 +324,7 @@ static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length) */ struct wlp_nonce { u8 data[16]; -} __attribute__((packed)); +} __packed; /** * WLP UUID @@ -336,7 +336,7 @@ struct wlp_nonce { */ struct wlp_uuid { u8 data[16]; -} __attribute__((packed)); +} __packed; /** @@ -348,7 +348,7 @@ struct wlp_dev_type { u8 OUI[3]; u8 OUIsubdiv; __le16 subID; -} __attribute__((packed)); +} __packed; /** * WLP frame header @@ -357,7 +357,7 @@ struct wlp_dev_type { struct wlp_frame_hdr { __le16 mux_hdr; /* WLP_PROTOCOL_ID */ enum wlp_frame_type type:8; -} __attribute__((packed)); +} __packed; /** * WLP attribute field header @@ -368,7 +368,7 @@ struct wlp_frame_hdr { struct wlp_attr_hdr { __le16 type; __le16 length; -} __attribute__((packed)); +} __packed; /** * Device information commonly used together @@ -401,13 +401,13 @@ struct wlp_device_info { struct wlp_attr_##name { \ struct wlp_attr_hdr hdr; \ type name; \ -} __attribute__((packed)); +} __packed; #define wlp_attr_array(type, name) \ struct wlp_attr_##name { \ struct wlp_attr_hdr hdr; \ type name[]; \ -} __attribute__((packed)); +} __packed; /** * WLP association attribute fields @@ -483,7 +483,7 @@ struct wlp_wss_info { struct wlp_attr_accept_enrl accept; struct wlp_attr_wss_sec_status sec_stat; struct wlp_attr_wss_bcast bcast; -} __attribute__((packed)); +} __packed; /* WLP WSS Information */ wlp_attr_array(struct wlp_wss_info, wss_info) @@ -520,7 +520,7 @@ wlp_attr(u8, wlp_assc_err) struct wlp_frame_std_abbrv_hdr { struct wlp_frame_hdr hdr; u8 tag; -} 
__attribute__((packed)); +} __packed; /** * WLP association frames @@ -533,7 +533,7 @@ struct wlp_frame_assoc { struct wlp_attr_version version; struct wlp_attr_msg_type msg_type; u8 attr[]; -} __attribute__((packed)); +} __packed; /* Ethernet to dev address mapping */ struct wlp_eda { diff --git a/include/net/act_api.h b/include/net/act_api.h index c05fd717c588801a72c4fe7a18a000b4bf46e35c..bab385f13ac32e81fd758e88811397a11964b64e 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -20,6 +20,7 @@ struct tcf_common { struct gnet_stats_queue tcfc_qstats; struct gnet_stats_rate_est tcfc_rate_est; spinlock_t tcfc_lock; + struct rcu_head tcfc_rcu; }; #define tcf_next common.tcfc_next #define tcf_index common.tcfc_index @@ -32,6 +33,7 @@ struct tcf_common { #define tcf_qstats common.tcfc_qstats #define tcf_rate_est common.tcfc_rate_est #define tcf_lock common.tcfc_lock +#define tcf_rcu common.tcfc_rcu struct tcf_police { struct tcf_common common; diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 20725e213aeec4b5363388080a8f5fe61129226b..90c9e2872f27214b4727ecf4f440e4975be1f1df 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -23,7 +23,8 @@ struct unix_address { }; struct unix_skb_parms { - struct ucred creds; /* Skb credentials */ + struct pid *pid; /* Skb credentials */ + const struct cred *cred; struct scm_fp_list *fp; /* Passed files */ #ifdef CONFIG_SECURITY_NETWORK u32 secid; /* Security ID */ @@ -31,7 +32,6 @@ struct unix_skb_parms { }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) -#define UNIXCREDS(skb) (&UNIXCB((skb)).creds) #define UNIXSID(skb) (&UNIXCB((skb)).secid) #define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index ff77e8f882f13fdc8545662b3543685e661de0f4..27a902d9b3a9a431c6b3162a4c6fe479aa99504c 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -84,7 +84,7 @@ enum { /* BD Address */ typedef struct { __u8 b[6]; -} __attribute__((packed)) bdaddr_t; +} __packed bdaddr_t; #define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) #define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) @@ -138,6 +138,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock); struct bt_skb_cb { __u8 pkt_type; __u8 incoming; + __u16 expect; __u8 tx_seq; __u8 retries; __u8 sar; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index fc0c502d9fd186f95585220bb14217aa50252330..bcbdd6d4e6dd43f1dfb4196a5eb3998309cf5909 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -100,6 +100,9 @@ enum { #define HCISETACLMTU _IOW('H', 227, int) #define HCISETSCOMTU _IOW('H', 228, int) +#define HCIBLOCKADDR _IOW('H', 230, int) +#define HCIUNBLOCKADDR _IOW('H', 231, int) + #define HCIINQUIRY _IOR('H', 240, int) /* HCI timeouts */ @@ -227,7 +230,7 @@ struct hci_cp_inquiry { __u8 lap[3]; __u8 length; __u8 num_rsp; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_INQUIRY_CANCEL 0x0402 @@ -241,81 +244,81 @@ struct hci_cp_create_conn { __u8 pscan_mode; __le16 clock_offset; __u8 role_switch; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_DISCONNECT 0x0406 struct hci_cp_disconnect { __le16 handle; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ADD_SCO 0x0407 struct hci_cp_add_sco { __le16 handle; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_CREATE_CONN_CANCEL 0x0408 struct hci_cp_create_conn_cancel 
{ bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ACCEPT_CONN_REQ 0x0409 struct hci_cp_accept_conn_req { bdaddr_t bdaddr; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REJECT_CONN_REQ 0x040a struct hci_cp_reject_conn_req { bdaddr_t bdaddr; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_LINK_KEY_REPLY 0x040b struct hci_cp_link_key_reply { bdaddr_t bdaddr; __u8 link_key[16]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c struct hci_cp_link_key_neg_reply { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_PIN_CODE_REPLY 0x040d struct hci_cp_pin_code_reply { bdaddr_t bdaddr; __u8 pin_len; __u8 pin_code[16]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_PIN_CODE_NEG_REPLY 0x040e struct hci_cp_pin_code_neg_reply { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_CHANGE_CONN_PTYPE 0x040f struct hci_cp_change_conn_ptype { __le16 handle; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_AUTH_REQUESTED 0x0411 struct hci_cp_auth_requested { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SET_CONN_ENCRYPT 0x0413 struct hci_cp_set_conn_encrypt { __le16 handle; __u8 encrypt; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415 struct hci_cp_change_conn_link_key { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REMOTE_NAME_REQ 0x0419 struct hci_cp_remote_name_req { @@ -323,28 +326,28 @@ struct hci_cp_remote_name_req { __u8 pscan_rep_mode; __u8 pscan_mode; __le16 clock_offset; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a struct hci_cp_remote_name_req_cancel { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_REMOTE_FEATURES 0x041b struct hci_cp_read_remote_features { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c struct hci_cp_read_remote_ext_features { __le16 handle; __u8 page; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_REMOTE_VERSION 0x041d struct hci_cp_read_remote_version { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SETUP_SYNC_CONN 0x0428 struct hci_cp_setup_sync_conn { @@ -355,7 +358,7 @@ struct hci_cp_setup_sync_conn { __le16 voice_setting; __u8 retrans_effort; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429 struct hci_cp_accept_sync_conn_req { @@ -366,13 +369,13 @@ struct hci_cp_accept_sync_conn_req { __le16 content_format; __u8 retrans_effort; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a struct hci_cp_reject_sync_conn_req { bdaddr_t bdaddr; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SNIFF_MODE 0x0803 struct hci_cp_sniff_mode { @@ -381,59 +384,59 @@ struct hci_cp_sniff_mode { __le16 min_interval; __le16 attempt; __le16 timeout; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_EXIT_SNIFF_MODE 0x0804 struct hci_cp_exit_sniff_mode { __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_ROLE_DISCOVERY 0x0809 struct hci_cp_role_discovery { __le16 handle; -} __attribute__ ((packed)); +} __packed; struct hci_rp_role_discovery { __u8 status; __le16 handle; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SWITCH_ROLE 0x080b struct hci_cp_switch_role { bdaddr_t bdaddr; __u8 role; -} 
__attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LINK_POLICY 0x080c struct hci_cp_read_link_policy { __le16 handle; -} __attribute__ ((packed)); +} __packed; struct hci_rp_read_link_policy { __u8 status; __le16 handle; __le16 policy; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_LINK_POLICY 0x080d struct hci_cp_write_link_policy { __le16 handle; __le16 policy; -} __attribute__ ((packed)); +} __packed; struct hci_rp_write_link_policy { __u8 status; __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_DEF_LINK_POLICY 0x080e struct hci_rp_read_def_link_policy { __u8 status; __le16 policy; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f struct hci_cp_write_def_link_policy { __le16 policy; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SNIFF_SUBRATE 0x0811 struct hci_cp_sniff_subrate { @@ -441,12 +444,12 @@ struct hci_cp_sniff_subrate { __le16 max_latency; __le16 min_remote_timeout; __le16 min_local_timeout; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_SET_EVENT_MASK 0x0c01 struct hci_cp_set_event_mask { __u8 mask[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_RESET 0x0c03 @@ -455,7 +458,7 @@ struct hci_cp_set_event_flt { __u8 flt_type; __u8 cond_type; __u8 condition[0]; -} __attribute__ ((packed)); +} __packed; /* Filter types */ #define HCI_FLT_CLEAR_ALL 0x00 @@ -474,13 +477,13 @@ struct hci_cp_set_event_flt { #define HCI_OP_WRITE_LOCAL_NAME 0x0c13 struct hci_cp_write_local_name { __u8 name[248]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_NAME 0x0c14 struct hci_rp_read_local_name { __u8 status; __u8 name[248]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_CA_TIMEOUT 0x0c16 @@ -508,23 +511,23 @@ struct hci_rp_read_local_name { struct hci_rp_read_class_of_dev { __u8 status; __u8 dev_class[3]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24 struct hci_cp_write_class_of_dev { __u8 dev_class[3]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_VOICE_SETTING 0x0c25 struct hci_rp_read_voice_setting { __u8 status; __le16 voice_setting; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_VOICE_SETTING 0x0c26 struct hci_cp_write_voice_setting { __le16 voice_setting; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_HOST_BUFFER_SIZE 0x0c33 struct hci_cp_host_buffer_size { @@ -532,18 +535,18 @@ struct hci_cp_host_buffer_size { __u8 sco_mtu; __le16 acl_max_pkt; __le16 sco_max_pkt; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_SSP_MODE 0x0c55 struct hci_rp_read_ssp_mode { __u8 status; __u8 mode; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_WRITE_SSP_MODE 0x0c56 struct hci_cp_write_ssp_mode { __u8 mode; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_VERSION 0x1001 struct hci_rp_read_local_version { @@ -553,19 +556,19 @@ struct hci_rp_read_local_version { __u8 lmp_ver; __le16 manufacturer; __le16 lmp_subver; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_COMMANDS 0x1002 struct hci_rp_read_local_commands { __u8 status; __u8 commands[64]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_FEATURES 0x1003 struct hci_rp_read_local_features { __u8 status; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004 struct hci_rp_read_local_ext_features { @@ -573,7 +576,7 @@ struct hci_rp_read_local_ext_features { __u8 page; __u8 max_page; __u8 
features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_BUFFER_SIZE 0x1005 struct hci_rp_read_buffer_size { @@ -582,13 +585,13 @@ struct hci_rp_read_buffer_size { __u8 sco_mtu; __le16 acl_max_pkt; __le16 sco_max_pkt; -} __attribute__ ((packed)); +} __packed; #define HCI_OP_READ_BD_ADDR 0x1009 struct hci_rp_read_bd_addr { __u8 status; bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 @@ -601,7 +604,7 @@ struct inquiry_info { __u8 pscan_mode; __u8 dev_class[3]; __le16 clock_offset; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CONN_COMPLETE 0x03 struct hci_ev_conn_complete { @@ -610,54 +613,54 @@ struct hci_ev_conn_complete { bdaddr_t bdaddr; __u8 link_type; __u8 encr_mode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CONN_REQUEST 0x04 struct hci_ev_conn_request { bdaddr_t bdaddr; __u8 dev_class[3]; __u8 link_type; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_DISCONN_COMPLETE 0x05 struct hci_ev_disconn_complete { __u8 status; __le16 handle; __u8 reason; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_AUTH_COMPLETE 0x06 struct hci_ev_auth_complete { __u8 status; __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_NAME 0x07 struct hci_ev_remote_name { __u8 status; bdaddr_t bdaddr; __u8 name[248]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_ENCRYPT_CHANGE 0x08 struct hci_ev_encrypt_change { __u8 status; __le16 handle; __u8 encrypt; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09 struct hci_ev_change_link_key_complete { __u8 status; __le16 handle; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_FEATURES 0x0b struct hci_ev_remote_features { __u8 status; __le16 handle; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_VERSION 0x0c struct hci_ev_remote_version { @@ -666,7 +669,7 @@ struct hci_ev_remote_version { __u8 lmp_ver; __le16 manufacturer; __le16 lmp_subver; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_QOS_SETUP_COMPLETE 0x0d struct hci_qos { @@ -675,38 +678,38 @@ struct hci_qos { __u32 peak_bandwidth; __u32 latency; __u32 delay_variation; -} __attribute__ ((packed)); +} __packed; struct hci_ev_qos_setup_complete { __u8 status; __le16 handle; struct hci_qos qos; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CMD_COMPLETE 0x0e struct hci_ev_cmd_complete { __u8 ncmd; __le16 opcode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_CMD_STATUS 0x0f struct hci_ev_cmd_status { __u8 status; __u8 ncmd; __le16 opcode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_ROLE_CHANGE 0x12 struct hci_ev_role_change { __u8 status; bdaddr_t bdaddr; __u8 role; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_NUM_COMP_PKTS 0x13 struct hci_ev_num_comp_pkts { __u8 num_hndl; /* variable length part */ -} __attribute__ ((packed)); +} __packed; #define HCI_EV_MODE_CHANGE 0x14 struct hci_ev_mode_change { @@ -714,44 +717,44 @@ struct hci_ev_mode_change { __le16 handle; __u8 mode; __le16 interval; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_PIN_CODE_REQ 0x16 struct hci_ev_pin_code_req { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_LINK_KEY_REQ 0x17 struct hci_ev_link_key_req { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_LINK_KEY_NOTIFY 0x18 struct hci_ev_link_key_notify { bdaddr_t bdaddr; __u8 link_key[16]; __u8 key_type; -} __attribute__ 
((packed)); +} __packed; #define HCI_EV_CLOCK_OFFSET 0x1c struct hci_ev_clock_offset { __u8 status; __le16 handle; __le16 clock_offset; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_PKT_TYPE_CHANGE 0x1d struct hci_ev_pkt_type_change { __u8 status; __le16 handle; __le16 pkt_type; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_PSCAN_REP_MODE 0x20 struct hci_ev_pscan_rep_mode { bdaddr_t bdaddr; __u8 pscan_rep_mode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 struct inquiry_info_with_rssi { @@ -761,7 +764,7 @@ struct inquiry_info_with_rssi { __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; -} __attribute__ ((packed)); +} __packed; struct inquiry_info_with_rssi_and_pscan_mode { bdaddr_t bdaddr; __u8 pscan_rep_mode; @@ -770,7 +773,7 @@ struct inquiry_info_with_rssi_and_pscan_mode { __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_EXT_FEATURES 0x23 struct hci_ev_remote_ext_features { @@ -779,7 +782,7 @@ struct hci_ev_remote_ext_features { __u8 page; __u8 max_page; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SYNC_CONN_COMPLETE 0x2c struct hci_ev_sync_conn_complete { @@ -792,7 +795,7 @@ struct hci_ev_sync_conn_complete { __le16 rx_pkt_len; __le16 tx_pkt_len; __u8 air_mode; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SYNC_CONN_CHANGED 0x2d struct hci_ev_sync_conn_changed { @@ -802,7 +805,7 @@ struct hci_ev_sync_conn_changed { __u8 retrans_window; __le16 rx_pkt_len; __le16 tx_pkt_len; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SNIFF_SUBRATE 0x2e struct hci_ev_sniff_subrate { @@ -812,7 +815,7 @@ struct hci_ev_sniff_subrate { __le16 max_rx_latency; __le16 max_remote_timeout; __le16 max_local_timeout; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f struct extended_inquiry_info { @@ -823,37 +826,37 @@ struct extended_inquiry_info { __le16 clock_offset; __s8 rssi; __u8 data[240]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_IO_CAPA_REQUEST 0x31 struct hci_ev_io_capa_request { bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36 struct hci_ev_simple_pair_complete { __u8 status; bdaddr_t bdaddr; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_REMOTE_HOST_FEATURES 0x3d struct hci_ev_remote_host_features { bdaddr_t bdaddr; __u8 features[8]; -} __attribute__ ((packed)); +} __packed; /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { __u16 type; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SI_DEVICE 0x01 struct hci_ev_si_device { __u16 event; __u16 dev_id; -} __attribute__ ((packed)); +} __packed; #define HCI_EV_SI_SECURITY 0x02 struct hci_ev_si_security { @@ -861,7 +864,7 @@ struct hci_ev_si_security { __u16 proto; __u16 subproto; __u8 incoming; -} __attribute__ ((packed)); +} __packed; /* ---- HCI Packet structures ---- */ #define HCI_COMMAND_HDR_SIZE 3 @@ -872,22 +875,22 @@ struct hci_ev_si_security { struct hci_command_hdr { __le16 opcode; /* OCF & OGF */ __u8 plen; -} __attribute__ ((packed)); +} __packed; struct hci_event_hdr { __u8 evt; __u8 plen; -} __attribute__ ((packed)); +} __packed; struct hci_acl_hdr { __le16 handle; /* Handle & Flags(PB, BC) */ __le16 dlen; -} __attribute__ ((packed)); +} __packed; struct hci_sco_hdr { __le16 handle; __u8 dlen; -} __attribute__ ((packed)); +} __packed; #ifdef __KERNEL__ #include 
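The hci.h hunks above only switch the Bluetooth HCI wire structures from the long-hand __attribute__ ((packed)) to the kernel's __packed macro; the layout of the command, event, ACL and SCO headers is unchanged. As a rough illustration of why these on-the-wire structures must stay packed, here is a stand-alone C sketch (not part of the patch; the kernel types __u8/__le16/__packed are stubbed for user space and a little-endian host is assumed) that serializes one HCI command and checks that the 3-byte header has not gained padding:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define __packed __attribute__((packed))
typedef uint8_t  __u8;		/* stub for the kernel type */
typedef uint16_t __le16;	/* stub; little-endian host assumed */

struct hci_command_hdr {
	__le16 opcode;		/* OCF & OGF */
	__u8   plen;
} __packed;

struct hci_cp_remote_name_req_cancel {
	__u8 bdaddr[6];		/* stand-in for bdaddr_t */
} __packed;

#define HCI_COMMAND_HDR_SIZE		3
#define HCI_OP_REMOTE_NAME_REQ_CANCEL	0x041a

int main(void)
{
	struct hci_cp_remote_name_req_cancel cp = {
		.bdaddr = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 },
	};
	struct hci_command_hdr hdr = {
		.opcode = HCI_OP_REMOTE_NAME_REQ_CANCEL,
		.plen	= sizeof(cp),
	};
	unsigned char frame[HCI_COMMAND_HDR_SIZE + sizeof(cp)];

	/* Without __packed the header could grow to 4 bytes and shift plen. */
	assert(sizeof(hdr) == HCI_COMMAND_HDR_SIZE);

	memcpy(frame, &hdr, sizeof(hdr));
	memcpy(frame + sizeof(hdr), &cp, sizeof(cp));
	printf("HCI command frame: %zu bytes\n", sizeof(frame));
	return 0;
}

The same reasoning applies to every other __packed structure touched by this patch, from bdaddr_t up to the L2CAP, RFCOMM, DECnet and IrDA PDUs further down.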
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index e42f6ed5421cabd05f653ba24f1acc09c1a66844..8b28962e737ececccfe590a70f034e8eee66969a 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1,6 +1,6 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux - Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky @@ -12,13 +12,13 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ @@ -62,6 +62,11 @@ struct hci_conn_hash { unsigned int sco_num; }; +struct bdaddr_list { + struct list_head list; + bdaddr_t bdaddr; +}; +#define NUM_REASSEMBLY 4 struct hci_dev { struct list_head list; spinlock_t lock; @@ -118,7 +123,7 @@ struct hci_dev { struct sk_buff_head cmd_q; struct sk_buff *sent_cmd; - struct sk_buff *reassembly[3]; + struct sk_buff *reassembly[NUM_REASSEMBLY]; struct mutex req_lock; wait_queue_head_t req_wait_q; @@ -127,6 +132,7 @@ struct hci_dev { struct inquiry_cache inq_cache; struct hci_conn_hash conn_hash; + struct bdaddr_list blacklist; struct hci_dev_stats stat; @@ -250,6 +256,7 @@ enum { HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, + HCI_CONN_SCO_SETUP_PEND, }; static inline void hci_conn_hash_init(struct hci_dev *hdev) @@ -330,6 +337,7 @@ void hci_acl_connect(struct hci_conn *conn); void hci_acl_disconn(struct hci_conn *conn, __u8 reason); void hci_add_sco(struct hci_conn *conn, __u16 handle); void hci_setup_sync(struct hci_conn *conn, __u16 handle); +void hci_sco_setup(struct hci_conn *conn, __u8 status); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); int hci_conn_del(struct hci_conn *conn); @@ -380,7 +388,7 @@ static inline void __hci_dev_put(struct hci_dev *d) } static inline void hci_dev_put(struct hci_dev *d) -{ +{ __hci_dev_put(d); module_put(d->owner); } @@ -424,10 +432,14 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); +int hci_blacklist_clear(struct hci_dev *hdev); + void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_frame(struct sk_buff *skb); int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); +int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); int hci_register_sysfs(struct hci_dev *hdev); void 
hci_unregister_sysfs(struct hci_dev *hdev); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 7c695bfd853cb44108b83d24ebf2afe56c875f93..636724b203eef0f2c6fa519fe626df2a707029d3 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -1,6 +1,8 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2009-2010 Gustavo F. Padovan + Copyright (C) 2010 Google Inc. Written 2000,2001 by Maxim Krasnyansky @@ -129,31 +131,31 @@ struct l2cap_conninfo { struct l2cap_hdr { __le16 len; __le16 cid; -} __attribute__ ((packed)); +} __packed; #define L2CAP_HDR_SIZE 4 struct l2cap_cmd_hdr { __u8 code; __u8 ident; __le16 len; -} __attribute__ ((packed)); +} __packed; #define L2CAP_CMD_HDR_SIZE 4 struct l2cap_cmd_rej { __le16 reason; -} __attribute__ ((packed)); +} __packed; struct l2cap_conn_req { __le16 psm; __le16 scid; -} __attribute__ ((packed)); +} __packed; struct l2cap_conn_rsp { __le16 dcid; __le16 scid; __le16 result; __le16 status; -} __attribute__ ((packed)); +} __packed; /* channel indentifier */ #define L2CAP_CID_SIGNALING 0x0001 @@ -177,14 +179,14 @@ struct l2cap_conf_req { __le16 dcid; __le16 flags; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; struct l2cap_conf_rsp { __le16 scid; __le16 flags; __le16 result; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; #define L2CAP_CONF_SUCCESS 0x0000 #define L2CAP_CONF_UNACCEPT 0x0001 @@ -195,7 +197,7 @@ struct l2cap_conf_opt { __u8 type; __u8 len; __u8 val[0]; -} __attribute__ ((packed)); +} __packed; #define L2CAP_CONF_OPT_SIZE 2 #define L2CAP_CONF_HINT 0x80 @@ -216,7 +218,7 @@ struct l2cap_conf_rfc { __le16 retrans_timeout; __le16 monitor_timeout; __le16 max_pdu_size; -} __attribute__ ((packed)); +} __packed; #define L2CAP_MODE_BASIC 0x00 #define L2CAP_MODE_RETRANS 0x01 @@ -227,22 +229,22 @@ struct l2cap_conf_rfc { struct l2cap_disconn_req { __le16 dcid; __le16 scid; -} __attribute__ ((packed)); +} __packed; struct l2cap_disconn_rsp { __le16 dcid; __le16 scid; -} __attribute__ ((packed)); +} __packed; struct l2cap_info_req { __le16 type; -} __attribute__ ((packed)); +} __packed; struct l2cap_info_rsp { __le16 type; __le16 result; __u8 data[0]; -} __attribute__ ((packed)); +} __packed; /* info type */ #define L2CAP_IT_CL_MTU 0x0001 @@ -287,6 +289,11 @@ struct l2cap_conn { struct l2cap_chan_list chan_list; }; +struct sock_del_list { + struct sock *sk; + struct list_head list; +}; + #define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 #define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04 #define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08 @@ -353,7 +360,6 @@ struct l2cap_pinfo { __le16 sport; - spinlock_t send_lock; struct timer_list retrans_timer; struct timer_list monitor_timer; struct timer_list ack_timer; diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 921d7b3c7f8d2098e6390d3d60195628cae55fc1..a140847d622c4e5ad4dc252bd117a0d2e1861efe 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -106,19 +106,19 @@ struct rfcomm_hdr { u8 addr; u8 ctrl; u8 len; // Actual size can be 2 bytes -} __attribute__ ((packed)); +} __packed; struct rfcomm_cmd { u8 addr; u8 ctrl; u8 len; u8 fcs; -} __attribute__ ((packed)); +} __packed; struct rfcomm_mcc { u8 type; u8 len; -} __attribute__ ((packed)); +} __packed; struct rfcomm_pn { u8 dlci; @@ -128,7 +128,7 @@ struct rfcomm_pn { __le16 mtu; u8 max_retrans; u8 credits; -} __attribute__ ((packed)); +} __packed; struct rfcomm_rpn { u8 dlci; @@ -138,17 +138,17 @@ 
struct rfcomm_rpn { u8 xon_char; u8 xoff_char; __le16 param_mask; -} __attribute__ ((packed)); +} __packed; struct rfcomm_rls { u8 dlci; u8 status; -} __attribute__ ((packed)); +} __packed; struct rfcomm_msc { u8 dlci; u8 v24_sig; -} __attribute__ ((packed)); +} __packed; /* ---- Core structures, flags etc ---- */ diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h index 318ab9478a446e078e3b466db81c1252b246670f..6da573c75d54cfdde09f449f6cd3dd617c49fa20 100644 --- a/include/net/caif/caif_dev.h +++ b/include/net/caif/caif_dev.h @@ -50,6 +50,9 @@ struct caif_connect_request { * @client_layer: User implementation of client layer. This layer * MUST have receive and control callback functions * implemented. + * @ifindex: Link layer interface index used for this connection. + * @headroom: Head room needed by CAIF protocol. + * @tailroom: Tail room needed by CAIF protocol. * * This function connects a CAIF channel. The Client must implement * the struct cflayer. This layer represents the Client layer and holds @@ -59,8 +62,9 @@ struct caif_connect_request { * E.g. CAIF Socket will call this function for each socket it connects * and have one client_layer instance for each socket. */ -int caif_connect_client(struct caif_connect_request *config, - struct cflayer *client_layer); +int caif_connect_client(struct caif_connect_request *conn_req, + struct cflayer *client_layer, int *ifindex, + int *headroom, int *tailroom); /** * caif_disconnect_client - Disconnects a client from the CAIF stack. diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h index 25c472f0e5b84fe75d1e896c9c4e53ccab0751d3..c8b07a904e78958308c9237d99d578e7512d5109 100644 --- a/include/net/caif/caif_layer.h +++ b/include/net/caif/caif_layer.h @@ -15,14 +15,8 @@ struct cfpktq; struct caif_payload_info; struct caif_packet_funcs; -#define CAIF_MAX_FRAMESIZE 4096 -#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64) -#define CAIF_NEEDED_HEADROOM (10) -#define CAIF_NEEDED_TAILROOM (2) #define CAIF_LAYER_NAME_SZ 16 -#define CAIF_SUCCESS 1 -#define CAIF_FAILURE 0 /** * caif_assert() - Assert function for CAIF. diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h new file mode 100644 index 0000000000000000000000000000000000000000..ce4570dff020720c2ca6d5e22a0202a78c5c42c5 --- /dev/null +++ b/include/net/caif/caif_spi.h @@ -0,0 +1,153 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Daniel Martensson / Daniel.Martensson@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_SPI_H_ +#define CAIF_SPI_H_ + +#include + +#define SPI_CMD_WR 0x00 +#define SPI_CMD_RD 0x01 +#define SPI_CMD_EOT 0x02 +#define SPI_CMD_IND 0x04 + +#define SPI_DMA_BUF_LEN 8192 + +#define WL_SZ 2 /* 16 bits. */ +#define SPI_CMD_SZ 4 /* 32 bits. */ +#define SPI_IND_SZ 4 /* 32 bits. */ + +#define SPI_XFER 0 +#define SPI_SS_ON 1 +#define SPI_SS_OFF 2 +#define SPI_TERMINATE 3 + +/* Minimum time between different levels is 50 microseconds. */ +#define MIN_TRANSITION_TIME_USEC 50 + +/* Defines for calculating duration of SPI transfers for a particular + * number of bytes. + */ +#define SPI_MASTER_CLK_MHZ 13 +#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk) + +/* Normally this should be aligned on the modem in order to benefit from full + * duplex transfers. However a size of 8188 provokes errors when running with + * the modem. These errors occur when packet sizes approaches 4 kB of data. 
+ */ +#define CAIF_MAX_SPI_FRAME 4092 + +/* Maximum number of uplink CAIF frames that can reside in the same SPI frame. + * This number should correspond with the modem setting. The application side + * CAIF accepts any number of embedded downlink CAIF frames. + */ +#define CAIF_MAX_SPI_PKTS 9 + +/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier + * debugging. Both TX and RX buffers will be filled before the transfer. + */ +#define CFSPI_DBG_PREFILL 0 + +/* Structure describing a SPI transfer. */ +struct cfspi_xfer { + u16 tx_dma_len; + u16 rx_dma_len; + void *va_tx; + dma_addr_t pa_tx; + void *va_rx; + dma_addr_t pa_rx; +}; + +/* Structure implemented by the SPI interface. */ +struct cfspi_ifc { + void (*ss_cb) (bool assert, struct cfspi_ifc *ifc); + void (*xfer_done_cb) (struct cfspi_ifc *ifc); + void *priv; +}; + +/* Structure implemented by SPI clients. */ +struct cfspi_dev { + int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev); + void (*sig_xfer) (bool xfer, struct cfspi_dev *dev); + struct cfspi_ifc *ifc; + char *name; + u32 clk_mhz; + void *priv; +}; + +/* Enumeration describing the CAIF SPI state. */ +enum cfspi_state { + CFSPI_STATE_WAITING = 0, + CFSPI_STATE_AWAKE, + CFSPI_STATE_FETCH_PKT, + CFSPI_STATE_GET_NEXT, + CFSPI_STATE_INIT_XFER, + CFSPI_STATE_WAIT_ACTIVE, + CFSPI_STATE_SIG_ACTIVE, + CFSPI_STATE_WAIT_XFER_DONE, + CFSPI_STATE_XFER_DONE, + CFSPI_STATE_WAIT_INACTIVE, + CFSPI_STATE_SIG_INACTIVE, + CFSPI_STATE_DELIVER_PKT, + CFSPI_STATE_MAX, +}; + +/* Structure implemented by SPI physical interfaces. */ +struct cfspi { + struct caif_dev_common cfdev; + struct net_device *ndev; + struct platform_device *pdev; + struct sk_buff_head qhead; + struct sk_buff_head chead; + u16 cmd; + u16 tx_cpck_len; + u16 tx_npck_len; + u16 rx_cpck_len; + u16 rx_npck_len; + struct cfspi_ifc ifc; + struct cfspi_xfer xfer; + struct cfspi_dev *dev; + unsigned long state; + struct work_struct work; + struct workqueue_struct *wq; + struct list_head list; + int flow_off_sent; + u32 qd_low_mark; + u32 qd_high_mark; + struct completion comp; + wait_queue_head_t wait; + spinlock_t lock; + bool flow_stop; +#ifdef CONFIG_DEBUG_FS + enum cfspi_state dbg_state; + u16 pcmd; + u16 tx_ppck_len; + u16 rx_ppck_len; + struct dentry *dbgfs_dir; + struct dentry *dbgfs_state; + struct dentry *dbgfs_frame; +#endif /* CONFIG_DEBUG_FS */ +}; + +extern int spi_frm_align; +extern int spi_up_head_align; +extern int spi_up_tail_align; +extern int spi_down_head_align; +extern int spi_down_tail_align; +extern struct platform_driver cfspi_spi_driver; + +void cfspi_dbg_state(struct cfspi *cfspi, int state); +int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len); +int cfspi_xmitlen(struct cfspi *cfspi); +int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len); +int cfspi_spi_remove(struct platform_device *pdev); +int cfspi_spi_probe(struct platform_device *pdev); +int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len); +int cfspi_xmitlen(struct cfspi *cfspi); +int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len); +void cfspi_xfer(struct work_struct *work); + +#endif /* CAIF_SPI_H_ */ diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h index 9fc2fc20b884287ab706de28f08d4dadc3f3906a..bd646faffa47bd8cd46ed4f15e1988c7bfac8247 100644 --- a/include/net/caif/cfcnfg.h +++ b/include/net/caif/cfcnfg.h @@ -7,6 +7,7 @@ #ifndef CFCNFG_H_ #define CFCNFG_H_ #include +#include #include #include @@ -73,8 +74,8 @@ void cfcnfg_remove(struct cfcnfg *cfg); void 
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, - void *dev, struct cflayer *phy_layer, u16 *phyid, - enum cfcnfg_phy_preference pref, + struct net_device *dev, struct cflayer *phy_layer, + u16 *phyid, enum cfcnfg_phy_preference pref, bool fcs, bool stx); /** @@ -114,11 +115,18 @@ void cfcnfg_release_adap_layer(struct cflayer *adap_layer); * @param: Link setup parameters. * @adap_layer: Specify the adaptation layer; the receive and * flow-control functions MUST be set in the structure. - * + * @ifindex: Link layer interface index used for this connection. + * @proto_head: Protocol head-space needed by CAIF protocol, + * excluding link layer. + * @proto_tail: Protocol tail-space needed by CAIF protocol, + * excluding link layer. */ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, struct cfctrl_link_param *param, - struct cflayer *adap_layer); + struct cflayer *adap_layer, + int *ifindex, + int *proto_head, + int *proto_tail); /** * cfcnfg_get_phyid() - Get physical ID, given type. diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h index 2dc9eb193ecf8ae3fcab37586a28a08310472986..b1fa87ee0992c1b8f133338f724cd72f45e2204a 100644 --- a/include/net/caif/cfsrvl.h +++ b/include/net/caif/cfsrvl.h @@ -16,6 +16,8 @@ struct cfsrvl { bool open; bool phy_flow_on; bool modem_flow_on; + bool supports_flowctrl; + void (*release)(struct kref *); struct dev_info dev_info; struct kref ref; }; @@ -25,13 +27,15 @@ struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info); struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info); struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info); struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info); -struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info); +struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info, + int mtu_size); struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info); bool cfsrvl_phyid_match(struct cflayer *layer, int phyid); void cfservl_destroy(struct cflayer *layer); void cfsrvl_init(struct cfsrvl *service, - u8 channel_id, - struct dev_info *dev_info); + u8 channel_id, + struct dev_info *dev_info, + bool supports_flowctrl); bool cfsrvl_ready(struct cfsrvl *service, int *err); u8 cfsrvl_getphyid(struct cflayer *layer); @@ -50,7 +54,10 @@ static inline void cfsrvl_put(struct cflayer *layr) if (layr == NULL) return; s = container_of(layr, struct cfsrvl, layer); - kref_put(&s->ref, cfsrvl_release); + + WARN_ON(!s->release); + if (s->release) + kref_put(&s->ref, s->release); } #endif /* CFSRVL_H_ */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b44a2e5321a365468312d56ac7285c8ff09129ed..2fd06c60ffbba1b81af86303d46e5187f98e946d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -37,6 +37,7 @@ * * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7) + * @IEEE80211_NUM_BANDS: number of defined bands */ enum ieee80211_band { IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ, @@ -89,7 +90,7 @@ enum ieee80211_channel_flags { * @max_power: maximum transmission power (in dBm) * @beacon_found: helper to regulatory code to indicate when a beacon * has been found on this channel. Use regulatory_hint_found_beacon() - * to enable this, this is is useful only on 5 GHz band. + * to enable this, this is useful only on 5 GHz band. * @orig_mag: internal use * @orig_mpwr: internal use */ @@ -188,6 +189,7 @@ struct ieee80211_sta_ht_cap { * in this band. 
Must be sorted to give a valid "supported * rates" IE, i.e. CCK rates first, then OFDM. * @n_bitrates: Number of bitrates in @bitrates + * @ht_cap: HT capabilities in this band */ struct ieee80211_supported_band { struct ieee80211_channel *channels; @@ -225,6 +227,7 @@ struct vif_params { * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used * with the get_key() callback, must be in little endian, * length given by @seq_len. + * @seq_len: length of @seq. */ struct key_params { u8 *key; @@ -237,6 +240,8 @@ struct key_params { /** * enum survey_info_flags - survey information flags * + * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in + * * Used by the driver to indicate which info in &struct survey_info * it has filled in during the get_survey(). */ @@ -247,13 +252,13 @@ enum survey_info_flags { /** * struct survey_info - channel survey response * - * Used by dump_survey() to report back per-channel survey information. - * * @channel: the channel this survey record reports, mandatory * @filled: bitflag of flags from &enum survey_info_flags * @noise: channel noise in dBm. This and all following fields are * optional * + * Used by dump_survey() to report back per-channel survey information. + * * This structure can later be expanded with things like * channel duty cycle etc. */ @@ -288,7 +293,7 @@ struct beacon_parameters { * * @PLINK_ACTION_INVALID: action 0 is reserved * @PLINK_ACTION_OPEN: start mesh peer link establishment - * @PLINK_ACTION_BLOCL: block traffic from this mesh peer + * @PLINK_ACTION_BLOCK: block traffic from this mesh peer */ enum plink_actions { PLINK_ACTION_INVALID, @@ -311,6 +316,8 @@ enum plink_actions { * (bitmask of BIT(NL80211_STA_FLAG_...)) * @listen_interval: listen interval or -1 for no change * @aid: AID or zero for no change + * @plink_action: plink action to take + * @ht_capa: HT capabilities of station */ struct station_parameters { u8 *supported_rates; @@ -448,13 +455,13 @@ enum monitor_flags { * Used by the driver to indicate which info in &struct mpath_info it has filled * in during get_station() or dump_station(). * - * MPATH_INFO_FRAME_QLEN: @frame_qlen filled - * MPATH_INFO_SN: @sn filled - * MPATH_INFO_METRIC: @metric filled - * MPATH_INFO_EXPTIME: @exptime filled - * MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled - * MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled - * MPATH_INFO_FLAGS: @flags filled + * @MPATH_INFO_FRAME_QLEN: @frame_qlen filled + * @MPATH_INFO_SN: @sn filled + * @MPATH_INFO_METRIC: @metric filled + * @MPATH_INFO_EXPTIME: @exptime filled + * @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled + * @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled + * @MPATH_INFO_FLAGS: @flags filled */ enum mpath_info_flags { MPATH_INFO_FRAME_QLEN = BIT(0), @@ -587,6 +594,7 @@ struct cfg80211_ssid { * @ie_len: length of ie in octets * @wiphy: the wiphy this was for * @dev: the interface + * @aborted: (internal) scan request was notified as aborted */ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; @@ -623,6 +631,7 @@ enum cfg80211_signal_type { * This structure describes a BSS (which may also be a mesh network) * for use in scan results and similar. 
* + * @channel: channel this BSS is on * @bssid: BSSID of the BSS * @tsf: timestamp of last received update * @beacon_interval: the beacon interval as from the frame @@ -801,6 +810,7 @@ struct cfg80211_disassoc_request { * @beacon_interval: beacon interval to use * @privacy: this is a protected network, keys will be configured * after joining + * @basic_rates: bitmap of basic rates to use when creating the IBSS */ struct cfg80211_ibss_params { u8 *ssid; @@ -809,6 +819,7 @@ struct cfg80211_ibss_params { u8 *ie; u8 ssid_len, ie_len; u16 beacon_interval; + u32 basic_rates; bool channel_fixed; bool privacy; }; @@ -826,8 +837,8 @@ struct cfg80211_ibss_params { * @ssid: SSID * @ssid_len: Length of ssid in octets * @auth_type: Authentication type (algorithm) - * @assoc_ie: IEs for association request - * @assoc_ie_len: Length of assoc_ie in octets + * @ie: IEs for association request + * @ie_len: Length of assoc_ie in octets * @privacy: indicates whether privacy-enabled APs should be used * @crypto: crypto settings * @key_len: length of WEP key for shared key authentication @@ -850,10 +861,11 @@ struct cfg80211_connect_params { /** * enum wiphy_params_flags - set_wiphy_params bitfield values - * WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed - * WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed - * WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed - * WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed + * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed + * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed + * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed + * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed + * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed */ enum wiphy_params_flags { WIPHY_PARAM_RETRY_SHORT = 1 << 0, @@ -863,19 +875,6 @@ enum wiphy_params_flags { WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, }; -/** - * enum tx_power_setting - TX power adjustment - * - * @TX_POWER_AUTOMATIC: the dbm parameter is ignored - * @TX_POWER_LIMITED: limit TX power by the dbm parameter - * @TX_POWER_FIXED: fix TX power to the dbm parameter - */ -enum tx_power_setting { - TX_POWER_AUTOMATIC, - TX_POWER_LIMITED, - TX_POWER_FIXED, -}; - /* * cfg80211_bitrate_mask - masks for bitrate control */ @@ -949,10 +948,16 @@ struct cfg80211_pmksa { * @del_beacon: Remove beacon configuration and stop sending the beacon. * * @add_station: Add a new station. - * * @del_station: Remove a station; @mac may be NULL to remove all stations. - * * @change_station: Modify a given station. + * @get_station: get station information for the station identified by @mac + * @dump_station: dump station callback -- resume dump at index @idx + * + * @add_mpath: add a fixed mesh path + * @del_mpath: delete a given mesh path + * @change_mpath: change a given mesh path + * @get_mpath: get a mesh path for the given parameters + * @dump_mpath: dump mesh path callback -- resume dump at index @idx * * @get_mesh_params: Put the current mesh parameters into *params * @@ -960,8 +965,6 @@ struct cfg80211_pmksa { * The mask is a bitfield which tells us which parameters to * set, and which to leave alone. * - * @set_mesh_cfg: set mesh parameters (by now, just mesh id) - * * @change_bss: Modify parameters for a given BSS. 
* * @set_txq_params: Set TX queue parameters @@ -1002,6 +1005,8 @@ struct cfg80211_pmksa { * @get_tx_power: store the current TX power into the dbm variable; * return 0 if successful * + * @set_wds_peer: set the WDS peer for a WDS interface + * * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting * functions to adjust rfkill hw state * @@ -1019,6 +1024,8 @@ struct cfg80211_pmksa { * * @testmode_cmd: run a test mode command * + * @set_bitrate_mask: set the bitrate mask configuration + * * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac * devices running firmwares capable of generating the (re) association * RSN IE. It allows for faster roaming between WPA2 BSSIDs. @@ -1129,7 +1136,7 @@ struct cfg80211_ops { int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed); int (*set_tx_power)(struct wiphy *wiphy, - enum tx_power_setting type, int dbm); + enum nl80211_tx_power_setting type, int mbm); int (*get_tx_power)(struct wiphy *wiphy, int *dbm); int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev, @@ -1168,6 +1175,7 @@ struct cfg80211_ops { int (*action)(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, + bool channel_type_valid, const u8 *buf, size_t len, u64 *cookie); int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, @@ -1230,8 +1238,6 @@ struct mac_address { /** * struct wiphy - wireless hardware description - * @idx: the wiphy index assigned to this item - * @class_dev: the class device representing /sys/class/ieee80211/ * @reg_notifier: the driver's regulatory notification callback * @regd: the driver's regulatory domain, if one was requested via * the regulatory_hint() API. This can be used by the driver @@ -1245,7 +1251,7 @@ struct mac_address { * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); * -1 = fragmentation disabled, only odd values >= 256 used * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled - * @net: the network namespace this wiphy currently lives in + * @_net: the network namespace this wiphy currently lives in * @perm_addr: permanent MAC address of this device * @addr_mask: If the device supports multiple MAC addresses by masking, * set this to a mask with variable bits set to 1, e.g. if the last @@ -1258,6 +1264,28 @@ struct mac_address { * by default for perm_addr. In this case, the mask should be set to * all-zeroes. In this case it is assumed that the device can handle * the same number of arbitrary MAC addresses. 
+ * @debugfsdir: debugfs directory used for this wiphy, will be renamed + * automatically on wiphy renames + * @dev: (virtual) struct device for this wiphy + * @wext: wireless extension handlers + * @priv: driver private data (sized according to wiphy_new() parameter) + * @interface_modes: bitmask of interfaces types valid for this wiphy, + * must be set by driver + * @flags: wiphy flags, see &enum wiphy_flags + * @bss_priv_size: each BSS struct has private data allocated with it, + * this variable determines its size + * @max_scan_ssids: maximum number of SSIDs the device can scan for in + * any given scan + * @max_scan_ie_len: maximum length of user-controlled IEs device can + * add to probe request frames transmitted during a scan, must not + * include fixed IEs like supported rates + * @coverage_class: current coverage class + * @fw_version: firmware version for ethtool reporting + * @hw_version: hardware version for ethtool reporting + * @max_num_pmkids: maximum number of PMKIDs supported by device + * @privid: a pointer that drivers can use to identify if an arbitrary + * wiphy is theirs, e.g. in global notifiers + * @bands: information about bands/channels supported by this device */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -1330,27 +1358,16 @@ struct wiphy { char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); }; -#ifdef CONFIG_NET_NS static inline struct net *wiphy_net(struct wiphy *wiphy) { - return wiphy->_net; + return read_pnet(&wiphy->_net); } static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) { - wiphy->_net = net; -} -#else -static inline struct net *wiphy_net(struct wiphy *wiphy) -{ - return &init_net; + write_pnet(&wiphy->_net, net); } -static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) -{ -} -#endif - /** * wiphy_priv - return priv from wiphy * @@ -1399,7 +1416,7 @@ static inline struct device *wiphy_dev(struct wiphy *wiphy) * * @wiphy: The wiphy whose name to return */ -static inline const char *wiphy_name(struct wiphy *wiphy) +static inline const char *wiphy_name(const struct wiphy *wiphy) { return dev_name(&wiphy->dev); } @@ -1471,13 +1488,14 @@ struct cfg80211_cached_keys; * @ssid: (private) Used by the internal configuration code * @ssid_len: (private) Used by the internal configuration code * @wext: (private) Used by the internal wireless extensions compat code - * @wext_bssid: (private) Used by the internal wireless extensions compat code * @use_4addr: indicates 4addr mode is used on this interface, must be * set by driver (if supported) on add_interface BEFORE registering the * netdev and may otherwise be used by driver read-only, will be update * by cfg80211 on change_interface * @action_registrations: list of registrations for action frames * @action_registrations_lock: lock for the list + * @mtx: mutex used to lock data in this struct + * @cleanup_work: work struct used for cleanup that can't be done directly */ struct wireless_dev { struct wiphy *wiphy; @@ -1551,11 +1569,13 @@ static inline void *wdev_priv(struct wireless_dev *wdev) /** * ieee80211_channel_to_frequency - convert channel number to frequency + * @chan: channel number */ extern int ieee80211_channel_to_frequency(int chan); /** * ieee80211_frequency_to_channel - convert frequency to channel number + * @freq: center frequency */ extern int ieee80211_frequency_to_channel(int freq); @@ -1570,6 +1590,8 @@ extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, int freq); /** * ieee80211_get_channel - 
get channel struct from wiphy for specified frequency + * @wiphy: the struct wiphy to get the channel for + * @freq: the center frequency of the channel */ static inline struct ieee80211_channel * ieee80211_get_channel(struct wiphy *wiphy, int freq) @@ -1630,9 +1652,6 @@ struct ieee80211_radiotap_vendor_namespaces { * @is_radiotap_ns: indicates whether the current namespace is the default * radiotap namespace or not * - * @overrides: override standard radiotap fields - * @n_overrides: number of overrides - * * @_rtheader: pointer to the radiotap header we are walking through * @_max_length: length of radiotap header in cpu byte ordering * @_arg_index: next argument index @@ -1933,6 +1952,10 @@ int cfg80211_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra); +int cfg80211_wext_siwpmksa(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); + /* * callbacks for asynchronous cfg80211 methods, notification * functions and BSS handling helpers @@ -1948,10 +1971,12 @@ int cfg80211_wext_giwap(struct net_device *dev, void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted); /** - * cfg80211_inform_bss - inform cfg80211 of a new BSS + * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame * * @wiphy: the wiphy reporting the BSS - * @bss: the found BSS + * @channel: The channel the frame was received on + * @mgmt: the management frame (probe response or beacon) + * @len: length of the management frame * @signal: the signal strength, type depends on the wiphy's signal_type * @gfp: context flags * @@ -1964,6 +1989,23 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len, s32 signal, gfp_t gfp); +/** + * cfg80211_inform_bss - inform cfg80211 of a new BSS + * + * @wiphy: the wiphy reporting the BSS + * @channel: The channel the frame was received on + * @bssid: the BSSID of the BSS + * @timestamp: the TSF timestamp sent by the peer + * @capability: the capability field sent by the peer + * @beacon_interval: the beacon interval announced by the peer + * @ie: additional IEs sent by the peer + * @ielen: length of the additional IEs + * @signal: the signal strength, type depends on the wiphy's signal_type + * @gfp: context flags + * + * This informs cfg80211 that BSS information was found and + * the BSS should be updated/added. + */ struct cfg80211_bss* cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, @@ -2378,4 +2420,67 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, gfp_t gfp); +#ifdef __KERNEL__ + +/* Logging, debugging and troubleshooting/diagnostic helpers. */ + +/* wiphy_printk helpers, similar to dev_printk */ + +#define wiphy_printk(level, wiphy, format, args...) \ + printk(level "%s: " format, wiphy_name(wiphy), ##args) +#define wiphy_emerg(wiphy, format, args...) \ + wiphy_printk(KERN_EMERG, wiphy, format, ##args) +#define wiphy_alert(wiphy, format, args...) \ + wiphy_printk(KERN_ALERT, wiphy, format, ##args) +#define wiphy_crit(wiphy, format, args...) \ + wiphy_printk(KERN_CRIT, wiphy, format, ##args) +#define wiphy_err(wiphy, format, args...) \ + wiphy_printk(KERN_ERR, wiphy, format, ##args) +#define wiphy_warn(wiphy, format, args...) \ + wiphy_printk(KERN_WARNING, wiphy, format, ##args) +#define wiphy_notice(wiphy, format, args...) \ + wiphy_printk(KERN_NOTICE, wiphy, format, ##args) +#define wiphy_info(wiphy, format, args...) 
\ + wiphy_printk(KERN_INFO, wiphy, format, ##args) + +int wiphy_debug(const struct wiphy *wiphy, const char *format, ...) + __attribute__ ((format (printf, 2, 3))); + +#if defined(DEBUG) +#define wiphy_dbg(wiphy, format, args...) \ + wiphy_printk(KERN_DEBUG, wiphy, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define wiphy_dbg(wiphy, format, args...) \ + dynamic_pr_debug("%s: " format, wiphy_name(wiphy), ##args) +#else +#define wiphy_dbg(wiphy, format, args...) \ +({ \ + if (0) \ + wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ + 0; \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define wiphy_vdbg wiphy_dbg +#else + +#define wiphy_vdbg(wiphy, format, args...) \ +({ \ + if (0) \ + wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ + 0; \ +}) +#endif + +/* + * wiphy_WARN() acts like wiphy_printk(), but with the key difference + * of using a WARN/WARN_ON to get the message out, including the + * file/line information and a backtrace. + */ +#define wiphy_WARN(wiphy, format, args...) \ + WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); + +#endif + #endif /* __NET_CFG80211_H */ diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h index 511a459ec10f8684cab539669f6e64c85e18967b..0916bbf3bdff065fbc91d191c05aeecd3019ad40 100644 --- a/include/net/dn_dev.h +++ b/include/net/dn_dev.h @@ -101,7 +101,7 @@ struct dn_short_packet { __le16 dstnode; __le16 srcnode; __u8 forward; -} __attribute__((packed)); +} __packed; struct dn_long_packet { __u8 msgflg; @@ -115,7 +115,7 @@ struct dn_long_packet { __u8 visit_ct; __u8 s_class; __u8 pt; -} __attribute__((packed)); +} __packed; /*------------------------- DRP - Routing messages ---------------------*/ @@ -132,7 +132,7 @@ struct endnode_hello_message { __u8 mpd; __u8 datalen; __u8 data[2]; -} __attribute__((packed)); +} __packed; struct rtnode_hello_message { __u8 msgflg; @@ -144,7 +144,7 @@ struct rtnode_hello_message { __u8 area; __le16 timer; __u8 mpd; -} __attribute__((packed)); +} __packed; extern void dn_dev_init(void); diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h index 17d43d2db5ec2e9883c981e91c24f53a6612379c..e43a2893f132f0767849271f3eac0f625dbd8828 100644 --- a/include/net/dn_nsp.h +++ b/include/net/dn_nsp.h @@ -74,18 +74,18 @@ struct nsp_data_seg_msg { __u8 msgflg; __le16 dstaddr; __le16 srcaddr; -} __attribute__((packed)); +} __packed; struct nsp_data_opt_msg { __le16 acknum; __le16 segnum; __le16 lsflgs; -} __attribute__((packed)); +} __packed; struct nsp_data_opt_msg1 { __le16 acknum; __le16 segnum; -} __attribute__((packed)); +} __packed; /* Acknowledgment Message (data/other data) */ @@ -94,13 +94,13 @@ struct nsp_data_ack_msg { __le16 dstaddr; __le16 srcaddr; __le16 acknum; -} __attribute__((packed)); +} __packed; /* Connect Acknowledgment Message */ struct nsp_conn_ack_msg { __u8 msgflg; __le16 dstaddr; -} __attribute__((packed)); +} __packed; /* Connect Initiate/Retransmit Initiate/Connect Confirm */ @@ -117,7 +117,7 @@ struct nsp_conn_init_msg { #define NSP_FC_MASK 0x0c /* FC type mask */ __u8 info; __le16 segsize; -} __attribute__((packed)); +} __packed; /* Disconnect Initiate/Disconnect Confirm */ struct nsp_disconn_init_msg { @@ -125,7 +125,7 @@ struct nsp_disconn_init_msg { __le16 dstaddr; __le16 srcaddr; __le16 reason; -} __attribute__((packed)); +} __packed; @@ -135,7 +135,7 @@ struct srcobj_fmt { __le16 grpcode; __le16 usrcode; __u8 dlen; -} __attribute__((packed)); +} __packed; /* * A collection of functions for manipulating the sequence diff --git a/include/net/dn_route.h b/include/net/dn_route.h 
index 60c9f22d86943f0aa19ecdc18d513a24e763b9b1..ccadab3aa3f6c5948e484062c82c022b5dbf132b 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -65,9 +65,7 @@ extern void dn_rt_cache_flush(int delay); * packets to the originating host. */ struct dn_route { - union { - struct dst_entry dst; - } u; + struct dst_entry dst; struct flowi fl; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index eb551baafc044c8b0d9938a2b794968a87384917..f7dcd2c7041266fadc9794efc50136c44c36d4f4 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -68,26 +68,15 @@ struct genl_info { #endif }; -#ifdef CONFIG_NET_NS static inline struct net *genl_info_net(struct genl_info *info) { - return info->_net; + return read_pnet(&info->_net); } static inline void genl_info_net_set(struct genl_info *info, struct net *net) { - info->_net = net; + write_pnet(&info->_net, net); } -#else -static inline struct net *genl_info_net(struct genl_info *info) -{ - return &init_net; -} - -static inline void genl_info_net_set(struct genl_info *info, struct net *net) -{ -} -#endif /** * struct genl_ops - generic netlink operations diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 18c773286b91744ccf0447b60597db3acc353f9a..22fac9892b160c06cecb83d8b98c9fcef155ebd8 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -1,8 +1,8 @@ #ifndef _INET_COMMON_H #define _INET_COMMON_H -extern const struct proto_ops inet_stream_ops; -extern const struct proto_ops inet_dgram_ops; +extern const struct proto_ops inet_stream_ops; +extern const struct proto_ops inet_dgram_ops; /* * INET4 prototypes used by INET6 @@ -13,37 +13,28 @@ struct sock; struct sockaddr; struct socket; -extern int inet_release(struct socket *sock); -extern int inet_stream_connect(struct socket *sock, - struct sockaddr * uaddr, - int addr_len, int flags); -extern int inet_dgram_connect(struct socket *sock, - struct sockaddr * uaddr, - int addr_len, int flags); -extern int inet_accept(struct socket *sock, - struct socket *newsock, int flags); -extern int inet_sendmsg(struct kiocb *iocb, - struct socket *sock, - struct msghdr *msg, - size_t size); -extern int inet_shutdown(struct socket *sock, int how); -extern int inet_listen(struct socket *sock, int backlog); - -extern void inet_sock_destruct(struct sock *sk); - -extern int inet_bind(struct socket *sock, - struct sockaddr *uaddr, int addr_len); -extern int inet_getname(struct socket *sock, - struct sockaddr *uaddr, - int *uaddr_len, int peer); -extern int inet_ioctl(struct socket *sock, - unsigned int cmd, unsigned long arg); - -extern int inet_ctl_sock_create(struct sock **sk, - unsigned short family, - unsigned short type, - unsigned char protocol, - struct net *net); +extern int inet_release(struct socket *sock); +extern int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr, + int addr_len, int flags); +extern int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, + int addr_len, int flags); +extern int inet_accept(struct socket *sock, struct socket *newsock, int flags); +extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t size); +extern ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags); +extern int inet_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t size, int flags); +extern int inet_shutdown(struct socket *sock, int how); +extern int inet_listen(struct socket *sock, int backlog); +extern void 
inet_sock_destruct(struct sock *sk); +extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +extern int inet_getname(struct socket *sock, struct sockaddr *uaddr, + int *uaddr_len, int peer); +extern int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +extern int inet_ctl_sock_create(struct sock **sk, unsigned short family, + unsigned short type, unsigned char protocol, + struct net *net); static inline void inet_ctl_sock_destroy(struct sock *sk) { @@ -51,5 +42,3 @@ static inline void inet_ctl_sock_destroy(struct sock *sk) } #endif - - diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 39f2dc9439086b83e45b6495e712ebfe91daa2a4..16ff29a7bb30cfbf5e1581c1388e321e9d9f2a1b 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -20,6 +20,7 @@ struct inet_frag_queue { atomic_t refcnt; struct timer_list timer; /* when will this queue expire? */ struct sk_buff *fragments; /* list of received fragments */ + struct sk_buff *fragments_tail; ktime_t stamp; int len; /* total length of orig datagram */ int meat; diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 1653de515ceec95c8e9017edfdda532d4a851547..1989cfd7405fccfc6839f2742eb45b4b2d7f8712 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -137,7 +137,8 @@ struct inet_sock { hdrincl:1, mc_loop:1, transparent:1, - mc_all:1; + mc_all:1, + nodefrag:1; int mc_index; __be32 mc_addr; struct ip_mc_socklist *mc_list; diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 87b1df0d4d8ce7419a0981593dad4fd93f3ae213..417d0c894f29e29c096a9bf5ad7888b42d54976a 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -22,10 +22,21 @@ struct inet_peer { __u32 dtime; /* the time of last use of not * referenced entries */ atomic_t refcnt; - atomic_t rid; /* Frag reception counter */ - atomic_t ip_id_count; /* IP ID for the next packet */ - __u32 tcp_ts; - __u32 tcp_ts_stamp; + /* + * Once inet_peer is queued for deletion (refcnt == -1), following fields + * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp + * We can share memory with rcu_head to keep inet_peer small + * (less then 64 bytes) + */ + union { + struct { + atomic_t rid; /* Frag reception counter */ + atomic_t ip_id_count; /* IP ID for the next packet */ + __u32 tcp_ts; + __u32 tcp_ts_stamp; + }; + struct rcu_head rcu; + }; }; void inet_initpeers(void) __init; @@ -36,10 +47,21 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create); /* can be called from BH context or outside */ extern void inet_putpeer(struct inet_peer *p); +/* + * temporary check to make sure we dont access rid, ip_id_count, tcp_ts, + * tcp_ts_stamp if no refcount is taken on inet_peer + */ +static inline void inet_peer_refcheck(const struct inet_peer *p) +{ + WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0); +} + + /* can be called with or without local BH being disabled */ static inline __u16 inet_getid(struct inet_peer *p, int more) { more++; + inet_peer_refcheck(p); return atomic_add_return(more, &p->ip_id_count) - more; } diff --git a/include/net/ip.h b/include/net/ip.h index 452f229c380aea77f0adc5e018ef2972d7fcd368..890f9725d68156b8d4df25adea50880ae3bf46a0 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -61,11 +61,14 @@ struct ipcm_cookie { struct ip_ra_chain { struct ip_ra_chain *next; struct sock *sk; - void (*destructor)(struct sock *); + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct rcu_head rcu; }; extern struct ip_ra_chain *ip_ra_chain; 
-extern rwlock_t ip_ra_lock; /* IP flags. */ #define IP_CE 0x8000 /* Flag: "Congestion" */ @@ -162,12 +165,12 @@ struct ipv4_config { }; extern struct ipv4_config ipv4_config; -#define IP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.ip_statistics, field) -#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.ip_statistics, field) -#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.ip_statistics, field, val) -#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS_BH((net)->mib.ip_statistics, field, val) -#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS((net)->mib.ip_statistics, field, val) -#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS_BH((net)->mib.ip_statistics, field, val) +#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) +#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field) +#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val) +#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val) +#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val) +#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val) #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field) #define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field) #define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field) @@ -175,7 +178,15 @@ extern struct ipv4_config ipv4_config; #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) extern unsigned long snmp_fold_field(void __percpu *mib[], int offt); -extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize); +#if BITS_PER_LONG==32 +extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off); +#else +static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off) +{ + return snmp_fold_field(mib, offt); +} +#endif +extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align); extern void snmp_mib_free(void __percpu *ptr[2]); extern struct local_ports { diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 4b1dc1161c37ca162eb3d69e82737192ebf8d220..062a823d311cd55ecbcc4d40062b21d25ce680e2 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -84,13 +84,11 @@ struct rt6key { struct fib6_table; struct rt6_info { - union { - struct dst_entry dst; - } u; + struct dst_entry dst; -#define rt6i_dev u.dst.dev -#define rt6i_nexthop u.dst.neighbour -#define rt6i_expires u.dst.expires +#define rt6i_dev dst.dev +#define rt6i_nexthop dst.neighbour +#define rt6i_expires dst.expires /* * Tail elements of dst_entry (__refcnt etc.) 
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index fbf9d1cda27b07d178a51ee30a21bf1ad122fb0a..fc94ec568a50a54db39053b9bdcf0d862c7eba9a 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -27,6 +27,6 @@ struct ipv6_tlv_tnl_enc_lim { __u8 type; /* type-code for option */ __u8 length; /* option length */ __u8 encap_limit; /* tunnel encapsulation limit */ -} __attribute__ ((packed)); +} __packed; #endif diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index fe82b1e10a296b4892d5ccb9c4ad7ba94b8f82a9..a4747a0f7303ab73b1dffa7cd9a66740c0ef0ffe 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -632,10 +632,22 @@ extern struct ip_vs_conn *ip_vs_ct_in_get (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, const union nf_inet_addr *d_addr, __be16 d_port); +struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, + const struct ip_vs_iphdr *iph, + unsigned int proto_off, + int inverse); + extern struct ip_vs_conn *ip_vs_conn_out_get (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, const union nf_inet_addr *d_addr, __be16 d_port); +struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, + const struct ip_vs_iphdr *iph, + unsigned int proto_off, + int inverse); + /* put back the conn without restarting its timer */ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) { @@ -736,8 +748,6 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc); extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb); extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb); -extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, - char *o_buf, int o_len, char *n_buf, int n_len); extern int ip_vs_app_init(void); extern void ip_vs_app_cleanup(void); diff --git a/include/net/ipip.h b/include/net/ipip.h index 11e8513d2d071d755be2a6dc4af5d682e6ad647b..65caea8b414f0c5c60f7c4331bd7d7d6684ba9fb 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -50,7 +50,7 @@ struct ip_tunnel_prl_entry { int pkt_len = skb->len - skb_transport_offset(skb); \ \ skb->ip_summed = CHECKSUM_NONE; \ - ip_select_ident(iph, &rt->u.dst, NULL); \ + ip_select_ident(iph, &rt->dst, NULL); \ \ err = ip_local_out(skb); \ if (likely(net_xmit_eval(err) == 0)) { \ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 2600b69757b8c63d6ab8c570b9f0e26f195468c4..1f841241099802145a8e1cff634e7e26c2f7c16c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -136,17 +136,17 @@ extern struct ctl_path net_ipv6_ctl_path[]; /* MIBs */ #define IP6_INC_STATS(net, idev,field) \ - _DEVINC(net, ipv6, , idev, field) + _DEVINC(net, ipv6, 64, idev, field) #define IP6_INC_STATS_BH(net, idev,field) \ - _DEVINC(net, ipv6, _BH, idev, field) + _DEVINC(net, ipv6, 64_BH, idev, field) #define IP6_ADD_STATS(net, idev,field,val) \ - _DEVADD(net, ipv6, , idev, field, val) + _DEVADD(net, ipv6, 64, idev, field, val) #define IP6_ADD_STATS_BH(net, idev,field,val) \ - _DEVADD(net, ipv6, _BH, idev, field, val) + _DEVADD(net, ipv6, 64_BH, idev, field, val) #define IP6_UPD_PO_STATS(net, idev,field,val) \ - _DEVUPD(net, ipv6, , idev, field, val) + _DEVUPD(net, ipv6, 64, idev, field, val) #define IP6_UPD_PO_STATS_BH(net, idev,field,val) \ - _DEVUPD(net, ipv6, _BH, idev, field, val) + _DEVUPD(net, ipv6, 64_BH, idev, field, val) #define ICMP6_INC_STATS(net, idev, field) \ _DEVINC(net, icmpv6, , idev, field) #define ICMP6_INC_STATS_BH(net, idev, 
field) \ @@ -551,6 +551,10 @@ extern int ipv6_ext_hdr(u8 nexthdr); extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); +extern struct in6_addr *fl6_update_dst(struct flowi *fl, + const struct ipv6_txoptions *opt, + struct in6_addr *orig); + /* * socket options (ipv6_sockglue.c) */ diff --git a/include/net/ipx.h b/include/net/ipx.h index ef51a668ba191b6c2d3d4a62df21a6b47482c363..05d7e4a88b491edbd74420b5b4acac70aac7d71e 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -27,9 +27,9 @@ struct ipx_address { #define IPX_MAX_PPROP_HOPS 8 struct ipxhdr { - __be16 ipx_checksum __attribute__ ((packed)); + __be16 ipx_checksum __packed; #define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF) - __be16 ipx_pktsize __attribute__ ((packed)); + __be16 ipx_pktsize __packed; __u8 ipx_tctrl; __u8 ipx_type; #define IPX_TYPE_UNKNOWN 0x00 @@ -38,8 +38,8 @@ struct ipxhdr { #define IPX_TYPE_SPX 0x05 /* SPX protocol */ #define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */ #define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */ - struct ipx_address ipx_dest __attribute__ ((packed)); - struct ipx_address ipx_source __attribute__ ((packed)); + struct ipx_address ipx_dest __packed; + struct ipx_address ipx_source __packed; }; static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index 7e582061b230dd9dce935059d959572cbba7cc9a..3bed61d379a8aa1de70fe25935810c830cd70ac2 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -53,10 +53,6 @@ typedef __u32 magic_t; #ifndef IRDA_ALIGN # define IRDA_ALIGN __attribute__((aligned)) #endif -#ifndef IRDA_PACK -# define IRDA_PACK __attribute__((packed)) -#endif - #ifdef CONFIG_IRDA_DEBUG diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h index 641f88e848bd3d57ff446517afb1d596652ebf22..6b1dc4f8eca58a231ff481ee6fd5be8ac1a8f15c 100644 --- a/include/net/irda/irlap_frame.h +++ b/include/net/irda/irlap_frame.h @@ -85,7 +85,7 @@ struct discovery_t; struct disc_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct xid_frame { __u8 caddr; /* Connection address */ @@ -96,41 +96,41 @@ struct xid_frame { __u8 flags; /* Discovery flags */ __u8 slotnr; __u8 version; -} IRDA_PACK; +} __packed; struct test_frame { __u8 caddr; /* Connection address */ __u8 control; __le32 saddr; /* Source device address */ __le32 daddr; /* Destination device address */ -} IRDA_PACK; +} __packed; struct ua_frame { __u8 caddr; __u8 control; __le32 saddr; /* Source device address */ __le32 daddr; /* Dest device address */ -} IRDA_PACK; +} __packed; struct dm_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct rd_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct rr_frame { __u8 caddr; /* Connection address */ __u8 control; -} IRDA_PACK; +} __packed; struct i_frame { __u8 caddr; __u8 control; -} IRDA_PACK; +} __packed; struct snrm_frame { __u8 caddr; @@ -138,7 +138,7 @@ struct snrm_frame { __le32 saddr; __le32 daddr; __u8 ncaddr; -} IRDA_PACK; +} __packed; void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb); void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s, diff --git a/include/net/lib80211.h b/include/net/lib80211.h index fb4e2784857d93c23b5358803e7591345073f499..848cce1bb7a53177d46c54f50ffd74ecec551fc4 100644 --- a/include/net/lib80211.h +++ b/include/net/lib80211.h @@ -54,9 +54,6 @@ struct lib80211_crypto_ops { /* 
deinitialize crypto context and free allocated private data */ void (*deinit) (void *priv); - int (*build_iv) (struct sk_buff * skb, int hdr_len, - u8 *key, int keylen, void *priv); - /* encrypt/decrypt return < 0 on error or >= 0 on success. The return * value from decrypt_mpdu is passed as the keyidx value for * decrypt_msdu. skb must have enough head and tail room for the diff --git a/include/net/mac80211.h b/include/net/mac80211.h index de22cbfef23224c6aed205de5179c59644f5384c..b0787a1dea904c69c3ca5f29c0674b54053c41bb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -146,6 +146,9 @@ struct ieee80211_low_level_stats { * enabled/disabled (beaconing modes) * @BSS_CHANGED_CQM: Connection quality monitor config changed * @BSS_CHANGED_IBSS: IBSS join status changed + * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. + * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note + * that it is only ever disabled for station mode. */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -160,10 +163,19 @@ enum ieee80211_bss_change { BSS_CHANGED_BEACON_ENABLED = 1<<9, BSS_CHANGED_CQM = 1<<10, BSS_CHANGED_IBSS = 1<<11, + BSS_CHANGED_ARP_FILTER = 1<<12, + BSS_CHANGED_QOS = 1<<13, /* when adding here, make sure to change ieee80211_reconfig */ }; +/* + * The maximum number of IPv4 addresses listed for ARP filtering. If the number + * of addresses for an interface increase beyond this value, hardware ARP + * filtering will be disabled. + */ +#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4 + /** * struct ieee80211_bss_conf - holds the BSS's changing parameters * @@ -182,7 +194,9 @@ enum ieee80211_bss_change { * if the hardware cannot handle this it must set the * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag * @dtim_period: num of beacons before the next DTIM, for beaconing, - * not valid in station mode (cf. hw conf ps_dtim_period) + * valid in station mode only while @assoc is true and if also + * requested by %IEEE80211_HW_NEED_DTIM_PERIOD (cf. also hw conf + * @ps_dtim_period) * @timestamp: beacon timestamp * @beacon_int: beacon interval * @assoc_capability: capabilities taken from assoc resp @@ -199,6 +213,16 @@ enum ieee80211_bss_change { * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value * implies disabled * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis + * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The + * may filter ARP queries targeted for other addresses than listed here. + * The driver must allow ARP queries targeted for all address listed here + * to pass through. An empty list implies no ARP queries need to pass. + * @arp_addr_cnt: Number of addresses currently on the list. + * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may + * filter ARP queries based on the @arp_addr_list, if disabled, the + * hardware must not perform any ARP filtering. Note, that the filter will + * be enabled also in promiscuous mode. + * @qos: This is a QoS-enabled BSS. 
*/ struct ieee80211_bss_conf { const u8 *bssid; @@ -219,6 +243,10 @@ struct ieee80211_bss_conf { s32 cqm_rssi_thold; u32 cqm_rssi_hyst; enum nl80211_channel_type channel_type; + __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + u8 arp_addr_cnt; + bool arp_filter_enabled; + bool qos; }; /** @@ -312,9 +340,10 @@ enum mac80211_tx_control_flags { IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21), IEEE80211_TX_CTL_LDPC = BIT(22), IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24), -#define IEEE80211_TX_CTL_STBC_SHIFT 23 }; +#define IEEE80211_TX_CTL_STBC_SHIFT 23 + /** * enum mac80211_rate_control_flags - per-rate flags set by the * Rate Control algorithm. @@ -390,7 +419,7 @@ struct ieee80211_tx_rate { s8 idx; u8 count; u8 flags; -} __attribute__((packed)); +} __packed; /** * struct ieee80211_tx_info - skb transmit information @@ -412,8 +441,6 @@ struct ieee80211_tx_rate { * @driver_data: array of driver_data pointers * @ampdu_ack_len: number of acked aggregated frames. * relevant only if IEEE80211_TX_STAT_AMPDU was set. - * @ampdu_ack_map: block ack bit map for the aggregation. - * relevant only if IEEE80211_TX_STAT_AMPDU was set. * @ampdu_len: number of aggregated frames. * relevant only if IEEE80211_TX_STAT_AMPDU was set. * @ack_signal: signal strength of the ACK frame @@ -448,10 +475,9 @@ struct ieee80211_tx_info { struct { struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; u8 ampdu_ack_len; - u64 ampdu_ack_map; int ack_signal; u8 ampdu_len; - /* 7 bytes free */ + /* 15 bytes free */ } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -601,15 +627,14 @@ struct ieee80211_rx_status { * may turn the device off as much as possible. Typically, this flag will * be set when an interface is set UP but not associated or scanning, but * it can also be unset in that case when monitor interfaces are active. - * @IEEE80211_CONF_QOS: Enable 802.11e QoS also know as WMM (Wireless - * Multimedia). On some drivers (iwlwifi is one of know) we have - * to enable/disable QoS explicitly. + * @IEEE80211_CONF_OFFCHANNEL: The device is currently not on its main + * operating channel. */ enum ieee80211_conf_flags { IEEE80211_CONF_MONITOR = (1<<0), IEEE80211_CONF_PS = (1<<1), IEEE80211_CONF_IDLE = (1<<2), - IEEE80211_CONF_QOS = (1<<3), + IEEE80211_CONF_OFFCHANNEL = (1<<3), }; @@ -624,7 +649,6 @@ enum ieee80211_conf_flags { * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed - * @IEEE80211_CONF_CHANGE_QOS: Quality of service was enabled or disabled */ enum ieee80211_conf_changed { IEEE80211_CONF_CHANGE_SMPS = BIT(1), @@ -635,7 +659,6 @@ enum ieee80211_conf_changed { IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), IEEE80211_CONF_CHANGE_IDLE = BIT(8), - IEEE80211_CONF_CHANGE_QOS = BIT(9), }; /** @@ -676,9 +699,6 @@ enum ieee80211_smps_mode { * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the * powersave documentation below. This variable is valid only when * the CONF_PS flag is set. - * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured - * by cfg80211 (essentially, wext) If set, this value overrules the value - * chosen by mac80211 based on ps qos network latency. 
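The arp_addr_list/arp_addr_cnt/arp_filter_enabled fields and the BSS_CHANGED_ARP_FILTER bit introduced above reach drivers through the usual bss_info_changed() path. A hedged sketch of how a driver might consume them (the mydrv_* names and the firmware call are hypothetical):

#include <linux/types.h>
#include <net/mac80211.h>

/* Stand-in for whatever firmware command programs the filter; hypothetical. */
static void mydrv_set_arp_filter(void *priv, const __be32 *addrs, int count)
{
        /* ... push the IPv4 address list to the device ... */
}

static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_bss_conf *info,
                                   u32 changed)
{
        if (!(changed & BSS_CHANGED_ARP_FILTER))
                return;

        if (info->arp_filter_enabled) {
                /* only ARP requests for the listed addresses must reach us */
                mydrv_set_arp_filter(hw->priv, info->arp_addr_list,
                                     info->arp_addr_cnt);
        } else {
                /* filtering off: every ARP frame has to be passed up */
                mydrv_set_arp_filter(hw->priv, NULL, 0);
        }
}

Note the documented contract: with filtering disabled the hardware must not drop any ARP frames, while an enabled filter with an empty list means no ARP queries need to pass.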
* * @power_level: requested transmit power (in dBm) * @@ -698,7 +718,7 @@ enum ieee80211_smps_mode { */ struct ieee80211_conf { u32 flags; - int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout; + int power_level, dynamic_ps_timeout; int max_sleep_period; u16 listen_interval; @@ -815,7 +835,6 @@ enum ieee80211_key_flags { * encrypted in hardware. * @alg: The key algorithm. * @flags: key flags, see &enum ieee80211_key_flags. - * @ap_addr: AP's MAC address * @keyidx: the key index (0-3) * @keylen: key material length * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte) @@ -881,16 +900,12 @@ struct ieee80211_sta { * enum sta_notify_cmd - sta notify command * * Used with the sta_notify() callback in &struct ieee80211_ops, this - * indicates addition and removal of a station to station table, - * or if a associated station made a power state transition. + * indicates if an associated station made a power state transition. * - * @STA_NOTIFY_ADD: (DEPRECATED) a station was added to the station table - * @STA_NOTIFY_REMOVE: (DEPRECATED) a station being removed from the station table * @STA_NOTIFY_SLEEP: a station is now sleeping * @STA_NOTIFY_AWAKE: a sleeping station woke up */ enum sta_notify_cmd { - STA_NOTIFY_ADD, STA_NOTIFY_REMOVE, STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE, }; @@ -1014,6 +1029,9 @@ enum ieee80211_tkip_key_type { * connection quality related parameters, such as the RSSI level and * provide notifications if configured trigger levels are reached. * + * @IEEE80211_HW_NEED_DTIM_PERIOD: + * This device needs to know the DTIM period for the BSS before + * associating. */ enum ieee80211_hw_flags { IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, @@ -1023,7 +1041,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, IEEE80211_HW_SIGNAL_UNSPEC = 1<<5, IEEE80211_HW_SIGNAL_DBM = 1<<6, - /* use this hole */ + IEEE80211_HW_NEED_DTIM_PERIOD = 1<<7, IEEE80211_HW_SPECTRUM_MGMT = 1<<8, IEEE80211_HW_AMPDU_AGGREGATION = 1<<9, IEEE80211_HW_SUPPORTS_PS = 1<<10, @@ -1260,6 +1278,15 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, * dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS * enabled whenever user has enabled powersave. * + * Some hardware need to toggle a single shared antenna between WLAN and + * Bluetooth to facilitate co-existence. These types of hardware set + * limitations on the use of host controlled dynamic powersave whenever there + * is simultaneous WLAN and Bluetooth traffic. For these types of hardware, the + * driver may request temporarily going into full power save, in order to + * enable toggling the antenna between BT and WLAN. If the driver requests + * disabling dynamic powersave, the @dynamic_ps_timeout value will be + * temporarily set to zero until the driver re-enables dynamic powersave. + * * Driver informs U-APSD client support by enabling * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS @@ -1451,7 +1478,7 @@ enum ieee80211_filter_flags { * * Note that drivers MUST be able to deal with a TX aggregation * session being stopped even before they OK'ed starting it by - * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer + * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer * might receive the addBA frame and send a delBA right away! 
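For the aggregation paths referenced above, a driver typically completes setup or teardown from its ampdu_action() callback by calling the *_irqsafe helpers, which after this change may be invoked from any context. A hedged sketch (mydrv_ampdu_action is hypothetical; the ampdu_action() prototype is the one used by this kernel series and may differ in other versions):

#include <linux/errno.h>
#include <net/mac80211.h>

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
                              enum ieee80211_ampdu_mlme_action action,
                              struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                /* ... prepare the hardware queue for aggregation ... */
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                return 0;
        case IEEE80211_AMPDU_TX_STOP:
                /* ... flush frames still queued for this TID ... */
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}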
* * @IEEE80211_AMPDU_RX_START: start Rx aggregation @@ -1636,7 +1663,7 @@ enum ieee80211_ampdu_mlme_action { * is the first frame we expect to perform the action on. Notice * that TX/RX_STOP can pass NULL for this parameter. * Returns a negative error code on failure. - * The callback must be atomic. + * The callback can sleep. * * @get_survey: Return per-channel survey information * @@ -2306,17 +2333,6 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, */ int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid); -/** - * ieee80211_start_tx_ba_cb - low level driver ready to aggregate. - * @vif: &struct ieee80211_vif pointer from the add_interface callback - * @ra: receiver address of the BA session recipient. - * @tid: the TID to BA on. - * - * This function must be called by low level driver once it has - * finished with preparations for the BA session. - */ -void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); - /** * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate. * @vif: &struct ieee80211_vif pointer from the add_interface callback @@ -2324,8 +2340,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); * @tid: the TID to BA on. * * This function must be called by low level driver once it has - * finished with preparations for the BA session. - * This version of the function is IRQ-safe. + * finished with preparations for the BA session. It can be called + * from any context. */ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid); @@ -2334,27 +2350,14 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, * ieee80211_stop_tx_ba_session - Stop a Block Ack session. * @sta: the station whose BA session to stop * @tid: the TID to stop BA. - * @initiator: if indicates initiator DELBA frame will be sent. * - * Return: error if no sta with matching da found, success otherwise + * Return: negative error if the TID is invalid, or no aggregation active * * Although mac80211/low level driver/user space application can estimate * the need to stop aggregation on a certain RA/TID, the session level * will be managed by the mac80211. */ -int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid, - enum ieee80211_back_parties initiator); - -/** - * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate. - * @vif: &struct ieee80211_vif pointer from the add_interface callback - * @ra: receiver address of the BA session recipient. - * @tid: the desired TID to BA on. - * - * This function must be called by low level driver once it has - * finished with preparations for the BA session tear down. - */ -void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); +int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid); /** * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate. @@ -2363,8 +2366,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); * @tid: the desired TID to BA on. * * This function must be called by low level driver once it has - * finished with preparations for the BA session tear down. - * This version of the function is IRQ-safe. + * finished with preparations for the BA session tear down. It + * can be called from any context. 
*/ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid); @@ -2459,6 +2462,36 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif); */ void ieee80211_connection_loss(struct ieee80211_vif *vif); +/** + * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Some hardware require full power save to manage simultaneous BT traffic + * on the WLAN frequency. Full PSM is required periodically, whenever there are + * burst of BT traffic. The hardware gets information of BT traffic via + * hardware co-existence lines, and consequentially requests mac80211 to + * (temporarily) enter full psm. + * This function will only temporarily disable dynamic PS, not enable PSM if + * it was not already enabled. + * The driver must make sure to re-enable dynamic PS using + * ieee80211_enable_dyn_ps() if the driver has disabled it. + * + */ +void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif); + +/** + * ieee80211_enable_dyn_ps - restore dynamic psm after being disabled + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * This function restores dynamic PS after being temporarily disabled via + * ieee80211_disable_dyn_ps(). Each ieee80211_disable_dyn_ps() call must + * be coupled with an eventual call to this function. + * + */ +void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif); + /** * ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring * rssi threshold triggered diff --git a/include/net/mip6.h b/include/net/mip6.h index a83ad1982a90893abb1d369f8b679b1eed9fa24a..26ba99b5a4b139cadb746c95294be3f6789f4c1b 100644 --- a/include/net/mip6.h +++ b/include/net/mip6.h @@ -39,7 +39,7 @@ struct ip6_mh { __u16 ip6mh_cksum; /* Followed by type specific messages */ __u8 data[0]; -} __attribute__ ((__packed__)); +} __packed; #define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */ #define IP6_MH_TYPE_HOTI 1 /* HOTI Message */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index f76f22d057216bb3e28b68a3e68fd4c002e50b30..895997bc2ead8f248818dc6866952d6152e3b730 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -82,7 +82,7 @@ struct ra_msg { struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; -} __attribute__((__packed__)); +} __packed; extern int ndisc_init(void); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index eb21340a573b4906d267a18bf69bbf37bdebb8b4..242879b6c4df8a4f539c3746dabb26bfe08a9dd8 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -151,7 +151,7 @@ struct neigh_table { void (*proxy_redo)(struct sk_buff *skb); char *id; struct neigh_parms parms; - /* HACK. gc_* shoul follow parms without a gap! */ + /* HACK. gc_* should follow parms without a gap! */ int gc_interval; int gc_thresh1; int gc_thresh2; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index bde095f7e845754e86bde0f9860866622b0e7aca..e624dae54fa49b7d713b78641c7f18c2e3cfbae2 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -152,11 +152,7 @@ extern struct net init_net; static inline struct net *nf_ct_net(const struct nf_conn *ct) { -#ifdef CONFIG_NET_NS - return ct->ct_net; -#else - return &init_net; -#endif + return read_pnet(&ct->ct_net); } /* Alter reply tuple (maybe alter helper). 
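The ieee80211_disable_dyn_ps()/ieee80211_enable_dyn_ps() pair documented above must be balanced by the driver. A minimal hypothetical use, assuming the hardware reports each Bluetooth-activity transition exactly once:

#include <linux/types.h>
#include <net/mac80211.h>

/*
 * Hypothetical coexistence hook: every disable call is eventually matched
 * by an enable call, as the API requires.
 */
static void mydrv_bt_coex_notify(struct ieee80211_vif *vif, bool bt_active)
{
        if (bt_active)
                ieee80211_disable_dyn_ps(vif);
        else
                ieee80211_enable_dyn_ps(vif);
}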
*/ @@ -261,7 +257,12 @@ extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, u32 seq); /* Fake conntrack entry for untracked connections */ -extern struct nf_conn nf_conntrack_untracked; +DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); +static inline struct nf_conn *nf_ct_untracked_get(void) +{ + return &__raw_get_cpu_var(nf_conntrack_untracked); +} +extern void nf_ct_untracked_status_or(unsigned long bits); /* Iterate over all conntracks: if iter returns true, it's deleted. */ extern void @@ -289,9 +290,9 @@ static inline int nf_ct_is_dying(struct nf_conn *ct) return test_bit(IPS_DYING_BIT, &ct->status); } -static inline int nf_ct_is_untracked(const struct sk_buff *skb) +static inline int nf_ct_is_untracked(const struct nf_conn *ct) { - return (skb->nfct == &nf_conntrack_untracked.ct_general); + return test_bit(IPS_UNTRACKED_BIT, &ct->status); } extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index 03e218f0be43a66ad65f82d244e1ff7db5391e78..4e9c63a20db22dd498933745ed9cf2627e74fd47 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -45,6 +45,18 @@ struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) extern unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir); +/* Check if connection tracking accounting is enabled */ +static inline bool nf_ct_acct_enabled(struct net *net) +{ + return net->ct.sysctl_acct != 0; +} + +/* Enable/disable connection tracking accounting */ +static inline void nf_ct_set_acct(struct net *net, bool enable) +{ + net->ct.sysctl_acct = enable; +} + extern int nf_conntrack_acct_init(struct net *net); extern void nf_conntrack_acct_fini(struct net *net); diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 3d7524fba1946d2993033f44b8330f210eaa4bb4..aced085132e7e52591f05ba1f1529dbba420465b 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -60,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) struct nf_conn *ct = (struct nf_conn *)skb->nfct; int ret = NF_ACCEPT; - if (ct && ct != &nf_conntrack_untracked) { + if (ct && !nf_ct_is_untracked(ct)) { if (!nf_ct_is_confirmed(ct)) ret = __nf_conntrack_confirm(skb); if (likely(ret == NF_ACCEPT)) diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 32d15bd6efa3c573030e899bb979d98dcc811ee9..0772d296dfdb12b26a4bd8d97fecdd914c088536 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -28,9 +28,14 @@ struct nf_ct_ext { char data[0]; }; -static inline int nf_ct_ext_exist(const struct nf_conn *ct, u8 id) +static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id) { - return (ct->ext && ct->ext->offset[id]); + return !!ext->offset[id]; +} + +static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id) +{ + return (ct->ext && __nf_ct_ext_exist(ct->ext, id)); } static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h index c398017ccfa3d364e1d43b5b541405f0a1f575e4..df17bac46bf5ef81cc4a1f3114e99eb54a913758 100644 --- a/include/net/netfilter/nf_nat_protocol.h +++ b/include/net/netfilter/nf_nat_protocol.h @@ -27,9 +27,9 @@ struct 
nf_nat_protocol { /* Alter the per-proto part of the tuple (depending on maniptype), to give a unique tuple in the given range if - possible; return false if not. Per-protocol part of tuple - is initialized to the incoming packet. */ - bool (*unique_tuple)(struct nf_conntrack_tuple *tuple, + possible. Per-protocol part of tuple is initialized to the + incoming packet. */ + void (*unique_tuple)(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct); @@ -63,7 +63,7 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, const union nf_conntrack_man_proto *min, const union nf_conntrack_man_proto *max); -extern bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, +extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct, diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h index e4a18ae361c62fd543999554461299ad86ba3714..2890bdc4cd9238f23ac6337c0a992fb1b0afb9f5 100644 --- a/include/net/netfilter/nf_nat_rule.h +++ b/include/net/netfilter/nf_nat_rule.h @@ -12,6 +12,4 @@ extern int nf_nat_rule_find(struct sk_buff *skb, const struct net_device *out, struct nf_conn *ct); -extern unsigned int -alloc_null_binding(struct nf_conn *ct, unsigned int hooknum); #endif /* _NF_NAT_RULE_H */ diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h index b0569ff0775ed649fd07fa460bfcbd43cc91f746..e2dec42c2db2b5c07afc98165b140b76aa599f56 100644 --- a/include/net/netfilter/nfnetlink_log.h +++ b/include/net/netfilter/nfnetlink_log.h @@ -10,5 +10,7 @@ nfulnl_log_packet(u_int8_t pf, const struct nf_loginfo *li_user, const char *prefix); +#define NFULNL_COPY_DISABLED 0xff + #endif /* _KER_NFNETLINK_LOG_H */ diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h index ddbf37e19616a486b73b15a7e5314c47e1ed80a8..5a2978d1cb22ae8713d43688b74370e44727725e 100644 --- a/include/net/netfilter/xt_rateest.h +++ b/include/net/netfilter/xt_rateest.h @@ -2,13 +2,18 @@ #define _XT_RATEEST_H struct xt_rateest { + /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */ + struct gnet_stats_basic_packed bstats; + spinlock_t lock; + /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */ + struct gnet_stats_rate_est rstats; + + /* following fields not accessed in hot path */ struct hlist_node list; char name[IFNAMSIZ]; unsigned int refcnt; - spinlock_t lock; struct gnet_estimator params; - struct gnet_stats_rate_est rstats; - struct gnet_stats_basic_packed bstats; + struct rcu_head rcu; }; extern struct xt_rateest *xt_rateest_lookup(const char *name); diff --git a/include/net/netlink.h b/include/net/netlink.h index 4fc05b58503eb4beab8fc20b3ab0b762f9f3d293..f3b201d335b3532858c846a8aadb1672de80dce3 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -35,7 +35,7 @@ * nlmsg_new() create a new netlink message * nlmsg_put() add a netlink message to an skb * nlmsg_put_answer() callback based nlmsg_put() - * nlmsg_end() finanlize netlink message + * nlmsg_end() finalize netlink message * nlmsg_get_pos() return current position in message * nlmsg_trim() trim part of message * nlmsg_cancel() cancel message construction diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index d7b989ca3d6320e3c7f7abe0f9d25d5565eadb6e..2d16783d5e201232d8ff47c1165d767ea91435de 100644 --- 
a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h @@ -34,6 +34,7 @@ struct phonet_device { struct list_head list; struct net_device *netdev; DECLARE_BITMAP(addrs, 64); + struct rcu_head rcu; }; int phonet_device_init(void); diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 9d4d87cc970e3b487c5c0879008bb0588a0e87dd..d9549af6929a1d4c7567ba94a1811cd31a996ac4 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -95,7 +95,7 @@ extern void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { - if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) + if (qdisc_run_begin(q)) __qdisc_run(q); } diff --git a/include/net/regulatory.h b/include/net/regulatory.h index f873ee37f7e4d5e341d1fcd3a303f15f82b08579..9e103a4e91eec69fb07c82cb9b9e5a02875cadc4 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -54,7 +54,6 @@ struct regulatory_request { enum nl80211_reg_initiator initiator; char alpha2[2]; bool intersect; - u32 country_ie_checksum; enum environment_cap country_ie_env; struct list_head list; }; diff --git a/include/net/route.h b/include/net/route.h index af6cf4b4c9dc53aef5d8d1a21abb2cf71a29045a..bd732d62e1c3a3181c7c324d695b02e1548970e0 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -50,9 +50,7 @@ struct fib_nh; struct inet_peer; struct rtable { - union { - struct dst_entry dst; - } u; + struct dst_entry dst; /* Cache lookup keys */ struct flowi fl; @@ -144,7 +142,7 @@ extern void fib_add_ifaddr(struct in_ifaddr *); static inline void ip_rt_put(struct rtable * rt) { if (rt) - dst_release(&rt->u.dst); + dst_release(&rt->dst); } #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 433604bb3fe8a70df241dcdc1370a867fd57ed92..3c8728aaab4e061a53a80fd12810548c25fed168 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -23,11 +23,17 @@ struct qdisc_rate_table { }; enum qdisc_state_t { - __QDISC_STATE_RUNNING, __QDISC_STATE_SCHED, __QDISC_STATE_DEACTIVATED, }; +/* + * following bits are only changed while qdisc lock is held + */ +enum qdisc___state_t { + __QDISC___STATE_RUNNING, +}; + struct qdisc_size_table { struct list_head list; struct tc_sizespec szopts; @@ -72,10 +78,27 @@ struct Qdisc { unsigned long state; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; + unsigned long __state; struct gnet_stats_queue qstats; - struct rcu_head rcu_head; + struct rcu_head rcu_head; + spinlock_t busylock; }; +static inline bool qdisc_is_running(struct Qdisc *qdisc) +{ + return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state); +} + +static inline bool qdisc_run_begin(struct Qdisc *qdisc) +{ + return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state); +} + +static inline void qdisc_run_end(struct Qdisc *qdisc) +{ + __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state); +} + struct Qdisc_class_ops { /* Child qdisc manipulation */ struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); @@ -583,9 +606,16 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) } #ifdef CONFIG_NET_CLS_ACT -static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask) +static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, + int action) { - struct sk_buff *n = skb_clone(skb, gfp_mask); + struct sk_buff *n; + + if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) && + !skb_shared(skb)) + n = skb_get(skb); + else + n = skb_clone(skb, 
gfp_mask); if (n) { n->tc_verd = SET_TC_VERD(n->tc_verd, 0); diff --git a/include/net/scm.h b/include/net/scm.h index 8360e47aa7e322336099d09163db1b4254e47016..31656506d967a4a2349a74a158cdb3fa3ecb6986 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -19,8 +19,10 @@ struct scm_fp_list { }; struct scm_cookie { - struct ucred creds; /* Skb credentials */ + struct pid *pid; /* Skb credentials */ + const struct cred *cred; struct scm_fp_list *fp; /* Passed files */ + struct ucred creds; /* Skb credentials */ #ifdef CONFIG_SECURITY_NETWORK u32 secid; /* Passed security ID */ #endif @@ -42,8 +44,27 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co { } #endif /* CONFIG_SECURITY_NETWORK */ +static __inline__ void scm_set_cred(struct scm_cookie *scm, + struct pid *pid, const struct cred *cred) +{ + scm->pid = get_pid(pid); + scm->cred = get_cred(cred); + cred_to_ucred(pid, cred, &scm->creds); +} + +static __inline__ void scm_destroy_cred(struct scm_cookie *scm) +{ + put_pid(scm->pid); + scm->pid = NULL; + + if (scm->cred) + put_cred(scm->cred); + scm->cred = NULL; +} + static __inline__ void scm_destroy(struct scm_cookie *scm) { + scm_destroy_cred(scm); if (scm && scm->fp) __scm_destroy(scm); } @@ -51,10 +72,7 @@ static __inline__ void scm_destroy(struct scm_cookie *scm) static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) { - struct task_struct *p = current; - scm->creds.uid = current_uid(); - scm->creds.gid = current_gid(); - scm->creds.pid = task_tgid_vnr(p); + scm_set_cred(scm, task_tgid(current), current_cred()); scm->fp = NULL; unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) @@ -96,6 +114,8 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, if (test_bit(SOCK_PASSCRED, &sock->flags)) put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); + scm_destroy_cred(scm); + scm_passec(sock, msg, scm); if (!scm->fp) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4b860116e096c9e83d6d3cf289d0bec1bd3750d3..f9e7473613bdbb038aad94befb0807040c20b774 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -443,7 +443,7 @@ struct sctp_signed_cookie { __u8 signature[SCTP_SECRET_SIZE]; __u32 __pad; /* force sctp_cookie alignment to 64 bits */ struct sctp_cookie c; -} __attribute__((packed)); +} __packed; /* This is another convenience type to allocate memory for address * params for the maximum size and pass such structures around @@ -488,7 +488,7 @@ typedef struct sctp_sender_hb_info { union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; -} __attribute__((packed)) sctp_sender_hb_info_t; +} __packed sctp_sender_hb_info_t; /* * RFC 2960 1.3.2 Sequenced Delivery within Streams diff --git a/include/net/snmp.h b/include/net/snmp.h index 899003d18db9e57ab8813176f4ea896a27376c49..a0e61806d480bed5d13198b9852c10d482c04556 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -47,15 +47,16 @@ struct snmp_mib { } /* - * We use all unsigned longs. Linux will soon be so reliable that even - * these will rapidly get too small 8-). Seriously consider the IpInReceives - * count on the 20Gb/s + networks people expect in a few years time! + * We use unsigned longs for most mibs but u64 for ipstats. 
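The 64-bit ipstats counters introduced here rely on u64_stats_sync so that 32-bit readers never observe a torn value; the SNMP_*_STATS64 macros that follow are the per-field writers and snmp_fold_field64() is the reader. A self-contained sketch of the same writer/reader pairing on a hypothetical per-CPU block (the demo_* names are made up):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counter block, shaped like struct ipstats_mib. */
struct demo_stats {
        u64                     packets;
        u64                     octets;
        struct u64_stats_sync   syncp;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);

/* Writer, assumed to run in softirq context (cf. SNMP_UPD_PO_STATS64_BH). */
static void demo_count_packet(unsigned int len)
{
        struct demo_stats *s = &__get_cpu_var(demo_stats);

        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->octets += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader: the retry loop is what keeps 32-bit reads tear-free. */
static u64 demo_fold_octets(void)
{
        u64 sum = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct demo_stats *s = &per_cpu(demo_stats, cpu);
                unsigned int start;
                u64 v;

                do {
                        start = u64_stats_fetch_begin(&s->syncp);
                        v = s->octets;
                } while (u64_stats_fetch_retry(&s->syncp, start));
                sum += v;
        }
        return sum;
}

On 64-bit kernels the seqcount helpers compile away, which is why the macros fall back to the plain SNMP_* variants in the #else branch.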
*/ +#include /* IPstats */ #define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX struct ipstats_mib { - unsigned long mibs[IPSTATS_MIB_MAX]; + /* mibs[] must be first field of struct ipstats_mib */ + u64 mibs[IPSTATS_MIB_MAX]; + struct u64_stats_sync syncp; }; /* ICMP */ @@ -155,4 +156,70 @@ struct linux_xfrm_mib { ptr->mibs[basefield##PKTS]++; \ ptr->mibs[basefield##OCTETS] += addend;\ } while (0) + + +#if BITS_PER_LONG==32 + +#define SNMP_ADD_STATS64_BH(mib, field, addend) \ + do { \ + __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[field] += addend; \ + u64_stats_update_end(&ptr->syncp); \ + } while (0) +#define SNMP_ADD_STATS64_USER(mib, field, addend) \ + do { \ + __typeof__(*mib[0]) *ptr; \ + preempt_disable(); \ + ptr = __this_cpu_ptr((mib)[1]); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[field] += addend; \ + u64_stats_update_end(&ptr->syncp); \ + preempt_enable(); \ + } while (0) +#define SNMP_ADD_STATS64(mib, field, addend) \ + do { \ + __typeof__(*mib[0]) *ptr; \ + preempt_disable(); \ + ptr = __this_cpu_ptr((mib)[!in_softirq()]); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[field] += addend; \ + u64_stats_update_end(&ptr->syncp); \ + preempt_enable(); \ + } while (0) +#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1) +#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1) +#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) +#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ + do { \ + __typeof__(*mib[0]) *ptr; \ + preempt_disable(); \ + ptr = __this_cpu_ptr((mib)[!in_softirq()]); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[basefield##PKTS]++; \ + ptr->mibs[basefield##OCTETS] += addend; \ + u64_stats_update_end(&ptr->syncp); \ + preempt_enable(); \ + } while (0) +#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ + do { \ + __typeof__(*mib[0]) *ptr; \ + ptr = __this_cpu_ptr((mib)[!in_softirq()]); \ + u64_stats_update_begin(&ptr->syncp); \ + ptr->mibs[basefield##PKTS]++; \ + ptr->mibs[basefield##OCTETS] += addend; \ + u64_stats_update_end(&ptr->syncp); \ + } while (0) +#else +#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field) +#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field) +#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field) +#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field) +#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend) +#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend) +#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend) +#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend) +#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend) +#endif + #endif diff --git a/include/net/sock.h b/include/net/sock.h index 0a691ea7654aefb403e25039f08641691d3de74c..a441c9cdd62540cd7100189c5fa7aea2f6b0b22b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -295,7 +295,8 @@ struct sock { unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; - struct ucred sk_peercred; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; struct sk_filter *sk_filter; @@ -771,6 +772,7 @@ struct proto { int *sysctl_wmem; int *sysctl_rmem; int max_header; + bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; @@ 
-1706,19 +1708,13 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e static inline struct net *sock_net(const struct sock *sk) { -#ifdef CONFIG_NET_NS - return sk->sk_net; -#else - return &init_net; -#endif + return read_pnet(&sk->sk_net); } static inline void sock_net_set(struct sock *sk, struct net *net) { -#ifdef CONFIG_NET_NS - sk->sk_net = net; -#endif + write_pnet(&sk->sk_net, net); } /* diff --git a/include/net/tcp.h b/include/net/tcp.h index a1449144848aad1e09a0de356df96f898cd651e8..df6a2eb20193f935bc72204ca22c7f2f21b5070f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -296,45 +296,30 @@ extern struct proto tcp_prot; #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val) #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) -extern void tcp_v4_err(struct sk_buff *skb, u32); - -extern void tcp_shutdown (struct sock *sk, int how); - -extern int tcp_v4_rcv(struct sk_buff *skb); - -extern int tcp_v4_remember_stamp(struct sock *sk); - -extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); - -extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size); -extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); - -extern int tcp_ioctl(struct sock *sk, - int cmd, - unsigned long arg); - -extern int tcp_rcv_state_process(struct sock *sk, - struct sk_buff *skb, - struct tcphdr *th, - unsigned len); - -extern int tcp_rcv_established(struct sock *sk, - struct sk_buff *skb, - struct tcphdr *th, - unsigned len); - -extern void tcp_rcv_space_adjust(struct sock *sk); - -extern void tcp_cleanup_rbuf(struct sock *sk, int copied); - -extern int tcp_twsk_unique(struct sock *sk, - struct sock *sktw, void *twp); - -extern void tcp_twsk_destructor(struct sock *sk); - -extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, unsigned int flags); +extern void tcp_v4_err(struct sk_buff *skb, u32); + +extern void tcp_shutdown (struct sock *sk, int how); + +extern int tcp_v4_rcv(struct sk_buff *skb); + +extern int tcp_v4_remember_stamp(struct sock *sk); +extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); +extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t size); +extern int tcp_sendpage(struct sock *sk, struct page *page, int offset, + size_t size, int flags); +extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); +extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, + struct tcphdr *th, unsigned len); +extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, + struct tcphdr *th, unsigned len); +extern void tcp_rcv_space_adjust(struct sock *sk); +extern void tcp_cleanup_rbuf(struct sock *sk, int copied); +extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); +extern void tcp_twsk_destructor(struct sock *sk); +extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) @@ -372,88 +357,59 @@ enum tcp_tw_status { }; -extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, - struct sk_buff *skb, - const struct tcphdr *th); - -extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, - struct request_sock *req, - struct 
request_sock **prev); -extern int tcp_child_process(struct sock *parent, - struct sock *child, - struct sk_buff *skb); -extern int tcp_use_frto(struct sock *sk); -extern void tcp_enter_frto(struct sock *sk); -extern void tcp_enter_loss(struct sock *sk, int how); -extern void tcp_clear_retrans(struct tcp_sock *tp); -extern void tcp_update_metrics(struct sock *sk); - -extern void tcp_close(struct sock *sk, - long timeout); -extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); - -extern int tcp_getsockopt(struct sock *sk, int level, - int optname, - char __user *optval, - int __user *optlen); -extern int tcp_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, - unsigned int optlen); -extern int compat_tcp_getsockopt(struct sock *sk, - int level, int optname, - char __user *optval, int __user *optlen); -extern int compat_tcp_setsockopt(struct sock *sk, - int level, int optname, - char __user *optval, unsigned int optlen); -extern void tcp_set_keepalive(struct sock *sk, int val); -extern void tcp_syn_ack_timeout(struct sock *sk, - struct request_sock *req); -extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, - size_t len, int nonblock, - int flags, int *addr_len); - -extern void tcp_parse_options(struct sk_buff *skb, - struct tcp_options_received *opt_rx, - u8 **hvpp, - int estab); - -extern u8 *tcp_parse_md5sig_option(struct tcphdr *th); +extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, + struct sk_buff *skb, + const struct tcphdr *th); +extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, + struct request_sock *req, + struct request_sock **prev); +extern int tcp_child_process(struct sock *parent, struct sock *child, + struct sk_buff *skb); +extern int tcp_use_frto(struct sock *sk); +extern void tcp_enter_frto(struct sock *sk); +extern void tcp_enter_loss(struct sock *sk, int how); +extern void tcp_clear_retrans(struct tcp_sock *tp); +extern void tcp_update_metrics(struct sock *sk); +extern void tcp_close(struct sock *sk, long timeout); +extern unsigned int tcp_poll(struct file * file, struct socket *sock, + struct poll_table_struct *wait); +extern int tcp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +extern int tcp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); +extern void tcp_set_keepalive(struct sock *sk, int val); +extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); +extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len, int nonblock, int flags, int *addr_len); +extern void tcp_parse_options(struct sk_buff *skb, + struct tcp_options_received *opt_rx, u8 **hvpp, + int estab); +extern u8 *tcp_parse_md5sig_option(struct tcphdr *th); /* * TCP v4 functions exported for the inet6 API */ -extern void tcp_v4_send_check(struct sock *sk, - struct sk_buff *skb); - -extern int tcp_v4_conn_request(struct sock *sk, - struct sk_buff *skb); - -extern struct sock * tcp_create_openreq_child(struct sock *sk, - struct request_sock *req, - struct sk_buff *skb); - -extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, - struct sk_buff *skb, - struct 
request_sock *req, - struct dst_entry *dst); - -extern int tcp_v4_do_rcv(struct sock *sk, +extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); +extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); +extern struct sock * tcp_create_openreq_child(struct sock *sk, + struct request_sock *req, struct sk_buff *skb); - -extern int tcp_v4_connect(struct sock *sk, - struct sockaddr *uaddr, - int addr_len); - -extern int tcp_connect(struct sock *sk); - -extern struct sk_buff * tcp_make_synack(struct sock *sk, - struct dst_entry *dst, - struct request_sock *req, - struct request_values *rvp); - -extern int tcp_disconnect(struct sock *sk, int flags); +extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst); +extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); +extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, + int addr_len); +extern int tcp_connect(struct sock *sk); +extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, + struct request_sock *req, + struct request_values *rvp); +extern int tcp_disconnect(struct sock *sk, int flags); /* From syncookies.c */ @@ -464,7 +420,7 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss); extern __u32 cookie_init_timestamp(struct request_sock *req); -extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt); +extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *); /* From net/ipv6/syncookies.c */ extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); @@ -485,10 +441,10 @@ extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); extern void tcp_send_probe0(struct sock *); extern void tcp_send_partial(struct sock *); -extern int tcp_write_wakeup(struct sock *); +extern int tcp_write_wakeup(struct sock *); extern void tcp_send_fin(struct sock *sk); extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); -extern int tcp_send_synack(struct sock *); +extern int tcp_send_synack(struct sock *); extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); extern void tcp_send_delayed_ack(struct sock *sk); @@ -592,7 +548,7 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp) * scaling applied to the result. The caller does these things * if necessary. This is a "raw" window selection. */ -extern u32 __tcp_select_window(struct sock *sk); +extern u32 __tcp_select_window(struct sock *sk); /* TCP timestamps are only 32-bits, this causes a slight * complication on 64-bit systems since we store a snapshot @@ -602,12 +558,22 @@ extern u32 __tcp_select_window(struct sock *sk); */ #define tcp_time_stamp ((__u32)(jiffies)) +#define tcp_flag_byte(th) (((u_int8_t *)th)[13]) + +#define TCPHDR_FIN 0x01 +#define TCPHDR_SYN 0x02 +#define TCPHDR_RST 0x04 +#define TCPHDR_PSH 0x08 +#define TCPHDR_ACK 0x10 +#define TCPHDR_URG 0x20 +#define TCPHDR_ECE 0x40 +#define TCPHDR_CWR 0x80 + /* This is what the send packet queuing engine uses to pass - * TCP per-packet control information to the transmission - * code. We also store the host-order sequence numbers in - * here too. This is 36 bytes on 32-bit architectures, - * 40 bytes on 64-bit machines, if this grows please adjust - * skbuff.h:skbuff->cb[xxx] size appropriately. + * TCP per-packet control information to the transmission code. + * We also store the host-order sequence numbers in here too. 
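The TCPHDR_* constants defined above match the on-the-wire flag byte, so they can be tested against tcp_flag_byte() of a live header as well as against tcp_skb_cb->flags. A small illustrative helper (the function itself is hypothetical):

#include <linux/types.h>
#include <net/tcp.h>

/* Illustrative helper: classify a segment by its raw flag byte. */
static bool is_pure_syn(struct tcphdr *th)
{
        u8 flags = tcp_flag_byte(th);

        return (flags & (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_RST | TCPHDR_FIN))
                == TCPHDR_SYN;
}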
+ * This is 44 bytes if IPV6 is enabled. + * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately. */ struct tcp_skb_cb { union { @@ -620,19 +586,6 @@ struct tcp_skb_cb { __u32 end_seq; /* SEQ + FIN + SYN + datalen */ __u32 when; /* used to compute rtt's */ __u8 flags; /* TCP header flags. */ - - /* NOTE: These must match up to the flags byte in a - * real TCP header. - */ -#define TCPCB_FLAG_FIN 0x01 -#define TCPCB_FLAG_SYN 0x02 -#define TCPCB_FLAG_RST 0x04 -#define TCPCB_FLAG_PSH 0x08 -#define TCPCB_FLAG_ACK 0x10 -#define TCPCB_FLAG_URG 0x20 -#define TCPCB_FLAG_ECE 0x40 -#define TCPCB_FLAG_CWR 0x80 - __u8 sacked; /* State flags for SACK/FACK. */ #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ @@ -1176,22 +1129,14 @@ struct tcp_md5sig_pool { #define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */ /* - functions */ -extern int tcp_v4_md5_hash_skb(char *md5_hash, - struct tcp_md5sig_key *key, - struct sock *sk, - struct request_sock *req, - struct sk_buff *skb); - -extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, - struct sock *addr_sk); - -extern int tcp_v4_md5_do_add(struct sock *sk, - __be32 addr, - u8 *newkey, - u8 newkeylen); - -extern int tcp_v4_md5_do_del(struct sock *sk, - __be32 addr); +extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, + struct sock *sk, struct request_sock *req, + struct sk_buff *skb); +extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk, + struct sock *addr_sk); +extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey, + u8 newkeylen); +extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr); #ifdef CONFIG_TCP_MD5SIG #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? 
\ @@ -1204,10 +1149,10 @@ extern int tcp_v4_md5_do_del(struct sock *sk, #endif extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); -extern void tcp_free_md5sig_pool(void); +extern void tcp_free_md5sig_pool(void); extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); -extern void tcp_put_md5sig_pool(void); +extern void tcp_put_md5sig_pool(void); extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, @@ -1413,7 +1358,8 @@ struct tcp_iter_state { sa_family_t family; enum tcp_seq_states state; struct sock *syn_wait_sk; - int bucket, sbucket, num, uid; + int bucket, offset, sbucket, num, uid; + loff_t last_pos; }; extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo); @@ -1434,7 +1380,7 @@ extern int tcp_gro_complete(struct sk_buff *skb); extern int tcp4_gro_complete(struct sk_buff *skb); #ifdef CONFIG_PROC_FS -extern int tcp4_proc_init(void); +extern int tcp4_proc_init(void); extern void tcp4_proc_exit(void); #endif diff --git a/include/net/udp.h b/include/net/udp.h index 5348d80b25bb8a0593c9f0a80f42c4a6b2aeebc9..7abdf305da50fad63880b12c2c76a69b6b74a5fd 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -157,30 +157,28 @@ static inline void udp_lib_close(struct sock *sk, long timeout) sk_common_release(sk); } -extern int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*)(const struct sock *,const struct sock *), - unsigned int hash2_nulladdr); +extern int udp_lib_get_port(struct sock *sk, unsigned short snum, + int (*)(const struct sock *,const struct sock *), + unsigned int hash2_nulladdr); /* net/ipv4/udp.c */ -extern int udp_get_port(struct sock *sk, unsigned short snum, - int (*saddr_cmp)(const struct sock *, const struct sock *)); -extern void udp_err(struct sk_buff *, u32); - -extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, +extern int udp_get_port(struct sock *sk, unsigned short snum, + int (*saddr_cmp)(const struct sock *, + const struct sock *)); +extern void udp_err(struct sk_buff *, u32); +extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); -extern void udp_flush_pending_frames(struct sock *sk); - -extern int udp_rcv(struct sk_buff *skb); -extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); -extern int udp_disconnect(struct sock *sk, int flags); +extern void udp_flush_pending_frames(struct sock *sk); +extern int udp_rcv(struct sk_buff *skb); +extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); +extern int udp_disconnect(struct sock *sk, int flags); extern unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); -extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen, - int (*push_pending_frames)(struct sock *)); - +extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen); +extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen, + int (*push_pending_frames)(struct sock *)); extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); @@ -236,7 +234,7 @@ struct udp_iter_state { extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo); extern void 
udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo); -extern int udp4_proc_init(void); +extern int udp4_proc_init(void); extern void udp4_proc_exit(void); #endif diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index b69e6e173ea19fb8e5ccba2c62a8425dca7ccf0e..9b2c30897e5037f79cb51081e8a7015258fc7918 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -65,7 +65,7 @@ struct rxrpc_header { }; __be16 serviceId; /* service ID */ -} __attribute__((packed)); +} __packed; #define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X) @@ -120,7 +120,7 @@ struct rxrpc_ackpacket { #define RXRPC_ACK_TYPE_NACK 0 #define RXRPC_ACK_TYPE_ACK 1 -} __attribute__((packed)); +} __packed; /* * ACK packets can have a further piece of information tagged on the end @@ -141,7 +141,7 @@ struct rxkad_challenge { __be32 nonce; /* encrypted random number */ __be32 min_level; /* minimum security level */ __be32 __padding; /* padding to 8-byte boundary */ -} __attribute__((packed)); +} __packed; /*****************************************************************************/ /* @@ -164,7 +164,7 @@ struct rxkad_response { __be32 kvno; /* Kerberos key version number */ __be32 ticket_len; /* Kerberos ticket length */ -} __attribute__((packed)); +} __packed; /*****************************************************************************/ /* diff --git a/kernel/audit.c b/kernel/audit.c index c71bd26631a28ccf3ecb1d90319b7b35f5e6f227..8296aa516c5a728ff3db1f61fe337b1f21e4a40b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -407,7 +407,7 @@ static void kauditd_send_skb(struct sk_buff *skb) audit_hold_skb(skb); } else /* drop the extra reference if sent ok */ - kfree_skb(skb); + consume_skb(skb); } static int kauditd_thread(void *dummy) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3ac6f5b0a64b7448aceac7407c04cf7109f7ef5d..a8ce099544049e787464c3e4261ac70625cc0653 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1788,6 +1788,29 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) return retval; } +/** + * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup + * @tsk: the task to be attached + */ +int cgroup_attach_task_current_cg(struct task_struct *tsk) +{ + struct cgroupfs_root *root; + struct cgroup *cur_cg; + int retval = 0; + + cgroup_lock(); + for_each_active_root(root) { + cur_cg = task_cgroup_from_root(current, root); + retval = cgroup_attach_task(cur_cg, tsk); + if (retval) + break; + } + cgroup_unlock(); + + return retval; +} +EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); + /* * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex * held. May take task_lock of task diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index b2d70d38dff454116a3cba9dee760981f298415b..25915832291acf2931d2ac7bbabdcad1c3cf3aee 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -9,6 +9,7 @@ #include #include #include +#include #include /* @@ -82,3 +83,46 @@ void free_user_ns(struct kref *kref) schedule_work(&ns->destroyer); } EXPORT_SYMBOL(free_user_ns); + +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid) +{ + struct user_namespace *tmp; + + if (likely(to == cred->user->user_ns)) + return uid; + + + /* Is cred->user the creator of the target user_ns + * or the creator of one of it's parents? 
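The cgroup_attach_task_current_cg() helper added above gives a driver a one-call way to place a task it has just created into every active cgroup hierarchy of the calling task, so the new task's resource usage is charged to the caller. A minimal sketch of the intended usage, assuming a hypothetical worker thread (my_worker_fn, the device pointer and the thread name are placeholders, not part of this patch):

	#include <linux/cgroup.h>
	#include <linux/err.h>
	#include <linux/kthread.h>

	static int my_worker_fn(void *data)
	{
		/* ... service requests until kthread_should_stop() ... */
		return 0;
	}

	static int my_start_worker(void *dev)
	{
		struct task_struct *worker;
		int err;

		worker = kthread_run(my_worker_fn, dev, "my-worker");
		if (IS_ERR(worker))
			return PTR_ERR(worker);

		/* Charge the worker to the cgroups of the task that asked for it. */
		err = cgroup_attach_task_current_cg(worker);
		if (err)
			kthread_stop(worker);
		return err;
	}

Note that the helper walks every active hierarchy under cgroup_lock(), so it may only be called from process context.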
+ */ + for ( tmp = to; tmp != &init_user_ns; + tmp = tmp->creator->user_ns ) { + if (cred->user == tmp->creator) { + return (uid_t)0; + } + } + + /* No useful relationship so no mapping */ + return overflowuid; +} + +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid) +{ + struct user_namespace *tmp; + + if (likely(to == cred->user->user_ns)) + return gid; + + /* Is cred->user the creator of the target user_ns + * or the creator of one of it's parents? + */ + for ( tmp = to; tmp != &init_user_ns; + tmp = tmp->creator->user_ns ) { + if (cred->user == tmp->creator) { + return (gid_t)0; + } + } + + /* No useful relationship so no mapping */ + return overflowgid; +} diff --git a/lib/vsprintf.c b/lib/vsprintf.c index b8a2f549ab0ef5db70ecae07de8ea3be4d67a06d..4ee19d0d3910b36ad15f88688db46d5d1249fa61 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -980,6 +980,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr, * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] * little endian output byte order is: * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] + * - 'V' For a struct va_format which contains a format string * and va_list *, + * call vsnprintf(->format, *->va_list). + * Implements a "recursive vsnprintf". + * Do not use this feature without some mechanism to verify the + * correctness of the format string and va_list arguments. * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a @@ -1025,6 +1030,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, break; case 'U': return uuid_string(buf, end, ptr, spec, fmt); + case 'V': + return buf + vsnprintf(buf, end - buf, + ((struct va_format *)ptr)->fmt, + *(((struct va_format *)ptr)->va)); } spec.flags |= SMALL; if (spec.field_width == -1) { diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 3c1c8c14e929d689104035727a462d5d6f9f18d8..a2ad1525057590ef6fce28eb342a101ae8c2ed19 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -155,9 +155,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) BUG_ON(!grp); /* Take it out of our own structures, but be sure to interlock with - * HW accelerating devices or SW vlan input packet processing. + * HW accelerating devices or SW vlan input packet processing if + * VLAN is not 0 (leave it there for 802.1p). 
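The new '%pV' conversion documented above exists for printk-style wrappers: the wrapper stores its own format string and a pointer to its va_list in a struct va_format (the ->fmt and ->va members dereferenced in pointer() above) and emits both through a single outer format. A short sketch of such a wrapper, with an illustrative function name and message prefix:

	#include <linux/kernel.h>

	void my_subsys_warn(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* Prefix and the caller's message are formatted in one pass;
		 * the inner vsnprintf() consumes the caller's va_list. */
		printk(KERN_WARNING "my_subsys: %pV", &vaf);
		va_end(args);
	}

As the comment block warns, any format string reaching %pV must be trusted: the recursive vsnprintf() call performs no additional validation of the forwarded va_list.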
*/ - if (real_dev->features & NETIF_F_HW_VLAN_FILTER) + if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER)) ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); grp->nr_vlans--; @@ -419,6 +420,14 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, if (is_vlan_dev(dev)) __vlan_device_event(dev, event); + if ((event == NETDEV_UP) && + (dev->features & NETIF_F_HW_VLAN_FILTER) && + dev->netdev_ops->ndo_vlan_rx_add_vid) { + pr_info("8021q: adding VLAN 0 to HW filter on device %s\n", + dev->name); + dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0); + } + grp = __vlan_find_group(dev); if (!grp) goto out; diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 6abdcac1b2e81f1e59e3029569b719ea86c2c4db..8d9503ad01daa65729212de33f579b32aeffc1dc 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -2,6 +2,7 @@ #define __BEN_VLAN_802_1Q_INC__ #include +#include /** @@ -21,14 +22,16 @@ struct vlan_priority_tci_mapping { * struct vlan_rx_stats - VLAN percpu rx stats * @rx_packets: number of received packets * @rx_bytes: number of received bytes - * @multicast: number of received multicast packets + * @rx_multicast: number of received multicast packets + * @syncp: synchronization point for 64bit counters * @rx_errors: number of errors */ struct vlan_rx_stats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long multicast; - unsigned long rx_errors; + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + struct u64_stats_sync syncp; + unsigned long rx_errors; }; /** diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 50f58f5f1c3491f775a5ac6c97ea32c5c5b23674..01ddb0472f86c511f49ff8a602a726dd60550ff0 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -8,6 +8,9 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling) { + struct net_device *vlan_dev; + u16 vlan_id; + if (netpoll_rx(skb)) return NET_RX_DROP; @@ -16,9 +19,12 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, skb->skb_iif = skb->dev->ifindex; __vlan_hwaccel_put_tag(skb, vlan_tci); - skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); + vlan_id = vlan_tci & VLAN_VID_MASK; + vlan_dev = vlan_group_get_device(grp, vlan_id); - if (!skb->dev) + if (vlan_dev) + skb->dev = vlan_dev; + else if (vlan_id) goto drop; return (polling ? netif_receive_skb(skb) : netif_rx(skb)); @@ -41,9 +47,9 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); skb->vlan_tci = 0; - rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, - smp_processor_id()); + rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats); + u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; rx_stats->rx_bytes += skb->len; @@ -51,7 +57,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) case PACKET_BROADCAST: break; case PACKET_MULTICAST: - rx_stats->multicast++; + rx_stats->rx_multicast++; break; case PACKET_OTHERHOST: /* Our lower layer thinks this is not local, let's make sure. 
@@ -62,6 +68,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) skb->pkt_type = PACKET_HOST; break; } + u64_stats_update_end(&rx_stats->syncp); return 0; } @@ -82,15 +89,20 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, unsigned int vlan_tci, struct sk_buff *skb) { struct sk_buff *p; + struct net_device *vlan_dev; + u16 vlan_id; if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) skb->deliver_no_wcard = 1; skb->skb_iif = skb->dev->ifindex; __vlan_hwaccel_put_tag(skb, vlan_tci); - skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); + vlan_id = vlan_tci & VLAN_VID_MASK; + vlan_dev = vlan_group_get_device(grp, vlan_id); - if (!skb->dev) + if (vlan_dev) + skb->dev = vlan_dev; + else if (vlan_id) goto drop; for (p = napi->gro_list; p; p = p->next) { diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 52984267781782cd78aa0bd7bc3d38280745c1b1..3d59c9bf8febf5064150ff50d22db9d569453131 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -142,6 +142,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, { struct vlan_hdr *vhdr; struct vlan_rx_stats *rx_stats; + struct net_device *vlan_dev; u16 vlan_id; u16 vlan_tci; @@ -157,53 +158,71 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, vlan_id = vlan_tci & VLAN_VID_MASK; rcu_read_lock(); - skb->dev = __find_vlan_dev(dev, vlan_id); - if (!skb->dev) { - pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", - __func__, vlan_id, dev->name); - goto err_unlock; - } - - rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats, - smp_processor_id()); - rx_stats->rx_packets++; - rx_stats->rx_bytes += skb->len; - - skb_pull_rcsum(skb, VLAN_HLEN); - - skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci); - - pr_debug("%s: priority: %u for TCI: %hu\n", - __func__, skb->priority, vlan_tci); - - switch (skb->pkt_type) { - case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ - /* stats->broadcast ++; // no such counter :-( */ - break; + vlan_dev = __find_vlan_dev(dev, vlan_id); - case PACKET_MULTICAST: - rx_stats->multicast++; - break; + /* If the VLAN device is defined, we use it. + * If not, and the VID is 0, it is a 802.1p packet (not + * really a VLAN), so we will just netif_rx it later to the + * original interface, but with the skb->proto set to the + * wrapped proto: we do nothing here. + */ - case PACKET_OTHERHOST: - /* Our lower layer thinks this is not local, let's make sure. - * This allows the VLAN to have a different MAC than the - * underlying device, and still route correctly. - */ - if (!compare_ether_addr(eth_hdr(skb)->h_dest, - skb->dev->dev_addr)) - skb->pkt_type = PACKET_HOST; - break; - default: - break; + if (!vlan_dev) { + if (vlan_id) { + pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", + __func__, vlan_id, dev->name); + goto err_unlock; + } + rx_stats = NULL; + } else { + skb->dev = vlan_dev; + + rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats, + smp_processor_id()); + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->rx_packets++; + rx_stats->rx_bytes += skb->len; + + skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci); + + pr_debug("%s: priority: %u for TCI: %hu\n", + __func__, skb->priority, vlan_tci); + + switch (skb->pkt_type) { + case PACKET_BROADCAST: + /* Yeah, stats collect these together.. 
*/ + /* stats->broadcast ++; // no such counter :-( */ + break; + + case PACKET_MULTICAST: + rx_stats->rx_multicast++; + break; + + case PACKET_OTHERHOST: + /* Our lower layer thinks this is not local, let's make + * sure. + * This allows the VLAN to have a different MAC than the + * underlying device, and still route correctly. + */ + if (!compare_ether_addr(eth_hdr(skb)->h_dest, + skb->dev->dev_addr)) + skb->pkt_type = PACKET_HOST; + break; + default: + break; + } + u64_stats_update_end(&rx_stats->syncp); } + skb_pull_rcsum(skb, VLAN_HLEN); vlan_set_encap_proto(skb, vhdr); - skb = vlan_check_reorder_header(skb); - if (!skb) { - rx_stats->rx_errors++; - goto err_unlock; + if (vlan_dev) { + skb = vlan_check_reorder_header(skb); + if (!skb) { + rx_stats->rx_errors++; + goto err_unlock; + } } netif_rx(skb); @@ -801,37 +820,65 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev) return dev_ethtool_get_flags(vlan->real_dev); } -static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct net_device_stats *stats = &dev->stats; - dev_txq_stats_fold(dev, stats); if (vlan_dev_info(dev)->vlan_rx_stats) { - struct vlan_rx_stats *p, rx = {0}; + struct vlan_rx_stats *p, accum = {0}; int i; for_each_possible_cpu(i) { + u64 rxpackets, rxbytes, rxmulticast; + unsigned int start; + p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i); - rx.rx_packets += p->rx_packets; - rx.rx_bytes += p->rx_bytes; - rx.rx_errors += p->rx_errors; - rx.multicast += p->multicast; + do { + start = u64_stats_fetch_begin_bh(&p->syncp); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + rxmulticast = p->rx_multicast; + } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + accum.rx_packets += rxpackets; + accum.rx_bytes += rxbytes; + accum.rx_multicast += rxmulticast; + /* rx_errors is an ulong, not protected by syncp */ + accum.rx_errors += p->rx_errors; } - stats->rx_packets = rx.rx_packets; - stats->rx_bytes = rx.rx_bytes; - stats->rx_errors = rx.rx_errors; - stats->multicast = rx.multicast; + stats->rx_packets = accum.rx_packets; + stats->rx_bytes = accum.rx_bytes; + stats->rx_errors = accum.rx_errors; + stats->multicast = accum.rx_multicast; } return stats; } +static int vlan_ethtool_set_tso(struct net_device *dev, u32 data) +{ + if (data) { + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; + + /* Underlying device must support TSO for VLAN-tagged packets + * and must have TSO enabled now. 
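The VLAN receive counters above switch from unsigned long to u64 and are guarded by a u64_stats_sync sequence counter instead of a lock: the per-CPU writers wrap their updates in u64_stats_update_begin()/u64_stats_update_end(), and the reader in vlan_dev_get_stats64() retries with u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() until it obtains a consistent snapshot. The pattern in isolation, with illustrative structure and field names:

	#include <linux/u64_stats_sync.h>

	struct my_pcpu_stats {
		u64			packets;
		u64			bytes;
		struct u64_stats_sync	syncp;
	};

	/* Writer: runs on the owning CPU, e.g. from the receive path. */
	static void my_stats_add(struct my_pcpu_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* Reader: may run on any CPU, retries if a writer was active. */
	static void my_stats_read(struct my_pcpu_stats *s, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&s->syncp);
			*packets = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_bh(&s->syncp, start));
	}

On 64-bit kernels the sequence counter compiles away entirely; on 32-bit SMP it prevents torn 64-bit reads without adding a lock to the per-packet fast path. rx_errors stays unsigned long and is summed outside the retry loop, exactly as the comment in vlan_dev_get_stats64() notes.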
+ */ + if (!(real_dev->vlan_features & NETIF_F_TSO)) + return -EOPNOTSUPP; + if (!(real_dev->features & NETIF_F_TSO)) + return -EINVAL; + dev->features |= NETIF_F_TSO; + } else { + dev->features &= ~NETIF_F_TSO; + } + return 0; +} + static const struct ethtool_ops vlan_ethtool_ops = { .get_settings = vlan_ethtool_get_settings, .get_drvinfo = vlan_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, .get_rx_csum = vlan_ethtool_get_rx_csum, .get_flags = vlan_ethtool_get_flags, + .set_tso = vlan_ethtool_set_tso, }; static const struct net_device_ops vlan_netdev_ops = { @@ -848,7 +895,7 @@ static const struct net_device_ops vlan_netdev_ops = { .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, - .ndo_get_stats = vlan_dev_get_stats, + .ndo_get_stats64 = vlan_dev_get_stats64, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, @@ -872,7 +919,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = { .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, - .ndo_get_stats = vlan_dev_get_stats, + .ndo_get_stats64 = vlan_dev_get_stats64, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, @@ -897,7 +944,7 @@ static const struct net_device_ops vlan_netdev_ops_sq = { .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, - .ndo_get_stats = vlan_dev_get_stats, + .ndo_get_stats64 = vlan_dev_get_stats64, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, @@ -922,7 +969,7 @@ static const struct net_device_ops vlan_netdev_accel_ops_sq = { .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, - .ndo_get_stats = vlan_dev_get_stats, + .ndo_get_stats64 = vlan_dev_get_stats64, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index afead353e215fc0028530b033ee9104c8bb51ca5..80e280f56686989796264db386f69057a65dc165 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -278,25 +278,27 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); - const struct net_device_stats *stats; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats; static const char fmt[] = "%30s %12lu\n"; + static const char fmt64[] = "%30s %12llu\n"; int i; if (!is_vlan_dev(vlandev)) return 0; - stats = dev_get_stats(vlandev); + stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", vlandev->name, dev_info->vlan_id, (int)(dev_info->flags & 1), vlandev->priv_flags); - seq_printf(seq, fmt, "total frames received", stats->rx_packets); - seq_printf(seq, fmt, "total bytes received", stats->rx_bytes); - seq_printf(seq, fmt, "Broadcast/Multicast Rcvd", stats->multicast); + seq_printf(seq, fmt64, "total frames received", stats->rx_packets); + seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); + 
seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); seq_puts(seq, "\n"); - seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets); - seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes); + seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); + seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); seq_printf(seq, fmt, "total headroom inc", dev_info->cnt_inc_headroom_on_tx); seq_printf(seq, fmt, "total encap on xmit", diff --git a/net/Kconfig b/net/Kconfig index 0d68b40fc0e625d79aece260f08952c1ac374987..e24fa0873f32efff6fb48ffa06354b14f7e8ba63 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -32,7 +32,7 @@ config WANT_COMPAT_NETLINK_MESSAGES config COMPAT_NETLINK_MESSAGES def_bool y depends on COMPAT - depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES + depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES help This option makes it possible to send different netlink messages to tasks depending on whether the task is a compat task or not. To @@ -86,6 +86,16 @@ config NETWORK_SECMARK to nfmark, but designated for security purposes. If you are unsure how to answer this question, answer N. +config NETWORK_PHY_TIMESTAMPING + bool "Timestamping in PHY devices" + depends on EXPERIMENTAL + help + This allows timestamping of network packets by PHYs with + hardware timestamping capabilities. This option adds some + overhead in the transmit and receive paths. + + If you are unsure how to answer this question, answer N. + menuconfig NETFILTER bool "Network packet filtering framework (Netfilter)" ---help--- diff --git a/net/Makefile b/net/Makefile index cb7bdc1210cbeec5be41916d8fb052b2929cf054..41d420070a38cb2001cfacfb34835dfb45659573 100644 --- a/net/Makefile +++ b/net/Makefile @@ -50,7 +50,7 @@ endif obj-$(CONFIG_IP_DCCP) += dccp/ obj-$(CONFIG_IP_SCTP) += sctp/ obj-$(CONFIG_RDS) += rds/ -obj-y += wireless/ +obj-$(CONFIG_WIRELESS) += wireless/ obj-$(CONFIG_MAC80211) += mac80211/ obj-$(CONFIG_TIPC) += tipc/ obj-$(CONFIG_NETLABEL) += netlabel/ @@ -61,7 +61,7 @@ obj-$(CONFIG_CAIF) += caif/ ifneq ($(CONFIG_DCB),) obj-y += dcb/ endif -obj-y += ieee802154/ +obj-$(CONFIG_IEEE802154) += ieee802154/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 6719af6a59fada591a8bc9e912cab196de5a6c81..651babdfab3845ebd11eb6cc89fe85eae1d3f8ca 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -139,6 +139,43 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s) return NULL; } +static int atm_dev_event(struct notifier_block *this, unsigned long event, + void *arg) +{ + struct atm_dev *atm_dev = arg; + struct list_head *lh; + struct net_device *net_dev; + struct br2684_vcc *brvcc; + struct atm_vcc *atm_vcc; + unsigned long flags; + + pr_debug("event=%ld dev=%p\n", event, atm_dev); + + read_lock_irqsave(&devs_lock, flags); + list_for_each(lh, &br2684_devs) { + net_dev = list_entry_brdev(lh); + + list_for_each_entry(brvcc, &BRPRIV(net_dev)->brvccs, brvccs) { + atm_vcc = brvcc->atmvcc; + if (atm_vcc && brvcc->atmvcc->dev == atm_dev) { + + if (atm_vcc->dev->signal == ATM_PHY_SIG_LOST) + netif_carrier_off(net_dev); + else + netif_carrier_on(net_dev); + + } + } + } + read_unlock_irqrestore(&devs_lock, flags); + + return NOTIFY_DONE; +} + +static struct notifier_block atm_dev_notifier = { + .notifier_call = atm_dev_event, +}; + /* chained vcc->pop function. 
Check if we should wake the netif_queue */ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb) { @@ -362,6 +399,12 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) unregister_netdev(net_dev); free_netdev(net_dev); } + read_lock_irq(&devs_lock); + if (list_empty(&br2684_devs)) { + /* last br2684 device */ + unregister_atmdevice_notifier(&atm_dev_notifier); + } + read_unlock_irq(&devs_lock); return; } @@ -530,6 +573,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) br2684_push(atmvcc, skb); } + + /* initialize netdev carrier state */ + if (atmvcc->dev->signal == ATM_PHY_SIG_LOST) + netif_carrier_off(net_dev); + else + netif_carrier_on(net_dev); + __module_get(THIS_MODULE); return 0; @@ -620,9 +670,16 @@ static int br2684_create(void __user *arg) } write_lock_irq(&devs_lock); + brdev->payload = payload; - brdev->number = list_empty(&br2684_devs) ? 1 : - BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; + + if (list_empty(&br2684_devs)) { + /* 1st br2684 device */ + register_atmdevice_notifier(&atm_dev_notifier); + brdev->number = 1; + } else + brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; + list_add_tail(&brdev->br2684_devs, &br2684_devs); write_unlock_irq(&devs_lock); return 0; @@ -772,6 +829,11 @@ static void __exit br2684_exit(void) remove_proc_entry("br2684", atm_proc_root); #endif + + /* if not already empty */ + if (!list_empty(&br2684_devs)) + unregister_atmdevice_notifier(&atm_dev_notifier); + while (!list_empty(&br2684_devs)) { net_dev = list_entry_brdev(br2684_devs.next); brdev = BRPRIV(net_dev); diff --git a/net/atm/clip.c b/net/atm/clip.c index 313aba11316b5764e759fa7356f5f8286d69ceb5..95fdd1185067a859d49d0f5f5ea246e4fcad0893 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -522,7 +522,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) error = ip_route_output_key(&init_net, &rt, &fl); if (error) return error; - neigh = __neigh_lookup(&clip_tbl, &ip, rt->u.dst.dev, 1); + neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1); ip_rt_put(rt); if (!neigh) return -ENOMEM; diff --git a/net/atm/common.c b/net/atm/common.c index b43feb1a3995f77a75d0c445859ab70030c2f477..940404a73b3dddf05e262a7c5c727784c1a65bf3 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -37,6 +37,8 @@ EXPORT_SYMBOL(vcc_hash); DEFINE_RWLOCK(vcc_sklist_lock); EXPORT_SYMBOL(vcc_sklist_lock); +static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain); + static void __vcc_insert_socket(struct sock *sk) { struct atm_vcc *vcc = atm_sk(sk); @@ -212,6 +214,22 @@ void vcc_release_async(struct atm_vcc *vcc, int reply) } EXPORT_SYMBOL(vcc_release_async); +void atm_dev_signal_change(struct atm_dev *dev, char signal) +{ + pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n", + __func__, signal, dev, dev->number, dev->signal); + + /* atm driver sending invalid signal */ + WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND); + + if (dev->signal == signal) + return; /* no change */ + + dev->signal = signal; + + atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev); +} +EXPORT_SYMBOL(atm_dev_signal_change); void atm_dev_release_vccs(struct atm_dev *dev) { @@ -781,6 +799,18 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); } +int register_atmdevice_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&atm_dev_notify_chain, nb); +} +EXPORT_SYMBOL_GPL(register_atmdevice_notifier); + +void 
unregister_atmdevice_notifier(struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier); + static int __init atm_init(void) { int error; diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index ee3b3049d385839297d7a87e53fde0d6b5ec8456..ed371684c133d184e9d68f87b85ea171f6107a13 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -43,19 +43,6 @@ config BT_L2CAP Say Y here to compile L2CAP support into the kernel or say M to compile it as module (l2cap). -config BT_L2CAP_EXT_FEATURES - bool "L2CAP Extended Features support (EXPERIMENTAL)" - depends on BT_L2CAP && EXPERIMENTAL - help - This option enables the L2CAP Extended Features support. These - new features include the Enhanced Retransmission and Streaming - Modes, the Frame Check Sequence (FCS), and Segmentation and - Reassembly (SAR) for L2CAP packets. They are a required for the - new Alternate MAC/PHY and the Bluetooth Medical Profile. - - You should say N unless you know what you are doing. Note that - this is in an experimental state yet. - config BT_SCO tristate "SCO links support" depends on BT diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index 0d9e506f5d5a76b40b02e2f3e4fdc4138baf25bb..70672544db86940dee12845e0a00752d22e15dda 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h @@ -86,26 +86,26 @@ struct bnep_setup_conn_req { __u8 ctrl; __u8 uuid_size; __u8 service[0]; -} __attribute__((packed)); +} __packed; struct bnep_set_filter_req { __u8 type; __u8 ctrl; __be16 len; __u8 list[0]; -} __attribute__((packed)); +} __packed; struct bnep_control_rsp { __u8 type; __u8 ctrl; __be16 resp; -} __attribute__((packed)); +} __packed; struct bnep_ext_hdr { __u8 type; __u8 len; __u8 data[0]; -} __attribute__((packed)); +} __packed; /* BNEP ioctl defines */ #define BNEPCONNADD _IOW('B', 200, int) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 800b6b9fbbaefe15c90406e7631acd4e25b172b3..0b1e460fe440cfb07a86c0a15e87b7e98c03ba0f 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1,6 +1,6 @@ /* BlueZ - Bluetooth protocol stack for Linux - Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 
Written 2000,2001 by Maxim Krasnyansky @@ -155,6 +155,27 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); } +/* Device _must_ be locked */ +void hci_sco_setup(struct hci_conn *conn, __u8 status) +{ + struct hci_conn *sco = conn->link; + + BT_DBG("%p", conn); + + if (!sco) + return; + + if (!status) { + if (lmp_esco_capable(conn->hdev)) + hci_setup_sync(sco, conn->handle); + else + hci_add_sco(sco, conn->handle); + } else { + hci_proto_connect_cfm(sco, status); + hci_conn_del(sco); + } +} + static void hci_conn_timeout(unsigned long arg) { struct hci_conn *conn = (void *) arg; @@ -385,10 +406,13 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 acl->power_save = 1; hci_conn_enter_active_mode(acl); - if (lmp_esco_capable(hdev)) - hci_setup_sync(sco, acl->handle); - else - hci_add_sco(sco, acl->handle); + if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) { + /* defer SCO setup until mode change completed */ + set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend); + return sco; + } + + hci_sco_setup(acl, 0x00); } return sco; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2f768de8701178091008cca90346647790fe6706..8303f1c9ef548a13ab7ecfa8f2403b0a048f413e 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -562,6 +562,7 @@ static int hci_dev_do_close(struct hci_dev *hdev) hci_dev_lock_bh(hdev); inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); + hci_blacklist_clear(hdev); hci_dev_unlock_bh(hdev); hci_notify(hdev, HCI_DEV_DOWN); @@ -913,7 +914,7 @@ int hci_register_dev(struct hci_dev *hdev) skb_queue_head_init(&hdev->cmd_q); skb_queue_head_init(&hdev->raw_q); - for (i = 0; i < 3; i++) + for (i = 0; i < NUM_REASSEMBLY; i++) hdev->reassembly[i] = NULL; init_waitqueue_head(&hdev->req_wait_q); @@ -923,6 +924,8 @@ int hci_register_dev(struct hci_dev *hdev) hci_conn_hash_init(hdev); + INIT_LIST_HEAD(&hdev->blacklist.list); + memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); atomic_set(&hdev->promisc, 0); @@ -970,7 +973,7 @@ int hci_unregister_dev(struct hci_dev *hdev) hci_dev_do_close(hdev); - for (i = 0; i < 3; i++) + for (i = 0; i < NUM_REASSEMBLY; i++) kfree_skb(hdev->reassembly[i]); hci_notify(hdev, HCI_DEV_UNREG); @@ -1030,89 +1033,170 @@ int hci_recv_frame(struct sk_buff *skb) } EXPORT_SYMBOL(hci_recv_frame); -/* Receive packet type fragment */ -#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2]) - -int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) +static int hci_reassembly(struct hci_dev *hdev, int type, void *data, + int count, __u8 index, gfp_t gfp_mask) { - if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) + int len = 0; + int hlen = 0; + int remain = count; + struct sk_buff *skb; + struct bt_skb_cb *scb; + + if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || + index >= NUM_REASSEMBLY) return -EILSEQ; + skb = hdev->reassembly[index]; + + if (!skb) { + switch (type) { + case HCI_ACLDATA_PKT: + len = HCI_MAX_FRAME_SIZE; + hlen = HCI_ACL_HDR_SIZE; + break; + case HCI_EVENT_PKT: + len = HCI_MAX_EVENT_SIZE; + hlen = HCI_EVENT_HDR_SIZE; + break; + case HCI_SCODATA_PKT: + len = HCI_MAX_SCO_SIZE; + hlen = HCI_SCO_HDR_SIZE; + break; + } + + skb = bt_skb_alloc(len, gfp_mask); + if (!skb) + return -ENOMEM; + + scb = (void *) skb->cb; + scb->expect = hlen; + scb->pkt_type = type; + + skb->dev = (void *) hdev; + hdev->reassembly[index] = skb; + } + while (count) { - struct sk_buff *skb = __reassembly(hdev, 
type); - struct { int expect; } *scb; - int len = 0; + scb = (void *) skb->cb; + len = min(scb->expect, (__u16)count); - if (!skb) { - /* Start of the frame */ + memcpy(skb_put(skb, len), data, len); - switch (type) { - case HCI_EVENT_PKT: - if (count >= HCI_EVENT_HDR_SIZE) { - struct hci_event_hdr *h = data; - len = HCI_EVENT_HDR_SIZE + h->plen; - } else - return -EILSEQ; - break; + count -= len; + data += len; + scb->expect -= len; + remain = count; - case HCI_ACLDATA_PKT: - if (count >= HCI_ACL_HDR_SIZE) { - struct hci_acl_hdr *h = data; - len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen); - } else - return -EILSEQ; - break; + switch (type) { + case HCI_EVENT_PKT: + if (skb->len == HCI_EVENT_HDR_SIZE) { + struct hci_event_hdr *h = hci_event_hdr(skb); + scb->expect = h->plen; + + if (skb_tailroom(skb) < scb->expect) { + kfree_skb(skb); + hdev->reassembly[index] = NULL; + return -ENOMEM; + } + } + break; - case HCI_SCODATA_PKT: - if (count >= HCI_SCO_HDR_SIZE) { - struct hci_sco_hdr *h = data; - len = HCI_SCO_HDR_SIZE + h->dlen; - } else - return -EILSEQ; - break; + case HCI_ACLDATA_PKT: + if (skb->len == HCI_ACL_HDR_SIZE) { + struct hci_acl_hdr *h = hci_acl_hdr(skb); + scb->expect = __le16_to_cpu(h->dlen); + + if (skb_tailroom(skb) < scb->expect) { + kfree_skb(skb); + hdev->reassembly[index] = NULL; + return -ENOMEM; + } } + break; - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) { - BT_ERR("%s no memory for packet", hdev->name); - return -ENOMEM; + case HCI_SCODATA_PKT: + if (skb->len == HCI_SCO_HDR_SIZE) { + struct hci_sco_hdr *h = hci_sco_hdr(skb); + scb->expect = h->dlen; + + if (skb_tailroom(skb) < scb->expect) { + kfree_skb(skb); + hdev->reassembly[index] = NULL; + return -ENOMEM; + } } + break; + } + + if (scb->expect == 0) { + /* Complete frame */ - skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = type; + hci_recv_frame(skb); - __reassembly(hdev, type) = skb; + hdev->reassembly[index] = NULL; + return remain; + } + } - scb = (void *) skb->cb; - scb->expect = len; - } else { - /* Continuation */ + return remain; +} - scb = (void *) skb->cb; - len = scb->expect; - } +int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) +{ + int rem = 0; - len = min(len, count); + if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) + return -EILSEQ; - memcpy(skb_put(skb, len), data, len); + while (count) { + rem = hci_reassembly(hdev, type, data, count, + type - 1, GFP_ATOMIC); + if (rem < 0) + return rem; - scb->expect -= len; + data += (count - rem); + count = rem; + }; - if (scb->expect == 0) { - /* Complete frame */ + return rem; +} +EXPORT_SYMBOL(hci_recv_fragment); - __reassembly(hdev, type) = NULL; +#define STREAM_REASSEMBLY 0 - bt_cb(skb)->pkt_type = type; - hci_recv_frame(skb); - } +int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) +{ + int type; + int rem = 0; - count -= len; data += len; - } + while (count) { + struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY]; - return 0; + if (!skb) { + struct { char type; } *pkt; + + /* Start of the frame */ + pkt = data; + type = pkt->type; + + data++; + count--; + } else + type = bt_cb(skb)->pkt_type; + + rem = hci_reassembly(hdev, type, data, + count, STREAM_REASSEMBLY, GFP_ATOMIC); + if (rem < 0) + return rem; + + data += (count - rem); + count = rem; + }; + + return rem; } -EXPORT_SYMBOL(hci_recv_fragment); +EXPORT_SYMBOL(hci_recv_stream_fragment); /* ---- Interface to upper protocols ---- */ diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 
786b5de0bac42819ae3e207c942840a69f2c13da..bfef5bae0b3a83487c8894e171cf7fe6f447e651 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1,6 +1,6 @@ /* BlueZ - Bluetooth protocol stack for Linux - Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky @@ -584,7 +584,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) conn->out = 1; conn->link_mode |= HCI_LM_MASTER; } else - BT_ERR("No memmory for new connection"); + BT_ERR("No memory for new connection"); } } @@ -785,9 +785,13 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) + if (conn) { clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); + if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) + hci_sco_setup(conn, status); + } + hci_dev_unlock(hdev); } @@ -808,9 +812,13 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); - if (conn) + if (conn) { clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); + if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) + hci_sco_setup(conn, status); + } + hci_dev_unlock(hdev); } @@ -915,20 +923,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s } else conn->state = BT_CLOSED; - if (conn->type == ACL_LINK) { - struct hci_conn *sco = conn->link; - if (sco) { - if (!ev->status) { - if (lmp_esco_capable(hdev)) - hci_setup_sync(sco, conn->handle); - else - hci_add_sco(sco, conn->handle); - } else { - hci_proto_connect_cfm(sco, ev->status); - hci_conn_del(sco); - } - } - } + if (conn->type == ACL_LINK) + hci_sco_setup(conn, ev->status); if (ev->status) { hci_proto_connect_cfm(conn, ev->status); @@ -952,7 +948,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); - if (mask & HCI_LM_ACCEPT) { + if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) { /* Connection accepted */ struct inquiry_entry *ie; struct hci_conn *conn; @@ -965,7 +961,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { - BT_ERR("No memmory for new connection"); + BT_ERR("No memory for new connection"); hci_dev_unlock(hdev); return; } @@ -1481,6 +1477,9 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb else conn->power_save = 0; } + + if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) + hci_sco_setup(conn, ev->status); } hci_dev_unlock(hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 38f08f6b86f6b1f4a4b8c0305e91746e0b42affc..4f170a59593474ca174f4e266f39931de57e0212 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -165,6 +165,86 @@ static int hci_sock_release(struct socket *sock) return 0; } +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) +{ + struct list_head *p; + struct bdaddr_list *blacklist = &hdev->blacklist; + + list_for_each(p, &blacklist->list) { + struct bdaddr_list *b; + + b = list_entry(p, struct bdaddr_list, list); + + if (bacmp(bdaddr, &b->bdaddr) == 0) + return b; + } + + return NULL; +} + 
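From user space, the blacklist added here is managed through the new HCIBLOCKADDR and HCIUNBLOCKADDR ioctls on a bound raw HCI socket, and both require CAP_NET_ADMIN. A rough sketch of a caller, assuming the BlueZ library headers for bdaddr_t, str2ba() and sockaddr_hci, and a <bluetooth/hci.h> that already carries the new ioctl numbers:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/hci.h>

	/* Block one remote address on hci0; returns 0 on success. */
	static int block_bdaddr(const char *addr_str)
	{
		struct sockaddr_hci a;
		bdaddr_t bdaddr;
		int dd, err;

		str2ba(addr_str, &bdaddr);

		dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
		if (dd < 0)
			return -1;

		memset(&a, 0, sizeof(a));
		a.hci_family = AF_BLUETOOTH;
		a.hci_dev = 0;				/* hci0 */
		if (bind(dd, (struct sockaddr *)&a, sizeof(a)) < 0) {
			close(dd);
			return -1;
		}

		/* The kernel copies one bdaddr_t from the argument pointer. */
		err = ioctl(dd, HCIBLOCKADDR, &bdaddr);
		close(dd);
		return err;
	}

HCIUNBLOCKADDR takes the same argument; passing BDADDR_ANY to it clears the whole list, mirroring the hci_blacklist_del() path above, and the list is also flushed automatically in hci_dev_do_close().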
+static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg) +{ + bdaddr_t bdaddr; + struct bdaddr_list *entry; + + if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) + return -EFAULT; + + if (bacmp(&bdaddr, BDADDR_ANY) == 0) + return -EBADF; + + if (hci_blacklist_lookup(hdev, &bdaddr)) + return -EEXIST; + + entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, &bdaddr); + + list_add(&entry->list, &hdev->blacklist.list); + + return 0; +} + +int hci_blacklist_clear(struct hci_dev *hdev) +{ + struct list_head *p, *n; + struct bdaddr_list *blacklist = &hdev->blacklist; + + list_for_each_safe(p, n, &blacklist->list) { + struct bdaddr_list *b; + + b = list_entry(p, struct bdaddr_list, list); + + list_del(p); + kfree(b); + } + + return 0; +} + +static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg) +{ + bdaddr_t bdaddr; + struct bdaddr_list *entry; + + if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) + return -EFAULT; + + if (bacmp(&bdaddr, BDADDR_ANY) == 0) + return hci_blacklist_clear(hdev); + + entry = hci_blacklist_lookup(hdev, &bdaddr); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + /* Ioctls that require bound socket */ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { @@ -194,6 +274,16 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign case HCIGETAUTHINFO: return hci_get_auth_info(hdev, (void __user *) arg); + case HCIBLOCKADDR: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_blacklist_add(hdev, (void __user *) arg); + + case HCIUNBLOCKADDR: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_blacklist_del(hdev, (void __user *) arg); + default: if (hdev->ioctl) return hdev->ioctl(hdev, cmd, arg); diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 463ffa4fe042dee7539205ee4fb7e713fa145f1c..ce44c47eeac1a028e0463dcf12ebddb4ba9774fc 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -436,6 +436,41 @@ static const struct file_operations inquiry_cache_fops = { .release = single_release, }; +static int blacklist_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + struct bdaddr_list *blacklist = &hdev->blacklist; + struct list_head *l; + + hci_dev_lock_bh(hdev); + + list_for_each(l, &blacklist->list) { + struct bdaddr_list *b; + bdaddr_t bdaddr; + + b = list_entry(l, struct bdaddr_list, list); + + baswap(&bdaddr, &b->bdaddr); + + seq_printf(f, "%s\n", batostr(&bdaddr)); + } + + hci_dev_unlock_bh(hdev); + + return 0; +} + +static int blacklist_open(struct inode *inode, struct file *file) +{ + return single_open(file, blacklist_show, inode->i_private); +} + +static const struct file_operations blacklist_fops = { + .open = blacklist_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; int hci_register_sysfs(struct hci_dev *hdev) { struct device *dev = &hdev->dev; @@ -465,6 +500,9 @@ int hci_register_sysfs(struct hci_dev *hdev) debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, hdev, &inquiry_cache_fops); + debugfs_create_file("blacklist", 0444, hdev->debugfs, + hdev, &blacklist_fops); + return 0; } diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index cf3c4073a8a655d6e9d8936cb61d30254224962f..9ba1e8eee37c7b1ae49c3a242bdefa176462659f 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -1,6 +1,8 @@ /* BlueZ - Bluetooth protocol stack for Linux 
Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2009-2010 Gustavo F. Padovan + Copyright (C) 2010 Google Inc. Written 2000,2001 by Maxim Krasnyansky @@ -53,15 +55,9 @@ #include #include -#define VERSION "2.14" +#define VERSION "2.15" -#ifdef CONFIG_BT_L2CAP_EXT_FEATURES -static int enable_ertm = 1; -#else -static int enable_ertm = 0; -#endif -static int max_transmit = L2CAP_DEFAULT_MAX_TX; -static int tx_window = L2CAP_DEFAULT_TX_WINDOW; +static int disable_ertm = 0; static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; static u8 l2cap_fixed_chan[8] = { 0x02, }; @@ -80,9 +76,12 @@ static void __l2cap_sock_close(struct sock *sk, int reason); static void l2cap_sock_close(struct sock *sk); static void l2cap_sock_kill(struct sock *sk); +static int l2cap_build_conf_req(struct sock *sk, void *data); static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); +static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); + /* ---- L2CAP timers ---- */ static void l2cap_sock_timeout(unsigned long arg) { @@ -278,6 +277,24 @@ static void l2cap_chan_del(struct sock *sk, int err) parent->sk_data_ready(parent, 0); } else sk->sk_state_change(sk); + + skb_queue_purge(TX_QUEUE(sk)); + + if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { + struct srej_list *l, *tmp; + + del_timer(&l2cap_pi(sk)->retrans_timer); + del_timer(&l2cap_pi(sk)->monitor_timer); + del_timer(&l2cap_pi(sk)->ack_timer); + + skb_queue_purge(SREJ_QUEUE(sk)); + skb_queue_purge(BUSY_QUEUE(sk)); + + list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) { + list_del(&l->list); + kfree(l); + } + } } /* Service level security */ @@ -351,8 +368,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) struct sk_buff *skb; struct l2cap_hdr *lh; struct l2cap_conn *conn = pi->conn; + struct sock *sk = (struct sock *)pi; int count, hlen = L2CAP_HDR_SIZE + 2; + if (sk->sk_state != BT_CONNECTED) + return; + if (pi->fcs == L2CAP_FCS_CRC16) hlen += 2; @@ -440,24 +461,57 @@ static void l2cap_do_start(struct sock *sk) } } -static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk) +static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) +{ + u32 local_feat_mask = l2cap_feat_mask; + if (!disable_ertm) + local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; + + switch (mode) { + case L2CAP_MODE_ERTM: + return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; + case L2CAP_MODE_STREAMING: + return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; + default: + return 0x00; + } +} + +static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) { struct l2cap_disconn_req req; + if (!conn) + return; + + skb_queue_purge(TX_QUEUE(sk)); + + if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { + del_timer(&l2cap_pi(sk)->retrans_timer); + del_timer(&l2cap_pi(sk)->monitor_timer); + del_timer(&l2cap_pi(sk)->ack_timer); + } + req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); req.scid = cpu_to_le16(l2cap_pi(sk)->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, sizeof(req), &req); + + sk->sk_state = BT_DISCONN; + sk->sk_err = err; } /* ---- L2CAP connections ---- */ static void l2cap_conn_start(struct l2cap_conn *conn) { struct l2cap_chan_list *l = &conn->chan_list; + struct sock_del_list del, *tmp1, *tmp2; struct sock *sk; BT_DBG("conn %p", conn); + INIT_LIST_HEAD(&del.list); + read_lock(&l->lock); for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { @@ -470,20 +524,38 @@ static void l2cap_conn_start(struct l2cap_conn *conn) } 
if (sk->sk_state == BT_CONNECT) { - if (l2cap_check_security(sk) && - __l2cap_no_conn_pending(sk)) { - struct l2cap_conn_req req; - req.scid = cpu_to_le16(l2cap_pi(sk)->scid); - req.psm = l2cap_pi(sk)->psm; + struct l2cap_conn_req req; - l2cap_pi(sk)->ident = l2cap_get_ident(conn); - l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; + if (!l2cap_check_security(sk) || + !__l2cap_no_conn_pending(sk)) { + bh_unlock_sock(sk); + continue; + } - l2cap_send_cmd(conn, l2cap_pi(sk)->ident, - L2CAP_CONN_REQ, sizeof(req), &req); + if (!l2cap_mode_supported(l2cap_pi(sk)->mode, + conn->feat_mask) + && l2cap_pi(sk)->conf_state & + L2CAP_CONF_STATE2_DEVICE) { + tmp1 = kzalloc(sizeof(struct sock_del_list), + GFP_ATOMIC); + tmp1->sk = sk; + list_add_tail(&tmp1->list, &del.list); + bh_unlock_sock(sk); + continue; } + + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); + req.psm = l2cap_pi(sk)->psm; + + l2cap_pi(sk)->ident = l2cap_get_ident(conn); + l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; + + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_REQ, sizeof(req), &req); + } else if (sk->sk_state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; + char buf[128]; rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); @@ -506,12 +578,31 @@ static void l2cap_conn_start(struct l2cap_conn *conn) l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); + + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT || + rsp.result != L2CAP_CR_SUCCESS) { + bh_unlock_sock(sk); + continue; + } + + l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(sk, buf), buf); + l2cap_pi(sk)->num_conf_req++; } bh_unlock_sock(sk); } read_unlock(&l->lock); + + list_for_each_entry_safe(tmp1, tmp2, &del.list, list) { + bh_lock_sock(tmp1->sk); + __l2cap_sock_close(tmp1->sk, ECONNRESET); + bh_unlock_sock(tmp1->sk); + list_del(&tmp1->list); + kfree(tmp1); + } } static void l2cap_conn_ready(struct l2cap_conn *conn) @@ -740,9 +831,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason) sk->sk_type == SOCK_STREAM) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; - sk->sk_state = BT_DISCONN; l2cap_sock_set_timer(sk, sk->sk_sndtimeo); - l2cap_send_disconn_req(conn, sk); + l2cap_send_disconn_req(conn, sk, reason); } else l2cap_chan_del(sk, reason); break; @@ -802,6 +892,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) pi->imtu = l2cap_pi(parent)->imtu; pi->omtu = l2cap_pi(parent)->omtu; + pi->conf_state = l2cap_pi(parent)->conf_state; pi->mode = l2cap_pi(parent)->mode; pi->fcs = l2cap_pi(parent)->fcs; pi->max_tx = l2cap_pi(parent)->max_tx; @@ -812,13 +903,15 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) } else { pi->imtu = L2CAP_DEFAULT_MTU; pi->omtu = 0; - if (enable_ertm && sk->sk_type == SOCK_STREAM) + if (!disable_ertm && sk->sk_type == SOCK_STREAM) { pi->mode = L2CAP_MODE_ERTM; - else + pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; + } else { pi->mode = L2CAP_MODE_BASIC; - pi->max_tx = max_transmit; + } + pi->max_tx = L2CAP_DEFAULT_MAX_TX; pi->fcs = L2CAP_FCS_CRC16; - pi->tx_win = tx_window; + pi->tx_win = L2CAP_DEFAULT_TX_WINDOW; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; pi->force_reliable = 0; @@ -1067,7 +1160,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: - if (enable_ertm) + if (!disable_ertm) break; /* fall through */ default: @@ -1084,6 +1177,7 @@ 
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al case BT_CONNECTED: /* Already connected */ + err = -EISCONN; goto done; case BT_OPEN: @@ -1132,7 +1226,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: - if (enable_ertm) + if (!disable_ertm) break; /* fall through */ default: @@ -1285,9 +1379,11 @@ static void l2cap_monitor_timeout(unsigned long arg) { struct sock *sk = (void *) arg; + BT_DBG("sk %p", sk); + bh_lock_sock(sk); if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { - l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); + l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED); bh_unlock_sock(sk); return; } @@ -1303,6 +1399,8 @@ static void l2cap_retrans_timeout(unsigned long arg) { struct sock *sk = (void *) arg; + BT_DBG("sk %p", sk); + bh_lock_sock(sk); l2cap_pi(sk)->retry_count = 1; __mod_monitor_timer(); @@ -1341,7 +1439,7 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) hci_send_acl(pi->conn->hcon, skb, 0); } -static int l2cap_streaming_send(struct sock *sk) +static void l2cap_streaming_send(struct sock *sk) { struct sk_buff *skb, *tx_skb; struct l2cap_pinfo *pi = l2cap_pi(sk); @@ -1371,7 +1469,6 @@ static int l2cap_streaming_send(struct sock *sk) skb = skb_dequeue(TX_QUEUE(sk)); kfree_skb(skb); } - return 0; } static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) @@ -1395,15 +1492,22 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) if (pi->remote_max_tx && bt_cb(skb)->retries == pi->remote_max_tx) { - l2cap_send_disconn_req(pi->conn, sk); + l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); return; } tx_skb = skb_clone(skb, GFP_ATOMIC); bt_cb(skb)->retries++; control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + + if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { + control |= L2CAP_CTRL_FINAL; + pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; + } + control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); if (pi->fcs == L2CAP_FCS_CRC16) { @@ -1421,15 +1525,14 @@ static int l2cap_ertm_send(struct sock *sk) u16 control, fcs; int nsent = 0; - if (pi->conn_state & L2CAP_CONN_WAIT_F) - return 0; + if (sk->sk_state != BT_CONNECTED) + return -ENOTCONN; - while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) && - !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { + while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) { if (pi->remote_max_tx && bt_cb(skb)->retries == pi->remote_max_tx) { - l2cap_send_disconn_req(pi->conn, sk); + l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); break; } @@ -1438,6 +1541,8 @@ static int l2cap_ertm_send(struct sock *sk) bt_cb(skb)->retries++; control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + control &= L2CAP_CTRL_SAR; + if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { control |= L2CAP_CTRL_FINAL; pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; @@ -1478,16 +1583,11 @@ static int l2cap_retransmit_frames(struct sock *sk) struct l2cap_pinfo *pi = l2cap_pi(sk); int ret; - spin_lock_bh(&pi->send_lock); - if (!skb_queue_empty(TX_QUEUE(sk))) sk->sk_send_head = TX_QUEUE(sk)->next; pi->next_tx_seq = pi->expected_ack_seq; ret = l2cap_ertm_send(sk); - - spin_unlock_bh(&pi->send_lock); - return ret; } @@ -1495,7 +1595,6 @@ static void l2cap_send_ack(struct l2cap_pinfo *pi) { struct sock *sk = (struct sock *)pi; u16 control = 0; - int nframes; control |= pi->buffer_seq << 
L2CAP_CTRL_REQSEQ_SHIFT; @@ -1506,11 +1605,7 @@ static void l2cap_send_ack(struct l2cap_pinfo *pi) return; } - spin_lock_bh(&pi->send_lock); - nframes = l2cap_ertm_send(sk); - spin_unlock_bh(&pi->send_lock); - - if (nframes > 0) + if (l2cap_ertm_send(sk) > 0) return; control |= L2CAP_SUPER_RCV_READY; @@ -1705,10 +1800,8 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz size += buflen; } skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); - spin_lock_bh(&pi->send_lock); if (sk->sk_send_head == NULL) sk->sk_send_head = sar_queue.next; - spin_unlock_bh(&pi->send_lock); return size; } @@ -1753,7 +1846,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms case L2CAP_MODE_BASIC: /* Check outgoing MTU */ if (len > pi->omtu) { - err = -EINVAL; + err = -EMSGSIZE; goto done; } @@ -1780,14 +1873,9 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms } __skb_queue_tail(TX_QUEUE(sk), skb); - if (pi->mode == L2CAP_MODE_ERTM) - spin_lock_bh(&pi->send_lock); - if (sk->sk_send_head == NULL) sk->sk_send_head = skb; - if (pi->mode == L2CAP_MODE_ERTM) - spin_unlock_bh(&pi->send_lock); } else { /* Segment SDU into multiples PDUs */ err = l2cap_sar_segment_sdu(sk, msg, len); @@ -1796,11 +1884,14 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms } if (pi->mode == L2CAP_MODE_STREAMING) { - err = l2cap_streaming_send(sk); + l2cap_streaming_send(sk); } else { - spin_lock_bh(&pi->send_lock); + if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && + pi->conn_state && L2CAP_CONN_WAIT_F) { + err = len; + break; + } err = l2cap_ertm_send(sk); - spin_unlock_bh(&pi->send_lock); } if (err >= 0) @@ -1809,7 +1900,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms default: BT_DBG("bad state %1.1x", pi->mode); - err = -EINVAL; + err = -EBADFD; } done: @@ -1825,6 +1916,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { struct l2cap_conn_rsp rsp; + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + u8 buf[128]; sk->sk_state = BT_CONFIG; @@ -1835,6 +1928,16 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) { + release_sock(sk); + return 0; + } + + l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(sk, buf), buf); + l2cap_pi(sk)->num_conf_req++; + release_sock(sk); return 0; } @@ -1871,13 +1974,19 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us break; } + if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) { + err = -EINVAL; + break; + } + l2cap_pi(sk)->mode = opts.mode; switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: + l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE; break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: - if (enable_ertm) + if (!disable_ertm) break; /* fall through */ default: @@ -2145,6 +2254,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } + + if (!err && sk->sk_err) + err = -sk->sk_err; + release_sock(sk); return err; } @@ -2365,25 +2478,10 @@ static inline void l2cap_ertm_init(struct sock *sk) __skb_queue_head_init(SREJ_QUEUE(sk)); __skb_queue_head_init(BUSY_QUEUE(sk)); - 
spin_lock_init(&l2cap_pi(sk)->send_lock); INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work); -} -static int l2cap_mode_supported(__u8 mode, __u32 feat_mask) -{ - u32 local_feat_mask = l2cap_feat_mask; - if (enable_ertm) - local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; - - switch (mode) { - case L2CAP_MODE_ERTM: - return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; - case L2CAP_MODE_STREAMING: - return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; - default: - return 0x00; - } + sk->sk_backlog_rcv = l2cap_ertm_data_rcv; } static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) @@ -2414,10 +2512,10 @@ static int l2cap_build_conf_req(struct sock *sk, void *data) switch (pi->mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: - pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; - if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) - l2cap_send_disconn_req(pi->conn, sk); - break; + if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE) + break; + + /* fall through */ default: pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); break; @@ -2428,6 +2526,14 @@ static int l2cap_build_conf_req(struct sock *sk, void *data) case L2CAP_MODE_BASIC: if (pi->imtu != L2CAP_DEFAULT_MTU) l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); + + rfc.mode = L2CAP_MODE_BASIC; + rfc.txwin_size = 0; + rfc.max_transmit = 0; + rfc.retrans_timeout = 0; + rfc.monitor_timeout = 0; + rfc.max_pdu_size = 0; + break; case L2CAP_MODE_ERTM: @@ -2440,9 +2546,6 @@ static int l2cap_build_conf_req(struct sock *sk, void *data) if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); - if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -2463,9 +2566,6 @@ static int l2cap_build_conf_req(struct sock *sk, void *data) if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); - if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -2477,6 +2577,9 @@ static int l2cap_build_conf_req(struct sock *sk, void *data) break; } + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc); + /* FIXME: Need actual value of the flush timeout */ //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to); @@ -2541,18 +2644,21 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) } } - if (pi->num_conf_rsp || pi->num_conf_req) + if (pi->num_conf_rsp || pi->num_conf_req > 1) goto done; switch (pi->mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: - pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; - if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) + if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) { + pi->mode = l2cap_select_mode(rfc.mode, + pi->conn->feat_mask); + break; + } + + if (pi->mode != rfc.mode) return -ECONNREFUSED; - break; - default: - pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); + break; } @@ -2675,7 +2781,6 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, rfc.mode != pi->mode) return -ECONNREFUSED; - pi->mode = rfc.mode; pi->fcs = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, @@ -2684,6 +2789,11 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, } } + if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode) + return -ECONNREFUSED; + + pi->mode = rfc.mode; + if 
(*result == L2CAP_CONF_SUCCESS) { switch (rfc.mode) { case L2CAP_MODE_ERTM: @@ -2778,7 +2888,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd struct l2cap_chan_list *list = &conn->chan_list; struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; struct l2cap_conn_rsp rsp; - struct sock *sk, *parent; + struct sock *parent, *uninitialized_var(sk); int result, status = L2CAP_CS_NO_INFO; u16 dcid = 0, scid = __le16_to_cpu(req->scid); @@ -2887,6 +2997,15 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd L2CAP_INFO_REQ, sizeof(info), &info); } + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) && + result == L2CAP_CR_SUCCESS) { + u8 buf[128]; + l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(sk, buf), buf); + l2cap_pi(sk)->num_conf_req++; + } + return 0; } @@ -2907,11 +3026,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd if (scid) { sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); if (!sk) - return 0; + return -EFAULT; } else { sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident); if (!sk) - return 0; + return -EFAULT; } switch (result) { @@ -2919,9 +3038,13 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd sk->sk_state = BT_CONFIG; l2cap_pi(sk)->ident = 0; l2cap_pi(sk)->dcid = dcid; - l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND; + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) + break; + + l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req); l2cap_pi(sk)->num_conf_req++; @@ -2957,8 +3080,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (!sk) return -ENOENT; - if (sk->sk_state == BT_DISCONN) + if (sk->sk_state != BT_CONFIG) { + struct l2cap_cmd_rej rej; + + rej.reason = cpu_to_le16(0x0002); + l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, + sizeof(rej), &rej); goto unlock; + } /* Reject if config buffer is too small. */ len = cmd_len - sizeof(*req); @@ -2984,7 +3113,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr /* Complete config. 
*/ len = l2cap_parse_conf_req(sk, rsp); if (len < 0) { - l2cap_send_disconn_req(conn, sk); + l2cap_send_disconn_req(conn, sk, ECONNRESET); goto unlock; } @@ -3054,7 +3183,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr char req[64]; if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { - l2cap_send_disconn_req(conn, sk); + l2cap_send_disconn_req(conn, sk, ECONNRESET); goto done; } @@ -3063,7 +3192,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr len = l2cap_parse_conf_rsp(sk, rsp->data, len, req, &result); if (len < 0) { - l2cap_send_disconn_req(conn, sk); + l2cap_send_disconn_req(conn, sk, ECONNRESET); goto done; } @@ -3076,10 +3205,9 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr } default: - sk->sk_state = BT_DISCONN; sk->sk_err = ECONNRESET; l2cap_sock_set_timer(sk, HZ * 5); - l2cap_send_disconn_req(conn, sk); + l2cap_send_disconn_req(conn, sk, ECONNRESET); goto done; } @@ -3130,16 +3258,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd sk->sk_shutdown = SHUTDOWN_MASK; - skb_queue_purge(TX_QUEUE(sk)); - - if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { - skb_queue_purge(SREJ_QUEUE(sk)); - skb_queue_purge(BUSY_QUEUE(sk)); - del_timer(&l2cap_pi(sk)->retrans_timer); - del_timer(&l2cap_pi(sk)->monitor_timer); - del_timer(&l2cap_pi(sk)->ack_timer); - } - l2cap_chan_del(sk, ECONNRESET); bh_unlock_sock(sk); @@ -3162,16 +3280,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd if (!sk) return 0; - skb_queue_purge(TX_QUEUE(sk)); - - if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { - skb_queue_purge(SREJ_QUEUE(sk)); - skb_queue_purge(BUSY_QUEUE(sk)); - del_timer(&l2cap_pi(sk)->retrans_timer); - del_timer(&l2cap_pi(sk)->monitor_timer); - del_timer(&l2cap_pi(sk)->ack_timer); - } - l2cap_chan_del(sk, 0); bh_unlock_sock(sk); @@ -3194,7 +3302,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); - if (enable_ertm) + if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; put_unaligned_le32(feat_mask, rsp->data); @@ -3359,7 +3467,7 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb) our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); if (our_fcs != rcv_fcs) - return -EINVAL; + return -EBADMSG; } return 0; } @@ -3370,25 +3478,19 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk) u16 control = 0; pi->frames_sent = 0; - pi->conn_state |= L2CAP_CONN_SEND_FBIT; control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { - control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL; + control |= L2CAP_SUPER_RCV_NOT_READY; l2cap_send_sframe(pi, control); pi->conn_state |= L2CAP_CONN_RNR_SENT; - pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; } - if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0) - __mod_retrans_timer(); - - pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY) + l2cap_retransmit_frames(sk); - spin_lock_bh(&pi->send_lock); l2cap_ertm_send(sk); - spin_unlock_bh(&pi->send_lock); if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && pi->frames_sent == 0) { @@ -3400,6 +3502,8 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk) static int l2cap_add_to_srej_queue(struct sock 
*sk, struct sk_buff *skb, u8 tx_seq, u8 sar) { struct sk_buff *next_skb; + struct l2cap_pinfo *pi = l2cap_pi(sk); + int tx_seq_offset, next_tx_seq_offset; bt_cb(skb)->tx_seq = tx_seq; bt_cb(skb)->sar = sar; @@ -3410,11 +3514,20 @@ static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_s return 0; } + tx_seq_offset = (tx_seq - pi->buffer_seq) % 64; + if (tx_seq_offset < 0) + tx_seq_offset += 64; + do { if (bt_cb(next_skb)->tx_seq == tx_seq) return -EINVAL; - if (bt_cb(next_skb)->tx_seq > tx_seq) { + next_tx_seq_offset = (bt_cb(next_skb)->tx_seq - + pi->buffer_seq) % 64; + if (next_tx_seq_offset < 0) + next_tx_seq_offset += 64; + + if (next_tx_seq_offset > tx_seq_offset) { __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); return 0; } @@ -3532,11 +3645,51 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c pi->sdu = NULL; disconnect: - l2cap_send_disconn_req(pi->conn, sk); + l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); kfree_skb(skb); return 0; } +static int l2cap_try_push_rx_skb(struct sock *sk) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct sk_buff *skb; + u16 control; + int err; + + while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) { + control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; + err = l2cap_ertm_reassembly_sdu(sk, skb, control); + if (err < 0) { + skb_queue_head(BUSY_QUEUE(sk), skb); + return -EBUSY; + } + + pi->buffer_seq = (pi->buffer_seq + 1) % 64; + } + + if (!(pi->conn_state & L2CAP_CONN_RNR_SENT)) + goto done; + + control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; + l2cap_send_sframe(pi, control); + l2cap_pi(sk)->retry_count = 1; + + del_timer(&pi->retrans_timer); + __mod_monitor_timer(); + + l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; + +done: + pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY; + pi->conn_state &= ~L2CAP_CONN_RNR_SENT; + + BT_DBG("sk %p, Exit local busy", sk); + + return 0; +} + static void l2cap_busy_work(struct work_struct *work) { DECLARE_WAITQUEUE(wait, current); @@ -3545,7 +3698,6 @@ static void l2cap_busy_work(struct work_struct *work) struct sock *sk = (struct sock *)pi; int n_tries = 0, timeo = HZ/5, err; struct sk_buff *skb; - u16 control; lock_sock(sk); @@ -3555,8 +3707,8 @@ static void l2cap_busy_work(struct work_struct *work) if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) { err = -EBUSY; - l2cap_send_disconn_req(pi->conn, sk); - goto done; + l2cap_send_disconn_req(pi->conn, sk, EBUSY); + break; } if (!timeo) @@ -3564,7 +3716,7 @@ static void l2cap_busy_work(struct work_struct *work) if (signal_pending(current)) { err = sock_intr_errno(timeo); - goto done; + break; } release_sock(sk); @@ -3573,40 +3725,12 @@ static void l2cap_busy_work(struct work_struct *work) err = sock_error(sk); if (err) - goto done; - - while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) { - control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; - err = l2cap_ertm_reassembly_sdu(sk, skb, control); - if (err < 0) { - skb_queue_head(BUSY_QUEUE(sk), skb); - break; - } - - pi->buffer_seq = (pi->buffer_seq + 1) % 64; - } + break; - if (!skb) + if (l2cap_try_push_rx_skb(sk) == 0) break; } - if (!(pi->conn_state & L2CAP_CONN_RNR_SENT)) - goto done; - - control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; - control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; - l2cap_send_sframe(pi, control); - l2cap_pi(sk)->retry_count = 1; - - del_timer(&pi->retrans_timer); - __mod_monitor_timer(); - - l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; - -done: - pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY; - 
pi->conn_state &= ~L2CAP_CONN_RNR_SENT; - set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); @@ -3621,7 +3745,9 @@ static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control) if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; __skb_queue_tail(BUSY_QUEUE(sk), skb); - return -EBUSY; + return l2cap_try_push_rx_skb(sk); + + } err = l2cap_ertm_reassembly_sdu(sk, skb, control); @@ -3631,6 +3757,8 @@ static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control) } /* Busy Condition */ + BT_DBG("sk %p, Enter local busy", sk); + pi->conn_state |= L2CAP_CONN_LOCAL_BUSY; bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; __skb_queue_tail(BUSY_QUEUE(sk), skb); @@ -3641,6 +3769,8 @@ static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control) pi->conn_state |= L2CAP_CONN_RNR_SENT; + del_timer(&pi->ack_timer); + queue_work(_busy_wq, &pi->busy_work); return err; @@ -3754,7 +3884,7 @@ static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq) l2cap_ertm_reassembly_sdu(sk, skb, control); l2cap_pi(sk)->buffer_seq_srej = (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; - tx_seq++; + tx_seq = (tx_seq + 1) % 64; } } @@ -3790,10 +3920,11 @@ static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq) l2cap_send_sframe(pi, control); new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); - new->tx_seq = pi->expected_tx_seq++; + new->tx_seq = pi->expected_tx_seq; + pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; list_add_tail(&new->list, SREJ_LIST(sk)); } - pi->expected_tx_seq++; + pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; } static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) @@ -3802,11 +3933,12 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str u8 tx_seq = __get_txseq(rx_control); u8 req_seq = __get_reqseq(rx_control); u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; - u8 tx_seq_offset, expected_tx_seq_offset; + int tx_seq_offset, expected_tx_seq_offset; int num_to_ack = (pi->tx_win/6) + 1; int err = 0; - BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); + BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq, + rx_control); if (L2CAP_CTRL_FINAL & rx_control && l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) { @@ -3828,7 +3960,7 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str /* invalid tx_seq */ if (tx_seq_offset >= pi->tx_win) { - l2cap_send_disconn_req(pi->conn, sk); + l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); goto drop; } @@ -3851,6 +3983,7 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str pi->buffer_seq = pi->buffer_seq_srej; pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; l2cap_send_ack(pi); + BT_DBG("sk %p, Exit SREJ_SENT", sk); } } else { struct srej_list *l; @@ -3879,6 +4012,8 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str pi->conn_state |= L2CAP_CONN_SREJ_SENT; + BT_DBG("sk %p, Enter SREJ", sk); + INIT_LIST_HEAD(SREJ_LIST(sk)); pi->buffer_seq_srej = pi->buffer_seq; @@ -3889,6 +4024,8 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str pi->conn_state |= L2CAP_CONN_SEND_PBIT; l2cap_send_srejframe(sk, tx_seq); + + del_timer(&pi->ack_timer); } return 0; @@ -3902,6 +4039,10 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str return 0; } + err = l2cap_push_rx_skb(sk, skb, rx_control); + if (err < 
0) + return 0; + if (rx_control & L2CAP_CTRL_FINAL) { if (pi->conn_state & L2CAP_CONN_REJ_ACT) pi->conn_state &= ~L2CAP_CONN_REJ_ACT; @@ -3909,10 +4050,6 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str l2cap_retransmit_frames(sk); } - err = l2cap_push_rx_skb(sk, skb, rx_control); - if (err < 0) - return 0; - __mod_ack_timer(); pi->num_acked = (pi->num_acked + 1) % num_to_ack; @@ -3930,10 +4067,14 @@ static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control) { struct l2cap_pinfo *pi = l2cap_pi(sk); + BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control), + rx_control); + pi->expected_ack_seq = __get_reqseq(rx_control); l2cap_drop_acked_frames(sk); if (rx_control & L2CAP_CTRL_POLL) { + pi->conn_state |= L2CAP_CONN_SEND_FBIT; if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && (pi->unacked_frames > 0)) @@ -3962,9 +4103,7 @@ static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control) if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { l2cap_send_ack(pi); } else { - spin_lock_bh(&pi->send_lock); l2cap_ertm_send(sk); - spin_unlock_bh(&pi->send_lock); } } } @@ -3974,6 +4113,8 @@ static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control) struct l2cap_pinfo *pi = l2cap_pi(sk); u8 tx_seq = __get_reqseq(rx_control); + BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; pi->expected_ack_seq = tx_seq; @@ -3996,16 +4137,18 @@ static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control) struct l2cap_pinfo *pi = l2cap_pi(sk); u8 tx_seq = __get_reqseq(rx_control); + BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; if (rx_control & L2CAP_CTRL_POLL) { pi->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(sk); + + pi->conn_state |= L2CAP_CONN_SEND_FBIT; l2cap_retransmit_one_frame(sk, tx_seq); - spin_lock_bh(&pi->send_lock); l2cap_ertm_send(sk); - spin_unlock_bh(&pi->send_lock); if (pi->conn_state & L2CAP_CONN_WAIT_F) { pi->srej_save_reqseq = tx_seq; @@ -4031,10 +4174,15 @@ static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control) struct l2cap_pinfo *pi = l2cap_pi(sk); u8 tx_seq = __get_reqseq(rx_control); + BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); + pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; pi->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(sk); + if (rx_control & L2CAP_CTRL_POLL) + pi->conn_state |= L2CAP_CONN_SEND_FBIT; + if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) { del_timer(&pi->retrans_timer); if (rx_control & L2CAP_CTRL_POLL) @@ -4082,12 +4230,83 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str return 0; } +static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + u16 control; + u8 req_seq; + int len, next_tx_seq_offset, req_seq_offset; + + control = get_unaligned_le16(skb->data); + skb_pull(skb, 2); + len = skb->len; + + /* + * We can just drop the corrupted I-frame here. + * Receiver will miss it and start proper recovery + * procedures and ask retransmission. 
+ */ + if (l2cap_check_fcs(pi, skb)) + goto drop; + + if (__is_sar_start(control) && __is_iframe(control)) + len -= 2; + + if (pi->fcs == L2CAP_FCS_CRC16) + len -= 2; + + if (len > pi->mps) { + l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); + goto drop; + } + + req_seq = __get_reqseq(control); + req_seq_offset = (req_seq - pi->expected_ack_seq) % 64; + if (req_seq_offset < 0) + req_seq_offset += 64; + + next_tx_seq_offset = + (pi->next_tx_seq - pi->expected_ack_seq) % 64; + if (next_tx_seq_offset < 0) + next_tx_seq_offset += 64; + + /* check for invalid req-seq */ + if (req_seq_offset > next_tx_seq_offset) { + l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); + goto drop; + } + + if (__is_iframe(control)) { + if (len < 0) { + l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); + goto drop; + } + + l2cap_data_channel_iframe(sk, control, skb); + } else { + if (len != 0) { + BT_ERR("%d", len); + l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); + goto drop; + } + + l2cap_data_channel_sframe(sk, control, skb); + } + + return 0; + +drop: + kfree_skb(skb); + return 0; +} + static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) { struct sock *sk; struct l2cap_pinfo *pi; - u16 control, len; - u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset; + u16 control; + u8 tx_seq; + int len; sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); if (!sk) { @@ -4117,59 +4336,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk break; case L2CAP_MODE_ERTM: - control = get_unaligned_le16(skb->data); - skb_pull(skb, 2); - len = skb->len; - - if (__is_sar_start(control)) - len -= 2; - - if (pi->fcs == L2CAP_FCS_CRC16) - len -= 2; - - /* - * We can just drop the corrupted I-frame here. - * Receiver will miss it and start proper recovery - * procedures and ask retransmission. 
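A note on the reqseq/txseq handling introduced above: ERTM sequence numbers are 6 bits wide, so every distance is taken modulo the 64-frame window and a negative C remainder is folded back into range. A minimal standalone sketch of that arithmetic (illustrative helper, not part of the patch):

    #include <linux/types.h>

    /* Distance from base to seq inside the 64-entry ERTM window. */
    static inline int ertm_seq_offset(u8 seq, u8 base)
    {
            int offset = (seq - base) % 64;

            if (offset < 0)
                    offset += 64;

            return offset;  /* e.g. seq = 2, base = 60 gives 6 */
    }

The same shape covers both the SREJ queue ordering and the reqseq validity check above, where an offset larger than the distance to next_tx_seq triggers a disconnect.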
- */ - if (len > pi->mps) { - l2cap_send_disconn_req(pi->conn, sk); - goto drop; - } - - if (l2cap_check_fcs(pi, skb)) - goto drop; - - req_seq = __get_reqseq(control); - req_seq_offset = (req_seq - pi->expected_ack_seq) % 64; - if (req_seq_offset < 0) - req_seq_offset += 64; - - next_tx_seq_offset = - (pi->next_tx_seq - pi->expected_ack_seq) % 64; - if (next_tx_seq_offset < 0) - next_tx_seq_offset += 64; - - /* check for invalid req-seq */ - if (req_seq_offset > next_tx_seq_offset) { - l2cap_send_disconn_req(pi->conn, sk); - goto drop; - } - - if (__is_iframe(control)) { - if (len < 4) { - l2cap_send_disconn_req(pi->conn, sk); - goto drop; - } - - l2cap_data_channel_iframe(sk, control, skb); + if (!sock_owned_by_user(sk)) { + l2cap_ertm_data_rcv(sk, skb); } else { - if (len != 0) { - l2cap_send_disconn_req(pi->conn, sk); + if (sk_add_backlog(sk, skb)) goto drop; - } - - l2cap_data_channel_sframe(sk, control, skb); } goto done; @@ -4179,16 +4350,16 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk skb_pull(skb, 2); len = skb->len; + if (l2cap_check_fcs(pi, skb)) + goto drop; + if (__is_sar_start(control)) len -= 2; if (pi->fcs == L2CAP_FCS_CRC16) len -= 2; - if (len > pi->mps || len < 4 || __is_sframe(control)) - goto drop; - - if (l2cap_check_fcs(pi, skb)) + if (len > pi->mps || len < 0 || __is_sframe(control)) goto drop; tx_seq = __get_txseq(control); @@ -4288,7 +4459,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) struct hlist_node *node; if (type != ACL_LINK) - return 0; + return -EINVAL; BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); @@ -4321,7 +4492,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); if (hcon->type != ACL_LINK) - return 0; + return -EINVAL; if (!status) { conn = l2cap_conn_add(hcon, status); @@ -4350,7 +4521,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) BT_DBG("hcon %p reason %d", hcon, reason); if (hcon->type != ACL_LINK) - return 0; + return -EINVAL; l2cap_conn_del(hcon, bt_err(reason)); @@ -4679,14 +4850,8 @@ EXPORT_SYMBOL(l2cap_load); module_init(l2cap_init); module_exit(l2cap_exit); -module_param(enable_ertm, bool, 0644); -MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode"); - -module_param(max_transmit, uint, 0644); -MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)"); - -module_param(tx_window, uint, 0644); -MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)"); +module_param(disable_ertm, bool, 0644); +MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); MODULE_AUTHOR("Marcel Holtmann "); MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 43fbf6b4b4bfcefdcf9ddcd64c0549959bfb50d6..44a623275951e4b481abf1942fb2587867891dca 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -1152,7 +1152,7 @@ int __init rfcomm_init_sockets(void) return err; } -void rfcomm_cleanup_sockets(void) +void __exit rfcomm_cleanup_sockets(void) { debugfs_remove(rfcomm_sock_debugfs); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 309b6c261b258e985c256674b1df7cba480f82aa..026205c18b7850a6736284a521dcd604ff06fc36 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -1153,7 +1153,7 @@ static const struct tty_operations rfcomm_ops = { .tiocmset = rfcomm_tty_tiocmset, }; -int 
rfcomm_init_ttys(void) +int __init rfcomm_init_ttys(void) { rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); if (!rfcomm_tty_driver) @@ -1183,7 +1183,7 @@ int rfcomm_init_ttys(void) return 0; } -void rfcomm_cleanup_ttys(void) +void __exit rfcomm_cleanup_ttys(void) { tty_unregister_driver(rfcomm_tty_driver); put_tty_driver(rfcomm_tty_driver); diff --git a/net/bridge/br.c b/net/bridge/br.c index 76357b547752327d2ff4a7ce310effc5f4142b79..c8436fa31344000ffebe460a9f496b7cfc339ce8 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -63,7 +63,6 @@ static int __init br_init(void) goto err_out4; brioctl_set(br_ioctl_deviceless_stub); - br_handle_frame_hook = br_handle_frame; #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) br_fdb_test_addr_hook = br_fdb_test_addr; @@ -100,7 +99,6 @@ static void __exit br_deinit(void) br_fdb_test_addr_hook = NULL; #endif - br_handle_frame_hook = NULL; br_fdb_fini(); } diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 753fc4221f3c3587e41dfd816fd6e2771578a243..cf09fe591fc20cc73a2fdd20d13860f88c33134d 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -22,7 +22,7 @@ #include #include "br_private.h" -/* net device transmit always called with no BH (preempt_disabled) */ +/* net device transmit always called with BH disabled */ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); @@ -38,17 +38,26 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) } #endif + u64_stats_update_begin(&brstats->syncp); brstats->tx_packets++; brstats->tx_bytes += skb->len; + u64_stats_update_end(&brstats->syncp); BR_INPUT_SKB_CB(skb)->brdev = dev; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); + rcu_read_lock(); if (is_multicast_ether_addr(dest)) { - if (br_multicast_rcv(br, NULL, skb)) + if (unlikely(netpoll_tx_running(dev))) { + br_flood_deliver(br, skb); + goto out; + } + if (br_multicast_rcv(br, NULL, skb)) { + kfree_skb(skb); goto out; + } mdst = br_mdb_get(br, skb); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) @@ -61,6 +70,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) br_flood_deliver(br, skb); out: + rcu_read_unlock(); return NETDEV_TX_OK; } @@ -92,21 +102,25 @@ static int br_dev_stop(struct net_device *dev) return 0; } -static struct net_device_stats *br_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct net_bridge *br = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct br_cpu_netstats sum = { 0 }; + struct br_cpu_netstats tmp, sum = { 0 }; unsigned int cpu; for_each_possible_cpu(cpu) { + unsigned int start; const struct br_cpu_netstats *bstats = per_cpu_ptr(br->stats, cpu); - - sum.tx_bytes += bstats->tx_bytes; - sum.tx_packets += bstats->tx_packets; - sum.rx_bytes += bstats->rx_bytes; - sum.rx_packets += bstats->rx_packets; + do { + start = u64_stats_fetch_begin(&bstats->syncp); + memcpy(&tmp, bstats, sizeof(tmp)); + } while (u64_stats_fetch_retry(&bstats->syncp, start)); + sum.tx_bytes += tmp.tx_bytes; + sum.tx_packets += tmp.tx_packets; + sum.rx_bytes += tmp.rx_bytes; + sum.rx_packets += tmp.rx_packets; } stats->tx_bytes = sum.tx_bytes; @@ -127,7 +141,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu) #ifdef CONFIG_BRIDGE_NETFILTER /* remember the MTU in the rtable for PMTU */ - br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu; + br->fake_rtable.dst.metrics[RTAX_MTU - 
1] = new_mtu; #endif return 0; @@ -199,65 +213,81 @@ static int br_set_tx_csum(struct net_device *dev, u32 data) } #ifdef CONFIG_NET_POLL_CONTROLLER -static bool br_devices_support_netpoll(struct net_bridge *br) +static void br_poll_controller(struct net_device *br_dev) { - struct net_bridge_port *p; - bool ret = true; - int count = 0; - unsigned long flags; - - spin_lock_irqsave(&br->lock, flags); - list_for_each_entry(p, &br->port_list, list) { - count++; - if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) || - !p->dev->netdev_ops->ndo_poll_controller) - ret = false; - } - spin_unlock_irqrestore(&br->lock, flags); - return count != 0 && ret; } -void br_netpoll_cleanup(struct net_device *dev) +static void br_netpoll_cleanup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; - const struct net_device_ops *ops; - br->dev->npinfo = NULL; list_for_each_entry_safe(p, n, &br->port_list, list) { - if (p->dev) { - ops = p->dev->netdev_ops; - if (ops->ndo_netpoll_cleanup) - ops->ndo_netpoll_cleanup(p->dev); - else - p->dev->npinfo = NULL; - } + br_netpoll_disable(p); } } -void br_netpoll_disable(struct net_bridge *br, - struct net_device *dev) +static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) { - if (br_devices_support_netpoll(br)) - br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - if (dev->netdev_ops->ndo_netpoll_cleanup) - dev->netdev_ops->ndo_netpoll_cleanup(dev); - else - dev->npinfo = NULL; + struct net_bridge *br = netdev_priv(dev); + struct net_bridge_port *p, *n; + int err = 0; + + list_for_each_entry_safe(p, n, &br->port_list, list) { + if (!p->dev) + continue; + + err = br_netpoll_enable(p); + if (err) + goto fail; + } + +out: + return err; + +fail: + br_netpoll_cleanup(dev); + goto out; } -void br_netpoll_enable(struct net_bridge *br, - struct net_device *dev) +int br_netpoll_enable(struct net_bridge_port *p) { - if (br_devices_support_netpoll(br)) { - br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - if (br->dev->npinfo) - dev->npinfo = br->dev->npinfo; - } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) { - br->dev->priv_flags |= IFF_DISABLE_NETPOLL; - br_info(br,"new device %s does not support netpoll (disabling)", - dev->name); + struct netpoll *np; + int err = 0; + + np = kzalloc(sizeof(*p->np), GFP_KERNEL); + err = -ENOMEM; + if (!np) + goto out; + + np->dev = p->dev; + + err = __netpoll_setup(np); + if (err) { + kfree(np); + goto out; } + + p->np = np; + +out: + return err; +} + +void br_netpoll_disable(struct net_bridge_port *p) +{ + struct netpoll *np = p->np; + + if (!np) + return; + + p->np = NULL; + + /* Wait for transmitting packets to finish before freeing. 
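The per-port netpoll handling above boils down to: allocate one struct netpoll per slave, bind it with __netpoll_setup(), and on teardown unpublish the pointer, wait out BH RCU readers, then __netpoll_cleanup() and free. A condensed sketch of that lifecycle with a hypothetical my_netpoll_port type, assuming only the calls visible in this hunk:

    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/netpoll.h>
    #include <linux/slab.h>

    struct my_netpoll_port {
            struct net_device *dev;
            struct netpoll *np;
    };

    static int my_netpoll_enable(struct my_netpoll_port *p)
    {
            struct netpoll *np = kzalloc(sizeof(*np), GFP_KERNEL);
            int err;

            if (!np)
                    return -ENOMEM;

            np->dev = p->dev;
            err = __netpoll_setup(np);
            if (err) {
                    kfree(np);
                    return err;
            }
            p->np = np;
            return 0;
    }

    static void my_netpoll_disable(struct my_netpoll_port *p)
    {
            struct netpoll *np = p->np;

            if (!np)
                    return;
            p->np = NULL;
            synchronize_rcu_bh();   /* let in-flight transmitters finish */
            __netpoll_cleanup(np);
            kfree(np);
    }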
*/ + synchronize_rcu_bh(); + + __netpoll_cleanup(np); + kfree(np); } #endif @@ -280,13 +310,15 @@ static const struct net_device_ops br_netdev_ops = { .ndo_open = br_dev_open, .ndo_stop = br_dev_stop, .ndo_start_xmit = br_dev_xmit, - .ndo_get_stats = br_get_stats, + .ndo_get_stats64 = br_get_stats64, .ndo_set_mac_address = br_set_mac_address, .ndo_set_multicast_list = br_dev_set_multicast_list, .ndo_change_mtu = br_change_mtu, .ndo_do_ioctl = br_dev_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_netpoll_setup = br_netpoll_setup, .ndo_netpoll_cleanup = br_netpoll_cleanup, + .ndo_poll_controller = br_poll_controller, #endif }; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index b01dde35a69ed3f1cecf30ebe604cc1808d72a69..90512ccfd3e973c19adade047de7eda77ffd2963 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br, spin_unlock_bh(&br->hash_lock); } -/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */ +/* No locking or refcounting, assumes caller has rcu_read_lock */ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, const unsigned char *addr) { @@ -240,11 +240,11 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) struct net_bridge_fdb_entry *fdb; int ret; - if (!dev->br_port) + if (!br_port_exists(dev)) return 0; rcu_read_lock(); - fdb = __br_fdb_get(dev->br_port->br, addr); + fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr); ret = fdb && fdb->dst->dev != dev && fdb->dst->state == BR_STATE_FORWARDING; rcu_read_unlock(); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 595da45f908854d0c9eef2e54ba94280de5afcfb..cbfe87f0f34ae7d81b7e10396018c99113b7a16d 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -67,6 +67,17 @@ int br_forward_finish(struct sk_buff *skb) static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { skb->dev = to->dev; + + if (unlikely(netpoll_tx_running(to->dev))) { + if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) + kfree_skb(skb); + else { + skb_push(skb, ETH_HLEN); + br_netpoll_send_skb(to, skb); + } + return; + } + NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, br_forward_finish); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 18b245e2c00edb642ef59c1aa2f5e03324a38790..c03d2c3ff03ed6cb99dc05d7a4cde5033d951190 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -147,14 +147,17 @@ static void del_nbp(struct net_bridge_port *p) list_del_rcu(&p->list); - rcu_assign_pointer(dev->br_port, NULL); + dev->priv_flags &= ~IFF_BRIDGE_PORT; + + netdev_rx_handler_unregister(dev); br_multicast_del_port(p); kobject_uevent(&p->kobj, KOBJ_REMOVE); kobject_del(&p->kobj); - br_netpoll_disable(br, dev); + br_netpoll_disable(p); + call_rcu(&p->rcu, destroy_nbp_rcu); } @@ -167,8 +170,6 @@ static void del_br(struct net_bridge *br, struct list_head *head) del_nbp(p); } - br_netpoll_cleanup(br->dev); - del_timer_sync(&br->gc_timer); br_sysfs_delbr(br->dev); @@ -400,7 +401,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) return -ELOOP; /* Device is already being bridged */ - if (dev->br_port != NULL) + if (br_port_exists(dev)) return -EBUSY; /* No bridging devices that dislike that (e.g. 
wireless) */ @@ -428,7 +429,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err2; - rcu_assign_pointer(dev->br_port, p); + if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) + goto err3; + + err = netdev_rx_handler_register(dev, br_handle_frame, p); + if (err) + goto err3; + + dev->priv_flags |= IFF_BRIDGE_PORT; + dev_disable_lro(dev); list_add_rcu(&p->list, &br->port_list); @@ -448,9 +457,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) kobject_uevent(&p->kobj, KOBJ_ADD); - br_netpoll_enable(br, dev); - return 0; +err3: + sysfs_remove_link(br->ifobj, p->dev->name); err2: br_fdb_delete_by_port(br, p, 1); err1: @@ -467,9 +476,13 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) /* called with RTNL */ int br_del_if(struct net_bridge *br, struct net_device *dev) { - struct net_bridge_port *p = dev->br_port; + struct net_bridge_port *p; + + if (!br_port_exists(dev)) + return -EINVAL; - if (!p || p->br != br) + p = br_port_get(dev); + if (p->br != br) return -EINVAL; del_nbp(p); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index d36e700f7a26505b4828a08302b5bfec27b06483..826cd5221536cadd3b0d13dfec2e9d3639b92a93 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -27,8 +27,10 @@ static int br_pass_frame_up(struct sk_buff *skb) struct net_bridge *br = netdev_priv(brdev); struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); + u64_stats_update_begin(&brstats->syncp); brstats->rx_packets++; brstats->rx_bytes += skb->len; + u64_stats_update_end(&brstats->syncp); indev = skb->dev; skb->dev = brdev; @@ -37,11 +39,11 @@ static int br_pass_frame_up(struct sk_buff *skb) netif_receive_skb); } -/* note: already called with rcu_read_lock (preempt_disabled) */ +/* note: already called with rcu_read_lock */ int br_handle_frame_finish(struct sk_buff *skb) { const unsigned char *dest = eth_hdr(skb)->h_dest; - struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); + struct net_bridge_port *p = br_port_get_rcu(skb->dev); struct net_bridge *br; struct net_bridge_fdb_entry *dst; struct net_bridge_mdb_entry *mdst; @@ -108,13 +110,12 @@ int br_handle_frame_finish(struct sk_buff *skb) goto out; } -/* note: already called with rcu_read_lock (preempt_disabled) */ +/* note: already called with rcu_read_lock */ static int br_handle_local_finish(struct sk_buff *skb) { - struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); + struct net_bridge_port *p = br_port_get_rcu(skb->dev); - if (p) - br_fdb_update(p->br, p, eth_hdr(skb)->h_source); + br_fdb_update(p->br, p, eth_hdr(skb)->h_source); return 0; /* process further */ } @@ -131,15 +132,18 @@ static inline int is_link_local(const unsigned char *dest) } /* - * Called via br_handle_frame_hook. 
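With dev->br_port gone, a bridge port is now published through netdev_rx_handler_register(dev, br_handle_frame, p) and read back with rcu_dereference(dev->rx_handler_data), as the br_handle_frame() rewrite further below shows. A minimal sketch of the same registration pattern, using hypothetical my_* names rather than the bridge's:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct my_port {
            struct net_device *dev;
    };

    /* Runs under rcu_read_lock() from the core receive path; returning
     * the skb hands it back to the stack, returning NULL consumes it.
     */
    static struct sk_buff *my_handle_frame(struct sk_buff *skb)
    {
            struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

            if (!port || skb->pkt_type == PACKET_LOOPBACK)
                    return skb;

            kfree_skb(skb);         /* this sketch simply consumes the frame */
            return NULL;
    }

    static int my_attach(struct net_device *dev, struct my_port *port)
    {
            return netdev_rx_handler_register(dev, my_handle_frame, port);
    }

    static void my_detach(struct net_device *dev)
    {
            netdev_rx_handler_unregister(dev);      /* as in del_nbp() above */
    }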
* Return NULL if skb is handled - * note: already called with rcu_read_lock (preempt_disabled) + * note: already called with rcu_read_lock */ -struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) +struct sk_buff *br_handle_frame(struct sk_buff *skb) { + struct net_bridge_port *p; const unsigned char *dest = eth_hdr(skb)->h_dest; int (*rhook)(struct sk_buff *skb); + if (skb->pkt_type == PACKET_LOOPBACK) + return skb; + if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) goto drop; @@ -147,6 +151,8 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) if (!skb) return NULL; + p = br_port_get_rcu(skb->dev); + if (unlikely(is_link_local(dest))) { /* Pause frames shouldn't be passed up by driver anyway */ if (skb->protocol == htons(ETH_P_PAUSE)) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 27ae946363f16c6b709b5f4267e7ab5a574d5314..eb5b256ffc8801ff7e187c64ee3dde5da268f570 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1435,7 +1435,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, struct icmp6hdr *icmp6h; u8 nexthdr; unsigned len; - unsigned offset; + int offset; int err; if (!pskb_may_pull(skb, sizeof(*ip6h))) @@ -1728,13 +1728,9 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val) int br_multicast_toggle(struct net_bridge *br, unsigned long val) { struct net_bridge_port *port; - int err = -ENOENT; + int err = 0; spin_lock(&br->multicast_lock); - if (!netif_running(br->dev)) - goto unlock; - - err = 0; if (br->multicast_disabled == !val) goto unlock; @@ -1742,6 +1738,9 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) if (br->multicast_disabled) goto unlock; + if (!netif_running(br->dev)) + goto unlock; + if (br->mdb) { if (br->mdb->old) { err = -EEXIST; diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8fb75f89c4aac6e79aa9f8f41cc3a226b176000b..2c911c0759c27bb6e4867b3bce9af0af3946d288 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -55,6 +55,9 @@ static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; #else +#define brnf_call_iptables 1 +#define brnf_call_ip6tables 1 +#define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 #endif @@ -117,26 +120,27 @@ void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; - atomic_set(&rt->u.dst.__refcnt, 1); - rt->u.dst.dev = br->dev; - rt->u.dst.path = &rt->u.dst; - rt->u.dst.metrics[RTAX_MTU - 1] = 1500; - rt->u.dst.flags = DST_NOXFRM; - rt->u.dst.ops = &fake_dst_ops; + atomic_set(&rt->dst.__refcnt, 1); + rt->dst.dev = br->dev; + rt->dst.path = &rt->dst; + rt->dst.metrics[RTAX_MTU - 1] = 1500; + rt->dst.flags = DST_NOXFRM; + rt->dst.ops = &fake_dst_ops; } static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { - struct net_bridge_port *port = rcu_dereference(dev->br_port); - - return port ? &port->br->fake_rtable : NULL; + if (!br_port_exists(dev)) + return NULL; + return &br_port_get_rcu(dev)->br->fake_rtable; } static inline struct net_device *bridge_parent(const struct net_device *dev) { - struct net_bridge_port *port = rcu_dereference(dev->br_port); + if (!br_port_exists(dev)) + return NULL; - return port ? 
port->br->dev : NULL; + return br_port_get_rcu(dev)->br->dev; } static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) @@ -244,8 +248,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) kfree_skb(skb); return 0; } - dst_hold(&rt->u.dst); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set_noref(skb, &rt->dst); skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); @@ -396,8 +399,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) kfree_skb(skb); return 0; } - dst_hold(&rt->u.dst); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set_noref(skb, &rt->dst); } skb->dev = nf_bridge->physindev; @@ -545,25 +547,30 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *out, int (*okfn)(struct sk_buff *)) { + struct net_bridge_port *p; + struct net_bridge *br; struct iphdr *iph; __u32 len = nf_bridge_encap_header_len(skb); if (unlikely(!pskb_may_pull(skb, len))) goto out; + p = br_port_get_rcu(in); + if (p == NULL) + goto out; + br = p->br; + if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) { -#ifdef CONFIG_SYSCTL - if (!brnf_call_ip6tables) + if (!brnf_call_ip6tables && !br->nf_call_ip6tables) return NF_ACCEPT; -#endif + nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } -#ifdef CONFIG_SYSCTL - if (!brnf_call_iptables) + + if (!brnf_call_iptables && !br->nf_call_iptables) return NF_ACCEPT; -#endif if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb)) @@ -719,12 +726,17 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb, const struct net_device *out, int (*okfn)(struct sk_buff *)) { + struct net_bridge_port *p; + struct net_bridge *br; struct net_device **d = (struct net_device **)(skb->cb); -#ifdef CONFIG_SYSCTL - if (!brnf_call_arptables) + p = br_port_get_rcu(out); + if (p == NULL) + return NF_ACCEPT; + br = p->br; + + if (!brnf_call_arptables && !br->nf_call_arptables) return NF_ACCEPT; -#endif if (skb->protocol != htons(ETH_P_ARP)) { if (!IS_VLAN_ARP(skb)) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index fe0a79018ab238e0b1adf826b87cf3a97184723b..4a6a378c84e357d06f45ae70fad7cd808c4e8bc8 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -120,10 +120,11 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) idx = 0; for_each_netdev(net, dev) { /* not a bridge port */ - if (dev->br_port == NULL || idx < cb->args[0]) + if (!br_port_exists(dev) || idx < cb->args[0]) goto skip; - if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid, + if (br_fill_ifinfo(skb, br_port_get(dev), + NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI) < 0) break; @@ -168,9 +169,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (!dev) return -ENODEV; - p = dev->br_port; - if (!p) + if (!br_port_exists(dev)) return -EINVAL; + p = br_port_get(dev); /* if kernel STP is running, don't allow changes */ if (p->br->stp_enabled == BR_KERNEL_STP) diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 717e1fd6133cbfcd3815e7f13d912597524faa9f..404d4e14c6a7702293521a668914765c3ec59daa 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -32,14 +32,15 @@ struct notifier_block br_device_notifier = { static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; - struct net_bridge_port *p = 
dev->br_port; + struct net_bridge_port *p = br_port_get(dev); struct net_bridge *br; int err; /* not a port of a bridge */ - if (p == NULL) + if (!br_port_exists(dev)) return NOTIFY_DONE; + p = br_port_get(dev); br = p->br; switch (event) { diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 0f4a74bc6a9b58e06b34194117577d7817b8be01..75c90edaf7db097432dd959069ed043a89d03262 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -15,6 +15,8 @@ #include #include +#include +#include #include #define BR_HASH_BITS 8 @@ -143,13 +145,23 @@ struct net_bridge_port #ifdef CONFIG_SYSFS char sysfs_name[IFNAMSIZ]; #endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *np; +#endif }; +#define br_port_get_rcu(dev) \ + ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data)) +#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data) +#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) + struct br_cpu_netstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; }; struct net_bridge @@ -164,6 +176,9 @@ struct net_bridge unsigned long feature_mask; #ifdef CONFIG_BRIDGE_NETFILTER struct rtable fake_rtable; + bool nf_call_iptables; + bool nf_call_ip6tables; + bool nf_call_arptables; #endif unsigned long flags; #define BR_SET_MAC_ADDR 0x00000001 @@ -273,16 +288,41 @@ extern void br_dev_setup(struct net_device *dev); extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER -extern void br_netpoll_cleanup(struct net_device *dev); -extern void br_netpoll_enable(struct net_bridge *br, - struct net_device *dev); -extern void br_netpoll_disable(struct net_bridge *br, - struct net_device *dev); +static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) +{ + return br->dev->npinfo; +} + +static inline void br_netpoll_send_skb(const struct net_bridge_port *p, + struct sk_buff *skb) +{ + struct netpoll *np = p->np; + + if (np) + netpoll_send_skb(np, skb); +} + +extern int br_netpoll_enable(struct net_bridge_port *p); +extern void br_netpoll_disable(struct net_bridge_port *p); #else -#define br_netpoll_cleanup(br) -#define br_netpoll_enable(br, dev) -#define br_netpoll_disable(br, dev) +static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) +{ + return NULL; +} + +static inline void br_netpoll_send_skb(const struct net_bridge_port *p, + struct sk_buff *skb) +{ +} +static inline int br_netpoll_enable(struct net_bridge_port *p) +{ + return 0; +} + +static inline void br_netpoll_disable(struct net_bridge_port *p) +{ +} #endif /* br_fdb.c */ @@ -331,8 +371,7 @@ extern void br_features_recompute(struct net_bridge *br); /* br_input.c */ extern int br_handle_frame_finish(struct sk_buff *skb); -extern struct sk_buff *br_handle_frame(struct net_bridge_port *p, - struct sk_buff *skb); +extern struct sk_buff *br_handle_frame(struct sk_buff *skb); /* br_ioctl.c */ extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 217bd225a42f1390deb46605039941bd6ea81d1f..35cf27087b561d6e9955fd75b4b03213a6e9e8d8 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -131,18 +131,19 @@ void br_send_tcn_bpdu(struct net_bridge_port *p) /* * Called from llc. 
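br_cpu_netstats above becomes a set of u64 counters guarded by a u64_stats_sync, so 32-bit readers cannot observe a torn 64-bit value: writers bracket each update, and readers retry if a write was in progress. This is what br_dev_xmit(), br_pass_frame_up() and br_get_stats64() in the earlier hunks do; a standalone sketch of the pattern with a hypothetical my_cpu_stats type:

    #include <linux/u64_stats_sync.h>

    struct my_cpu_stats {
            u64                     packets;
            u64                     bytes;
            struct u64_stats_sync   syncp;
    };

    /* Writer side, e.g. per packet on the transmit path. */
    static void my_stats_add(struct my_cpu_stats *s, unsigned int len)
    {
            u64_stats_update_begin(&s->syncp);
            s->packets++;
            s->bytes += len;
            u64_stats_update_end(&s->syncp);
    }

    /* Reader side: loop until no writer was active during the copy.
     * On 64-bit kernels this compiles down to plain reads.
     */
    static void my_stats_read(const struct my_cpu_stats *s,
                              u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    *packets = s->packets;
                    *bytes   = s->bytes;
            } while (u64_stats_fetch_retry(&s->syncp, start));
    }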
* - * NO locks, but rcu_read_lock (preempt_disabled) + * NO locks, but rcu_read_lock */ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, struct net_device *dev) { const unsigned char *dest = eth_hdr(skb)->h_dest; - struct net_bridge_port *p = rcu_dereference(dev->br_port); + struct net_bridge_port *p; struct net_bridge *br; const unsigned char *buf; - if (!p) + if (!br_port_exists(dev)) goto err; + p = br_port_get_rcu(dev); if (!pskb_may_pull(skb, 4)) goto err; diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 486b8f3861d2f67652015c74730fd2183110e03d..5c1e5559ebba23c9cd5a1f6e1c0d63280821284a 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -611,6 +611,73 @@ static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR, show_multicast_startup_query_interval, store_multicast_startup_query_interval); #endif +#ifdef CONFIG_BRIDGE_NETFILTER +static ssize_t show_nf_call_iptables( + struct device *d, struct device_attribute *attr, char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%u\n", br->nf_call_iptables); +} + +static int set_nf_call_iptables(struct net_bridge *br, unsigned long val) +{ + br->nf_call_iptables = val ? true : false; + return 0; +} + +static ssize_t store_nf_call_iptables( + struct device *d, struct device_attribute *attr, const char *buf, + size_t len) +{ + return store_bridge_parm(d, buf, len, set_nf_call_iptables); +} +static DEVICE_ATTR(nf_call_iptables, S_IRUGO | S_IWUSR, + show_nf_call_iptables, store_nf_call_iptables); + +static ssize_t show_nf_call_ip6tables( + struct device *d, struct device_attribute *attr, char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%u\n", br->nf_call_ip6tables); +} + +static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val) +{ + br->nf_call_ip6tables = val ? true : false; + return 0; +} + +static ssize_t store_nf_call_ip6tables( + struct device *d, struct device_attribute *attr, const char *buf, + size_t len) +{ + return store_bridge_parm(d, buf, len, set_nf_call_ip6tables); +} +static DEVICE_ATTR(nf_call_ip6tables, S_IRUGO | S_IWUSR, + show_nf_call_ip6tables, store_nf_call_ip6tables); + +static ssize_t show_nf_call_arptables( + struct device *d, struct device_attribute *attr, char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%u\n", br->nf_call_arptables); +} + +static int set_nf_call_arptables(struct net_bridge *br, unsigned long val) +{ + br->nf_call_arptables = val ? 
true : false; + return 0; +} + +static ssize_t store_nf_call_arptables( + struct device *d, struct device_attribute *attr, const char *buf, + size_t len) +{ + return store_bridge_parm(d, buf, len, set_nf_call_arptables); +} +static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR, + show_nf_call_arptables, store_nf_call_arptables); +#endif static struct attribute *bridge_attrs[] = { &dev_attr_forward_delay.attr, @@ -644,6 +711,11 @@ static struct attribute *bridge_attrs[] = { &dev_attr_multicast_query_interval.attr, &dev_attr_multicast_query_response_interval.attr, &dev_attr_multicast_startup_query_interval.attr, +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + &dev_attr_nf_call_iptables.attr, + &dev_attr_nf_call_ip6tables.attr, + &dev_attr_nf_call_arptables.attr, #endif NULL }; diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c index 9e19166ba4534321ad2a9139960fd247dd958e36..46624bb6d9be5f0ca26b2b845f44f1a857499291 100644 --- a/net/bridge/netfilter/ebt_redirect.c +++ b/net/bridge/netfilter/ebt_redirect.c @@ -24,8 +24,9 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) return EBT_DROP; if (par->hooknum != NF_BR_BROUTING) + /* rcu_read_lock()ed by nf_hook_slow */ memcpy(eth_hdr(skb)->h_dest, - par->in->br_port->br->dev->dev_addr, ETH_ALEN); + br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN); else memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN); skb->pkt_type = PACKET_HOST; diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index ae3c7cef1484ff16c0a5ff187e23ea4c799de775..26377e96fa1cf129d2b495c872647ca74281b5fd 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -177,8 +177,9 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, if (in) { strcpy(pm->physindev, in->name); /* If in isn't a bridge, then physindev==indev */ - if (in->br_port) - strcpy(pm->indev, in->br_port->br->dev->name); + if (br_port_exists(in)) + /* rcu_read_lock()ed by nf_hook_slow */ + strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name); else strcpy(pm->indev, in->name); } else @@ -187,7 +188,8 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, if (out) { /* If out exists, then out is a bridge port */ strcpy(pm->physoutdev, out->name); - strcpy(pm->outdev, out->br_port->br->dev->name); + /* rcu_read_lock()ed by nf_hook_slow */ + strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name); } else pm->outdev[0] = pm->physoutdev[0] = '\0'; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 59ca00e40dec2401b483bad4a7799775766e9f0d..bcc102e3be4daa2c5f09ee27710cc5ed03b096fa 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -140,11 +140,14 @@ ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h, return 1; if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT)) return 1; - if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check( - e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN)) + /* rcu_read_lock()ed by nf_hook_slow */ + if (in && br_port_exists(in) && + FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev), + EBT_ILOGICALIN)) return 1; - if ((!out || !out->br_port) ? 
0 : FWINV2(ebt_dev_check( - e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT)) + if (out && br_port_exists(out) && + FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev), + EBT_ILOGICALOUT)) return 1; if (e->bitmask & EBT_SOURCEMAC) { diff --git a/net/caif/Kconfig b/net/caif/Kconfig index ed651786f16b5ab231ab74c3be054cfeab7183ff..529750da962442a54491a9a88070cb2b136af590 100644 --- a/net/caif/Kconfig +++ b/net/caif/Kconfig @@ -21,19 +21,18 @@ menuconfig CAIF See Documentation/networking/caif for a further explanation on how to use and configure CAIF. -if CAIF - config CAIF_DEBUG bool "Enable Debug" + depends on CAIF default n --- help --- Enable the inclusion of debug code in the CAIF stack. Be aware that doing this will impact performance. If unsure say N. - config CAIF_NETDEV tristate "CAIF GPRS Network device" + depends on CAIF default CAIF ---help--- Say Y if you will be using a CAIF based GPRS network device. @@ -41,5 +40,3 @@ config CAIF_NETDEV If you select to build it as a built-in then the main CAIF device must also be a built-in. If unsure say Y. - -endif diff --git a/net/caif/Makefile b/net/caif/Makefile index 34852af2595e24befcfdf78e2e6ec674874bbd37..f87481fb0e65e01ec278351ea489342bd83a7f3e 100644 --- a/net/caif/Makefile +++ b/net/caif/Makefile @@ -1,23 +1,13 @@ -ifeq ($(CONFIG_CAIF_DEBUG),1) -CAIF_DBG_FLAGS := -DDEBUG +ifeq ($(CONFIG_CAIF_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG endif -ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS) - caif-objs := caif_dev.o \ cfcnfg.o cfmuxl.o cfctrl.o \ cffrml.o cfveil.o cfdbgl.o\ cfserl.o cfdgml.o \ cfrfml.o cfvidl.o cfutill.o \ cfsrvl.o cfpkt_skbuff.o caif_config_util.o -clean-dirs:= .tmp_versions - -clean-files:= \ - Module.symvers \ - modules.order \ - *.cmd \ - *.o \ - *~ obj-$(CONFIG_CAIF) += caif.o obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c index 6f36580366f0c2c5ecd1347f72098aaf06d78e5a..76ae68303d3a3675265e61eccc213a0c4526d4e1 100644 --- a/net/caif/caif_config_util.c +++ b/net/caif/caif_config_util.c @@ -79,6 +79,11 @@ int connect_req_to_link_param(struct cfcnfg *cnfg, memcpy(l->u.utility.params, s->param.data, l->u.utility.paramlen); + break; + case CAIFPROTO_DEBUG: + l->linktype = CFCTRL_SRV_DBG; + l->endpoint = s->sockaddr.u.dbg.service; + l->chtype = s->sockaddr.u.dbg.type; break; default: return -EINVAL; diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index e2b86f1f5a47c229885cdff2da7f7ec167688377..0b586e9d1378fb209e2f377354938788abfd1ad8 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -255,7 +255,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, pref = CFPHYPREF_HIGH_BW; break; } - + dev_hold(dev); cfcnfg_add_phy_layer(get_caif_conf(), phy_type, dev, @@ -285,6 +285,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, caifd->layer.up->ctrlcmd(caifd->layer.up, _CAIF_CTRLCMD_PHYIF_DOWN_IND, caifd->layer.id); + might_sleep(); res = wait_event_interruptible_timeout(caifd->event, atomic_read(&caifd->in_use) == 0, TIMEOUT); @@ -300,6 +301,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, "Unregistering an active CAIF device: %s\n", __func__, dev->name); cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); + dev_put(dev); atomic_set(&caifd->state, what); break; @@ -326,7 +328,8 @@ struct cfcnfg *get_caif_conf(void) EXPORT_SYMBOL(get_caif_conf); int caif_connect_client(struct caif_connect_request *conn_req, - struct cflayer *client_layer) + 
struct cflayer *client_layer, int *ifindex, + int *headroom, int *tailroom) { struct cfctrl_link_param param; int ret; @@ -334,8 +337,9 @@ int caif_connect_client(struct caif_connect_request *conn_req, if (ret) return ret; /* Hook up the adaptation layer. */ - return cfcnfg_add_adaptation_layer(get_caif_conf(), - ¶m, client_layer); + return cfcnfg_add_adaptation_layer(get_caif_conf(), ¶m, + client_layer, ifindex, + headroom, tailroom); } EXPORT_SYMBOL(caif_connect_client); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 3d0e09584fae59611c1599503356eef19f7c0379..8ce9047861166740a17a2448cb1d4f668fba1d21 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -28,8 +28,8 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(AF_CAIF); -#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10) -#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100) +#define CAIF_DEF_SNDBUF (4096*10) +#define CAIF_DEF_RCVBUF (4096*100) /* * CAIF state is re-using the TCP socket states. @@ -76,6 +76,7 @@ struct caifsock { struct caif_connect_request conn_req; struct mutex readlock; struct dentry *debugfs_socket_dir; + int headroom, tailroom, maxframe; }; static int rx_flow_is_on(struct caifsock *cf_sk) @@ -594,27 +595,32 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, goto err; noblock = msg->msg_flags & MSG_DONTWAIT; - buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM; - - ret = -EMSGSIZE; - if (buffer_size > CAIF_MAX_PAYLOAD_SIZE) - goto err; - timeo = sock_sndtimeo(sk, noblock); timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk), 1, timeo, &ret); + if (ret) + goto err; ret = -EPIPE; if (cf_sk->sk.sk_state != CAIF_CONNECTED || sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) goto err; + /* Error if trying to write more than maximum frame size. 
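The CAIF socket code above stops hard-coding CAIF_MAX_PAYLOAD_SIZE: each connected socket now remembers the link's headroom and tailroom, caif_connect() later in this hunk derives maxframe from the device MTU, and the sendmsg paths refuse oversized writes with -EMSGSIZE before any skb is allocated. A small worked sketch of that arithmetic, with made-up numbers:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Illustrative only; mirrors the maxframe bookkeeping used above. */
    static int caif_payload_fits(size_t len, int mtu,
                                 int headroom, int tailroom)
    {
            int maxframe = mtu - (headroom + tailroom);

            if (maxframe < 1)
                    return -ENODEV;         /* link MTU too small */
            if (len > maxframe)
                    return -EMSGSIZE;       /* reject before allocating */
            return 0;
    }

    /* e.g. mtu = 1500, headroom = 4, tailroom = 2 gives maxframe = 1494 */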
*/ + ret = -EMSGSIZE; + if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM) + goto err; + + buffer_size = len + cf_sk->headroom + cf_sk->tailroom; + ret = -ENOMEM; skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret); - if (!skb) + + if (!skb || skb_tailroom(skb) < buffer_size) goto err; - skb_reserve(skb, CAIF_NEEDED_HEADROOM); + + skb_reserve(skb, cf_sk->headroom); ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); @@ -645,7 +651,6 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, long timeo; err = -EOPNOTSUPP; - if (unlikely(msg->msg_flags&MSG_OOB)) goto out_err; @@ -662,8 +667,8 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, size = len-sent; - if (size > CAIF_MAX_PAYLOAD_SIZE) - size = CAIF_MAX_PAYLOAD_SIZE; + if (size > cf_sk->maxframe) + size = cf_sk->maxframe; /* If size is more than half of sndbuf, chop up message */ if (size > ((sk->sk_sndbuf >> 1) - 64)) @@ -673,14 +678,14 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, size = SKB_MAX_ALLOC; skb = sock_alloc_send_skb(sk, - size + CAIF_NEEDED_HEADROOM - + CAIF_NEEDED_TAILROOM, + size + cf_sk->headroom + + cf_sk->tailroom, msg->msg_flags&MSG_DONTWAIT, &err); if (skb == NULL) goto out_err; - skb_reserve(skb, CAIF_NEEDED_HEADROOM); + skb_reserve(skb, cf_sk->headroom); /* * If you pass two values to the sock_alloc_send_skb * it tries to grab the large buffer with GFP_NOFS @@ -821,17 +826,15 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); long timeo; int err; + int ifindex, headroom, tailroom; + struct net_device *dev; + lock_sock(sk); err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out; - err = -ESOCKTNOSUPPORT; - if (unlikely(!(sk->sk_type == SOCK_STREAM && - cf_sk->sk.sk_protocol == CAIFPROTO_AT) && - sk->sk_type != SOCK_SEQPACKET)) - goto out; switch (sock->state) { case SS_UNCONNECTED: /* Normal case, a fresh connect */ @@ -874,8 +877,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, sk_stream_kill_queues(&cf_sk->sk); err = -EINVAL; - if (addr_len != sizeof(struct sockaddr_caif) || - !uaddr) + if (addr_len != sizeof(struct sockaddr_caif)) goto out; memcpy(&cf_sk->conn_req.sockaddr, uaddr, @@ -888,12 +890,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, dbfs_atomic_inc(&cnt.num_connect_req); cf_sk->layer.receive = caif_sktrecv_cb; err = caif_connect_client(&cf_sk->conn_req, - &cf_sk->layer); + &cf_sk->layer, &ifindex, &headroom, &tailroom); if (err < 0) { cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; goto out; } + dev = dev_get_by_index(sock_net(sk), ifindex); + cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); + cf_sk->tailroom = tailroom; + cf_sk->maxframe = dev->mtu - (headroom + tailroom); + dev_put(dev); + if (cf_sk->maxframe < 1) { + pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n", + __func__, dev->mtu); + err = -ENODEV; + goto out; + } err = -EINPROGRESS; wait_connect: diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index df43f264d9fbac36633934b0081c239ab7830c3d..1c29189b344daa1d885b56f09a9d72871129b7ab 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ #define PHY_NAME_LEN 20 #define container_obj(layr) container_of(layr, struct cfcnfg, layer) +#define RFM_FRAGMENT_SIZE 4030 /* Information about CAIF 
physical interfaces held by Config Module in order * to manage physical interfaces @@ -41,6 +43,15 @@ struct cfcnfg_phyinfo { /* Information about the physical device */ struct dev_info dev_info; + + /* Interface index */ + int ifindex; + + /* Use Start of frame extension */ + bool use_stx; + + /* Use Start of frame checksum */ + bool use_fcs; }; struct cfcnfg { @@ -248,9 +259,20 @@ static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id) { } +int protohead[CFCTRL_SRV_MASK] = { + [CFCTRL_SRV_VEI] = 4, + [CFCTRL_SRV_DATAGRAM] = 7, + [CFCTRL_SRV_UTIL] = 4, + [CFCTRL_SRV_RFM] = 3, + [CFCTRL_SRV_DBG] = 3, +}; + int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, struct cfctrl_link_param *param, - struct cflayer *adap_layer) + struct cflayer *adap_layer, + int *ifindex, + int *proto_head, + int *proto_tail) { struct cflayer *frml; if (adap_layer == NULL) { @@ -276,6 +298,14 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, param->phyid); caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == param->phyid); + + *ifindex = cnfg->phy_layers[param->phyid].ifindex; + *proto_head = + protohead[param->linktype]+ + (cnfg->phy_layers[param->phyid].use_stx ? 1 : 0); + + *proto_tail = 2; + /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ cfctrl_enum_req(cnfg->ctrl, param->phyid); return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); @@ -297,6 +327,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, struct cfcnfg *cnfg = container_obj(layer); struct cflayer *servicel = NULL; struct cfcnfg_phyinfo *phyinfo; + struct net_device *netdev; + if (adapt_layer == NULL) { pr_debug("CAIF: %s(): link setup response " "but no client exist, send linkdown back\n", @@ -308,19 +340,15 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, caif_assert(cnfg != NULL); caif_assert(phyid != 0); phyinfo = &cnfg->phy_layers[phyid]; - caif_assert(phyinfo != NULL); caif_assert(phyinfo->id == phyid); caif_assert(phyinfo->phy_layer != NULL); caif_assert(phyinfo->phy_layer->id == phyid); - if (phyinfo != NULL && - phyinfo->phy_ref_count++ == 0 && - phyinfo->phy_layer != NULL && + phyinfo->phy_ref_count++; + if (phyinfo->phy_ref_count == 1 && phyinfo->phy_layer->modemcmd != NULL) { - caif_assert(phyinfo->phy_layer->id == phyid); phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, _CAIF_MODEMCMD_PHYIF_USEFULL); - } adapt_layer->id = channel_id; @@ -332,7 +360,9 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, servicel = cfdgml_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_RFM: - servicel = cfrfml_create(channel_id, &phyinfo->dev_info); + netdev = phyinfo->dev_info.dev; + servicel = cfrfml_create(channel_id, &phyinfo->dev_info, + netdev->mtu); break; case CFCTRL_SRV_UTIL: servicel = cfutill_create(channel_id, &phyinfo->dev_info); @@ -363,8 +393,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, void cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, - void *dev, struct cflayer *phy_layer, u16 *phyid, - enum cfcnfg_phy_preference pref, + struct net_device *dev, struct cflayer *phy_layer, + u16 *phyid, enum cfcnfg_phy_preference pref, bool fcs, bool stx) { struct cflayer *frml; @@ -418,6 +448,10 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, cnfg->phy_layers[*phyid].dev_info.dev = dev; cnfg->phy_layers[*phyid].phy_layer = phy_layer; cnfg->phy_layers[*phyid].phy_ref_count = 0; + 
cnfg->phy_layers[*phyid].ifindex = dev->ifindex; + cnfg->phy_layers[*phyid].use_stx = stx; + cnfg->phy_layers[*phyid].use_fcs = fcs; + phy_layer->type = phy_type; frml = cffrml_create(*phyid, fcs); if (!frml) { diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index fcfda98a5e6d3e5eb2f01aa85912acc6982a1e1f..563145fdc4c368de2c50c8d9d3510742a7d2e658 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -19,7 +19,7 @@ #ifdef CAIF_NO_LOOP static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt){ - return CAIF_FAILURE; + return -1; } #else static int handle_loop(struct cfctrl *ctrl, @@ -43,7 +43,7 @@ struct cflayer *cfctrl_create(void) memset(&dev_info, 0, sizeof(dev_info)); dev_info.id = 0xff; memset(this, 0, sizeof(*this)); - cfsrvl_init(&this->serv, 0, &dev_info); + cfsrvl_init(&this->serv, 0, &dev_info, false); atomic_set(&this->req_seq_no, 1); atomic_set(&this->rsp_seq_no, 1); this->serv.layer.receive = cfctrl_recv; @@ -395,7 +395,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) cmd = cmdrsp & CFCTRL_CMD_MASK; if (cmd != CFCTRL_CMD_LINK_ERR && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { - if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) + if (handle_loop(cfctrl, cmd, pkt) != 0) cmdrsp |= CFCTRL_ERR_BIT; } @@ -647,6 +647,6 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) default: break; } - return CAIF_SUCCESS; + return 0; } #endif diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c index ab6b6dc34cf84306fb806e0b144a107aa20e3421..676648cac8ddf3aa77f8659187b6aab8738388a0 100644 --- a/net/caif/cfdbgl.c +++ b/net/caif/cfdbgl.c @@ -22,7 +22,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) } caif_assert(offsetof(struct cfsrvl, layer) == 0); memset(dbg, 0, sizeof(struct cfsrvl)); - cfsrvl_init(dbg, channel_id, dev_info); + cfsrvl_init(dbg, channel_id, dev_info, false); dbg->layer.receive = cfdbgl_receive; dbg->layer.transmit = cfdbgl_transmit; snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c index 53194840ecb6cc8464b7a5cba4258dd27f0c6415..ed9d53aff28053e5dd98bbaf9eb1ad8d3bb6b591 100644 --- a/net/caif/cfdgml.c +++ b/net/caif/cfdgml.c @@ -17,6 +17,7 @@ #define DGM_FLOW_OFF 0x81 #define DGM_FLOW_ON 0x80 #define DGM_CTRL_PKT_SIZE 1 +#define DGM_MTU 1500 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt); @@ -30,7 +31,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info) } caif_assert(offsetof(struct cfsrvl, layer) == 0); memset(dgm, 0, sizeof(struct cfsrvl)); - cfsrvl_init(dgm, channel_id, dev_info); + cfsrvl_init(dgm, channel_id, dev_info, true); dgm->layer.receive = cfdgml_receive; dgm->layer.transmit = cfdgml_transmit; snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id); @@ -89,6 +90,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) if (!cfsrvl_ready(service, &ret)) return ret; + /* STE Modem cannot handle more than 1500 bytes datagrams */ + if (cfpkt_getlen(pkt) > DGM_MTU) + return -EMSGSIZE; + cfpkt_add_head(pkt, &zero, 4); /* Add info for MUX-layer to route the packet out. 
*/ diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index a6fdf899741a5298fb92733f18ae359b4c0c3fbe..01f238ff23466912ad1c243b6ca88b8a0c998f8b 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -9,8 +9,8 @@ #include #include -#define PKT_PREFIX CAIF_NEEDED_HEADROOM -#define PKT_POSTFIX CAIF_NEEDED_TAILROOM +#define PKT_PREFIX 16 +#define PKT_POSTFIX 2 #define PKT_LEN_WHEN_EXTENDING 128 #define PKT_ERROR(pkt, errmsg) do { \ cfpkt_priv(pkt)->erronous = true; \ @@ -338,7 +338,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, u16 dstlen; u16 createlen; if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { - cfpkt_destroy(addpkt); return dstpkt; } if (expectlen > addlen) diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index fd27b172fb5d444ffdf5664312f0b0c86b4836cd..eb1602022ac0643af4e4ad655c3de4ae42a200f4 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -7,102 +7,304 @@ #include #include #include +#include #include #include #include -#define container_obj(layr) container_of(layr, struct cfsrvl, layer) - +#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer) #define RFM_SEGMENTATION_BIT 0x01 -#define RFM_PAYLOAD 0x00 -#define RFM_CMD_BIT 0x80 -#define RFM_FLOW_OFF 0x81 -#define RFM_FLOW_ON 0x80 -#define RFM_SET_PIN 0x82 -#define RFM_CTRL_PKT_SIZE 1 +#define RFM_HEAD_SIZE 7 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt); -static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl); -struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info) +struct cfrfml { + struct cfsrvl serv; + struct cfpkt *incomplete_frm; + int fragment_size; + u8 seghead[6]; + u16 pdu_size; + /* Protects serialized processing of packets */ + spinlock_t sync; +}; + +static void cfrfml_release(struct kref *kref) +{ + struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref); + struct cfrfml *rfml = container_obj(&srvl->layer); + + if (rfml->incomplete_frm) + cfpkt_destroy(rfml->incomplete_frm); + + kfree(srvl); +} + +struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info, + int mtu_size) { - struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); - if (!rfm) { + int tmp; + struct cfrfml *this = + kzalloc(sizeof(struct cfrfml), GFP_ATOMIC); + + if (!this) { pr_warning("CAIF: %s(): Out of memory\n", __func__); return NULL; } - caif_assert(offsetof(struct cfsrvl, layer) == 0); - memset(rfm, 0, sizeof(struct cfsrvl)); - cfsrvl_init(rfm, channel_id, dev_info); - rfm->layer.modemcmd = cfservl_modemcmd; - rfm->layer.receive = cfrfml_receive; - rfm->layer.transmit = cfrfml_transmit; - snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id); - return &rfm->layer; + + cfsrvl_init(&this->serv, channel_id, dev_info, false); + this->serv.release = cfrfml_release; + this->serv.layer.receive = cfrfml_receive; + this->serv.layer.transmit = cfrfml_transmit; + + /* Round down to closest multiple of 16 */ + tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16; + tmp *= 16; + + this->fragment_size = tmp; + spin_lock_init(&this->sync); + snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ, + "rfm%d", channel_id); + + return &this->serv.layer; } -static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) +static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead, + struct cfpkt *pkt, int *err) { - return -EPROTO; + struct cfpkt *tmppkt; + *err = -EPROTO; + /* n-th but not last segment */ + + if 
(cfpkt_extr_head(pkt, seghead, 6) < 0) + return NULL; + + /* Verify correct header */ + if (memcmp(seghead, rfml->seghead, 6) != 0) + return NULL; + + tmppkt = cfpkt_append(rfml->incomplete_frm, pkt, + rfml->pdu_size + RFM_HEAD_SIZE); + + /* If cfpkt_append failes input pkts are not freed */ + *err = -ENOMEM; + if (tmppkt == NULL) + return NULL; + + *err = 0; + return tmppkt; } static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt) { u8 tmp; bool segmented; - int ret; + int err; + u8 seghead[6]; + struct cfrfml *rfml; + struct cfpkt *tmppkt = NULL; + caif_assert(layr->up != NULL); caif_assert(layr->receive != NULL); + rfml = container_obj(layr); + spin_lock(&rfml->sync); + + err = -EPROTO; + if (cfpkt_extr_head(pkt, &tmp, 1) < 0) + goto out; + segmented = tmp & RFM_SEGMENTATION_BIT; + + if (segmented) { + if (rfml->incomplete_frm == NULL) { + /* Initial Segment */ + if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0) + goto out; + + rfml->pdu_size = get_unaligned_le16(rfml->seghead+4); + + if (cfpkt_erroneous(pkt)) + goto out; + rfml->incomplete_frm = pkt; + pkt = NULL; + } else { + + tmppkt = rfm_append(rfml, seghead, pkt, &err); + if (tmppkt == NULL) + goto out; + + if (cfpkt_erroneous(tmppkt)) + goto out; + + rfml->incomplete_frm = tmppkt; + + + if (cfpkt_erroneous(tmppkt)) + goto out; + } + err = 0; + goto out; + } + + if (rfml->incomplete_frm) { + + /* Last Segment */ + tmppkt = rfm_append(rfml, seghead, pkt, &err); + if (tmppkt == NULL) + goto out; + + if (cfpkt_erroneous(tmppkt)) + goto out; + + rfml->incomplete_frm = NULL; + pkt = tmppkt; + tmppkt = NULL; + + /* Verify that length is correct */ + err = EPROTO; + if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1) + goto out; + } + + err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt); + +out: + + if (err != 0) { + if (tmppkt) + cfpkt_destroy(tmppkt); + if (pkt) + cfpkt_destroy(pkt); + if (rfml->incomplete_frm) + cfpkt_destroy(rfml->incomplete_frm); + rfml->incomplete_frm = NULL; + + pr_info("CAIF: %s(): " + "Connection error %d triggered on RFM link\n", + __func__, err); + + /* Trigger connection error upon failure.*/ + layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, + rfml->serv.dev_info.id); + } + spin_unlock(&rfml->sync); + return err; +} + + +static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) +{ + caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size); + + /* Add info for MUX-layer to route the packet out. */ + cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; /* - * RFM is taking care of segmentation and stripping of - * segmentation bit. + * To optimize alignment, we add up the size of CAIF header before + * payload. 
*/ - if (cfpkt_extr_head(pkt, &tmp, 1) < 0) { - pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); - cfpkt_destroy(pkt); - return -EPROTO; - } - segmented = tmp & RFM_SEGMENTATION_BIT; - caif_assert(!segmented); + cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE; + cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info; - ret = layr->up->receive(layr->up, pkt); - return ret; + return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt); } static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) { - u8 tmp = 0; - int ret; - struct cfsrvl *service = container_obj(layr); + int err; + u8 seg; + u8 head[6]; + struct cfpkt *rearpkt = NULL; + struct cfpkt *frontpkt = pkt; + struct cfrfml *rfml = container_obj(layr); caif_assert(layr->dn != NULL); caif_assert(layr->dn->transmit != NULL); - if (!cfsrvl_ready(service, &ret)) - return ret; + if (!cfsrvl_ready(&rfml->serv, &err)) + return err; + + err = -EPROTO; + if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1) + goto out; + + err = 0; + if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE) + err = cfpkt_peek_head(pkt, head, 6); + + if (err < 0) + goto out; + + while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) { + + seg = 1; + err = -EPROTO; + + if (cfpkt_add_head(frontpkt, &seg, 1) < 0) + goto out; + /* + * On OOM error cfpkt_split returns NULL. + * + * NOTE: Segmented pdu is not correctly aligned. + * This has negative performance impact. + */ + + rearpkt = cfpkt_split(frontpkt, rfml->fragment_size); + if (rearpkt == NULL) + goto out; + + err = cfrfml_transmit_segment(rfml, frontpkt); + + if (err != 0) + goto out; + frontpkt = rearpkt; + rearpkt = NULL; + + err = -ENOMEM; + if (frontpkt == NULL) + goto out; + err = -EPROTO; + if (cfpkt_add_head(frontpkt, head, 6) < 0) + goto out; - if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { - pr_err("CAIF: %s():Packet too large - size=%d\n", - __func__, cfpkt_getlen(pkt)); - return -EOVERFLOW; } - if (cfpkt_add_head(pkt, &tmp, 1) < 0) { - pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); - return -EPROTO; + + seg = 0; + err = -EPROTO; + + if (cfpkt_add_head(frontpkt, &seg, 1) < 0) + goto out; + + err = cfrfml_transmit_segment(rfml, frontpkt); + + frontpkt = NULL; +out: + + if (err != 0) { + pr_info("CAIF: %s(): " + "Connection error %d triggered on RFM link\n", + __func__, err); + /* Trigger connection error upon failure.*/ + + layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, + rfml->serv.dev_info.id); + + if (rearpkt) + cfpkt_destroy(rearpkt); + + if (frontpkt && frontpkt != pkt) { + + cfpkt_destroy(frontpkt); + /* + * Socket layer will free the original packet, + * but this packet may already be sent and + * freed. So we have to return 0 in this case + * to avoid socket layer to re-free this packet. + * The return of shutdown indication will + * cause connection to be invalidated anyhow. + */ + err = 0; + } } - /* Add info for MUX-layer to route the packet out. */ - cfpkt_info(pkt)->channel_id = service->layer.id; - /* - * To optimize alignment, we add up the size of CAIF header before - * payload. 
- */ - cfpkt_info(pkt)->hdr_len = 1; - cfpkt_info(pkt)->dev_info = &service->dev_info; - ret = layr->dn->transmit(layr->dn, pkt); - if (ret < 0) - cfpkt_extr_head(pkt, &tmp, 1); - return ret; + return err; } diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index 965c5baace40b10c401c8fff3ecaaacb612c0aa6..a11fbd68a13dec6debe45e647122523400f9f5a1 100644 --- a/net/caif/cfserl.c +++ b/net/caif/cfserl.c @@ -14,7 +14,8 @@ #define container_obj(layr) ((struct cfserl *) layr) #define CFSERL_STX 0x02 -#define CAIF_MINIUM_PACKET_SIZE 4 +#define SERIAL_MINIUM_PACKET_SIZE 4 +#define SERIAL_MAX_FRAMESIZE 4096 struct cfserl { struct cflayer layer; struct cfpkt *incomplete_frm; @@ -119,8 +120,8 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) /* * Frame error handling */ - if (expectlen < CAIF_MINIUM_PACKET_SIZE - || expectlen > CAIF_MAX_FRAMESIZE) { + if (expectlen < SERIAL_MINIUM_PACKET_SIZE + || expectlen > SERIAL_MAX_FRAMESIZE) { if (!layr->usestx) { if (pkt != NULL) cfpkt_destroy(pkt); diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index 6e5b7079a68424ff414a5f33d0500267b59d489e..f40939a91211b8e2b6ba985f2e1562064723e681 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -24,8 +24,10 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid) { struct cfsrvl *service = container_obj(layr); + caif_assert(layr->up != NULL); caif_assert(layr->up->ctrlcmd != NULL); + switch (ctrl) { case CAIF_CTRLCMD_INIT_RSP: service->open = true; @@ -89,9 +91,14 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) { struct cfsrvl *service = container_obj(layr); + caif_assert(layr != NULL); caif_assert(layr->dn != NULL); caif_assert(layr->dn->transmit != NULL); + + if (!service->supports_flowctrl) + return 0; + switch (ctrl) { case CAIF_MODEMCMD_FLOW_ON_REQ: { @@ -152,9 +159,17 @@ void cfservl_destroy(struct cflayer *layer) kfree(layer); } +void cfsrvl_release(struct kref *kref) +{ + struct cfsrvl *service = container_of(kref, struct cfsrvl, ref); + kfree(service); +} + void cfsrvl_init(struct cfsrvl *service, - u8 channel_id, - struct dev_info *dev_info) + u8 channel_id, + struct dev_info *dev_info, + bool supports_flowctrl + ) { caif_assert(offsetof(struct cfsrvl, layer) == 0); service->open = false; @@ -164,14 +179,11 @@ void cfsrvl_init(struct cfsrvl *service, service->layer.ctrlcmd = cfservl_ctrlcmd; service->layer.modemcmd = cfservl_modemcmd; service->dev_info = *dev_info; + service->supports_flowctrl = supports_flowctrl; + service->release = cfsrvl_release; kref_init(&service->ref); } -void cfsrvl_release(struct kref *kref) -{ - struct cfsrvl *service = container_of(kref, struct cfsrvl, ref); - kfree(service); -} bool cfsrvl_ready(struct cfsrvl *service, int *err) { diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c index 5fd2c9ea8b42a8dc99e570326ebb60c7f50f2667..02795aff57a426fbbd1080214f46c5a1fb569d34 100644 --- a/net/caif/cfutill.c +++ b/net/caif/cfutill.c @@ -31,7 +31,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info) } caif_assert(offsetof(struct cfsrvl, layer) == 0); memset(util, 0, sizeof(struct cfsrvl)); - cfsrvl_init(util, channel_id, dev_info); + cfsrvl_init(util, channel_id, dev_info, true); util->layer.receive = cfutill_receive; util->layer.transmit = cfutill_transmit; snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1"); @@ -90,12 +90,6 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt) 
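For reference, a minimal standalone sketch (not kernel code) of the fragment-size arithmetic performed by cfrfml_create() in the net/caif/cfrfml.c hunks above: the usable payload per RFM segment is the link-layer MTU minus the 7-byte RFM header and the 6-byte segmentation head, rounded down to the closest multiple of 16. The helper name and the userspace driver below are illustrative only.

    #include <stdio.h>

    #define RFM_HEAD_SIZE 7        /* as defined in net/caif/cfrfml.c above */

    /* Mirrors the computation in cfrfml_create(): subtract the RFM header
     * and the 6-byte segmentation head, then round down to a multiple of 16. */
    static int rfm_fragment_size(int mtu_size)
    {
            return ((mtu_size - RFM_HEAD_SIZE - 6) / 16) * 16;
    }

    int main(void)
    {
            printf("%d\n", rfm_fragment_size(1500));  /* prints 1472 */
            return 0;
    }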
if (!cfsrvl_ready(service, &ret)) return ret; - if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { - pr_err("CAIF: %s(): packet too large size=%d\n", - __func__, cfpkt_getlen(pkt)); - return -EOVERFLOW; - } - cfpkt_add_head(pkt, &zero, 1); /* Add info for MUX-layer to route the packet out. */ info = cfpkt_info(pkt); diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c index e04f7d964e83ea8d0c88308bb0fcd2e3dd9dfbd6..77cc09faac9a305cfa4a05554ad95bea6d52e08e 100644 --- a/net/caif/cfveil.c +++ b/net/caif/cfveil.c @@ -30,7 +30,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) } caif_assert(offsetof(struct cfsrvl, layer) == 0); memset(vei, 0, sizeof(struct cfsrvl)); - cfsrvl_init(vei, channel_id, dev_info); + cfsrvl_init(vei, channel_id, dev_info, true); vei->layer.receive = cfvei_receive; vei->layer.transmit = cfvei_transmit; snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id); @@ -84,11 +84,6 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) return ret; caif_assert(layr->dn != NULL); caif_assert(layr->dn->transmit != NULL); - if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { - pr_warning("CAIF: %s(): Packet too large - size=%d\n", - __func__, cfpkt_getlen(pkt)); - return -EOVERFLOW; - } if (cfpkt_add_head(pkt, &tmp, 1) < 0) { pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c index 89ad4ea239f1f7105394787fca4af8d19d410b84..ada6ee2d48f50a3ab4df69f102778535c4292eba 100644 --- a/net/caif/cfvidl.c +++ b/net/caif/cfvidl.c @@ -27,7 +27,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info) caif_assert(offsetof(struct cfsrvl, layer) == 0); memset(vid, 0, sizeof(struct cfsrvl)); - cfsrvl_init(vid, channel_id, dev_info); + cfsrvl_init(vid, channel_id, dev_info, false); vid->layer.receive = cfvidl_receive; vid->layer.transmit = cfvidl_transmit; snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1"); diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 610966abe2dcf667bd3303a325bc71436915ce5e..4293e190ec5328f1da7f40065c9eefcdff5d1f66 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -23,7 +23,7 @@ #include /* GPRS PDP connection has MTU to 1500 */ -#define SIZE_MTU 1500 +#define GPRS_PDP_MTU 1500 /* 5 sec. connect timeout */ #define CONNECT_TIMEOUT (5 * HZ) #define CAIF_NET_DEFAULT_QUEUE_LEN 500 @@ -232,6 +232,8 @@ static int chnl_net_open(struct net_device *dev) { struct chnl_net *priv = NULL; int result = -1; + int llifindex, headroom, tailroom, mtu; + struct net_device *lldev; ASSERT_RTNL(); priv = netdev_priv(dev); if (!priv) { @@ -241,41 +243,88 @@ static int chnl_net_open(struct net_device *dev) if (priv->state != CAIF_CONNECTING) { priv->state = CAIF_CONNECTING; - result = caif_connect_client(&priv->conn_req, &priv->chnl); + result = caif_connect_client(&priv->conn_req, &priv->chnl, + &llifindex, &headroom, &tailroom); if (result != 0) { - priv->state = CAIF_DISCONNECTED; pr_debug("CAIF: %s(): err: " "Unable to register and open device," " Err:%d\n", __func__, result); - return result; + goto error; + } + + lldev = dev_get_by_index(dev_net(dev), llifindex); + + if (lldev == NULL) { + pr_debug("CAIF: %s(): no interface?\n", __func__); + result = -ENODEV; + goto error; + } + + dev->needed_tailroom = tailroom + lldev->needed_tailroom; + dev->hard_header_len = headroom + lldev->hard_header_len + + lldev->needed_tailroom; + + /* + * MTU, head-room etc is not know before we have a + * CAIF link layer device available. 
MTU calculation may + * override initial RTNL configuration. + * MTU is minimum of current mtu, link layer mtu pluss + * CAIF head and tail, and PDP GPRS contexts max MTU. + */ + mtu = min_t(int, dev->mtu, lldev->mtu - (headroom + tailroom)); + mtu = min_t(int, GPRS_PDP_MTU, mtu); + dev_set_mtu(dev, mtu); + dev_put(lldev); + + if (mtu < 100) { + pr_warning("CAIF: %s(): " + "CAIF Interface MTU too small (%d)\n", + __func__, mtu); + result = -ENODEV; + goto error; } } + rtnl_unlock(); /* Release RTNL lock during connect wait */ + result = wait_event_interruptible_timeout(priv->netmgmt_wq, priv->state != CAIF_CONNECTING, CONNECT_TIMEOUT); + rtnl_lock(); + if (result == -ERESTARTSYS) { pr_debug("CAIF: %s(): wait_event_interruptible" " woken by a signal\n", __func__); - return -ERESTARTSYS; + result = -ERESTARTSYS; + goto error; } + if (result == 0) { pr_debug("CAIF: %s(): connect timeout\n", __func__); caif_disconnect_client(&priv->chnl); priv->state = CAIF_DISCONNECTED; pr_debug("CAIF: %s(): state disconnected\n", __func__); - return -ETIMEDOUT; + result = -ETIMEDOUT; + goto error; } if (priv->state != CAIF_CONNECTED) { pr_debug("CAIF: %s(): connect failed\n", __func__); - return -ECONNREFUSED; + result = -ECONNREFUSED; + goto error; } pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__); return 0; + +error: + caif_disconnect_client(&priv->chnl); + priv->state = CAIF_DISCONNECTED; + pr_debug("CAIF: %s(): state disconnected\n", __func__); + return result; + } static int chnl_net_stop(struct net_device *dev) @@ -321,9 +370,7 @@ static void ipcaif_net_setup(struct net_device *dev) dev->destructor = free_netdev; dev->flags |= IFF_NOARP; dev->flags |= IFF_POINTOPOINT; - dev->needed_headroom = CAIF_NEEDED_HEADROOM; - dev->needed_tailroom = CAIF_NEEDED_TAILROOM; - dev->mtu = SIZE_MTU; + dev->mtu = GPRS_PDP_MTU; dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN; priv = netdev_priv(dev); diff --git a/net/can/raw.c b/net/can/raw.c index da99cf153b33737ce45a06e71fc89037b96096b3..a10e3338f084aaf15eb73b40d46f1f28e96aaaf7 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -436,14 +436,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (count > 1) { /* filter does not fit into dfilter => alloc space */ - filter = kmalloc(optlen, GFP_KERNEL); - if (!filter) - return -ENOMEM; - - if (copy_from_user(filter, optval, optlen)) { - kfree(filter); - return -EFAULT; - } + filter = memdup_user(optval, optlen); + if (IS_ERR(filter)) + return PTR_ERR(filter); } else if (count == 1) { if (copy_from_user(&sfilter, optval, sizeof(sfilter))) return -EFAULT; @@ -655,6 +650,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, err = sock_tx_timestamp(msg, sk, skb_tx(skb)); if (err < 0) goto free_skb; + + /* to be able to check the received tx sock reference in raw_rcv() */ + skb_tx(skb)->prevent_sk_orphan = 1; + skb->dev = dev; skb->sk = sk; diff --git a/net/compat.c b/net/compat.c index ec24d9edb0258ecda760deaf80eaa211f844feef..63d260e8147290520212f50f1df27e638af9fe9e 100644 --- a/net/compat.c +++ b/net/compat.c @@ -81,7 +81,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, int tot_len; if (kern_msg->msg_namelen) { - if (mode==VERIFY_READ) { + if (mode == VERIFY_READ) { int err = move_addr_to_kernel(kern_msg->msg_name, kern_msg->msg_namelen, kern_address); @@ -354,7 +354,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname, static int do_set_sock_timeout(struct socket *sock, int level, int optname, char __user *optval, 
unsigned int optlen) { - struct compat_timeval __user *up = (struct compat_timeval __user *) optval; + struct compat_timeval __user *up = (struct compat_timeval __user *)optval; struct timeval ktime; mm_segment_t old_fs; int err; @@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level, return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); - err = sock_setsockopt(sock, level, optname, (char *) &ktime, sizeof(ktime)); + err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); set_fs(old_fs); return err; @@ -389,11 +389,10 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, char __user *optval, unsigned int optlen) { int err; - struct socket *sock; + struct socket *sock = sockfd_lookup(fd, &err); - if ((sock = sockfd_lookup(fd, &err))!=NULL) - { - err = security_socket_setsockopt(sock,level,optname); + if (sock) { + err = security_socket_setsockopt(sock, level, optname); if (err) { sockfd_put(sock); return err; @@ -453,7 +452,7 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname, int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) { struct compat_timeval __user *ctv = - (struct compat_timeval __user*) userstamp; + (struct compat_timeval __user *) userstamp; int err = -ENOENT; struct timeval tv; @@ -477,7 +476,7 @@ EXPORT_SYMBOL(compat_sock_get_timestamp); int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) { struct compat_timespec __user *ctv = - (struct compat_timespec __user*) userstamp; + (struct compat_timespec __user *) userstamp; int err = -ENOENT; struct timespec ts; @@ -502,12 +501,10 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen) { int err; - struct socket *sock; + struct socket *sock = sockfd_lookup(fd, &err); - if ((sock = sockfd_lookup(fd, &err))!=NULL) - { - err = security_socket_getsockopt(sock, level, - optname); + if (sock) { + err = security_socket_getsockopt(sock, level, optname); if (err) { sockfd_put(sock); return err; @@ -531,7 +528,7 @@ struct compat_group_req { __u32 gr_interface; struct __kernel_sockaddr_storage gr_group __attribute__ ((aligned(4))); -} __attribute__ ((packed)); +} __packed; struct compat_group_source_req { __u32 gsr_interface; @@ -539,7 +536,7 @@ struct compat_group_source_req { __attribute__ ((aligned(4))); struct __kernel_sockaddr_storage gsr_source __attribute__ ((aligned(4))); -} __attribute__ ((packed)); +} __packed; struct compat_group_filter { __u32 gf_interface; @@ -549,7 +546,7 @@ struct compat_group_filter { __u32 gf_numsrc; struct __kernel_sockaddr_storage gf_slist[1] __attribute__ ((aligned(4))); -} __attribute__ ((packed)); +} __packed; #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ sizeof(struct __kernel_sockaddr_storage)) @@ -557,7 +554,7 @@ struct compat_group_filter { int compat_mc_setsockopt(struct sock *sock, int level, int optname, char __user *optval, unsigned int optlen, - int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int)) + int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int)) { char __user *koptval = optval; int koptlen = optlen; @@ -640,12 +637,11 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, } return setsockopt(sock, level, optname, koptval, koptlen); } - EXPORT_SYMBOL(compat_mc_setsockopt); int compat_mc_getsockopt(struct sock *sock, int level, int optname, char __user *optval, int __user *optlen, - int (*getsockopt)(struct 
sock *,int,int,char __user *,int __user *)) + int (*getsockopt)(struct sock *, int, int, char __user *, int __user *)) { struct compat_group_filter __user *gf32 = (void *)optval; struct group_filter __user *kgf; @@ -681,7 +677,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, __put_user(interface, &kgf->gf_interface) || __put_user(fmode, &kgf->gf_fmode) || __put_user(numsrc, &kgf->gf_numsrc) || - copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group))) + copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group))) return -EFAULT; err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen); @@ -714,21 +710,22 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, copylen = numsrc * sizeof(gf32->gf_slist[0]); if (copylen > klen) copylen = klen; - if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) + if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) return -EFAULT; } return err; } - EXPORT_SYMBOL(compat_mc_getsockopt); /* Argument list sizes for compat_sys_socketcall */ #define AL(x) ((x) * sizeof(u32)) -static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), - AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), - AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), - AL(4),AL(5)}; +static unsigned char nas[20] = { + AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), + AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), + AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), + AL(4), AL(5) +}; #undef AL asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) @@ -827,7 +824,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) compat_ptr(a[4]), compat_ptr(a[5])); break; case SYS_SHUTDOWN: - ret = sys_shutdown(a0,a1); + ret = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: ret = compat_sys_setsockopt(a0, a1, a[2], diff --git a/net/core/Makefile b/net/core/Makefile index 51c3eec850ef426bc2614f309ad08182b595df69..8a04dd22cf77c706c8613edd9d0c64bf57fa3613 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -18,4 +18,4 @@ obj-$(CONFIG_NET_DMA) += user_dma.o obj-$(CONFIG_FIB_RULES) += fib_rules.o obj-$(CONFIG_TRACEPOINTS) += net-traces.o obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o - +obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o diff --git a/net/core/datagram.c b/net/core/datagram.c index f5b6f43a4c2e61e34e0d02053458b9a8c4dfdf89..251997a9548362c5ebc8a0a34842971a0198539a 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -219,6 +219,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, err); } +EXPORT_SYMBOL(skb_recv_datagram); void skb_free_datagram(struct sock *sk, struct sk_buff *skb) { @@ -288,7 +289,6 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) return err; } - EXPORT_SYMBOL(skb_kill_datagram); /** @@ -373,6 +373,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, fault: return -EFAULT; } +EXPORT_SYMBOL(skb_copy_datagram_iovec); /** * skb_copy_datagram_const_iovec - Copy a datagram to an iovec. 
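For reference, a standalone sketch (not kernel code) of what the reformatted nas[] table in the compat_sys_socketcall() hunk above encodes: each entry is the byte count of 32-bit arguments copied in for one socketcall multiplexer command. The SYS_BIND value (2) is recalled from <linux/net.h> and is an assumption of this sketch, not part of the patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Argument list sizes, as in the compat_sys_socketcall() hunk above. */
    #define AL(x) ((x) * sizeof(uint32_t))
    static const unsigned char nas[20] = {
            AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
            AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
            AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
            AL(4), AL(5)
    };
    #undef AL

    int main(void)
    {
            /* SYS_BIND (assumed value 2) takes 3 args -> 12 bytes copied */
            printf("SYS_BIND copies %d bytes of arguments\n", nas[2]);
            return 0;
    }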
@@ -716,6 +717,7 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, fault: return -EFAULT; } +EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec); /** * datagram_poll - generic datagram poll @@ -770,8 +772,4 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, return mask; } - EXPORT_SYMBOL(datagram_poll); -EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec); -EXPORT_SYMBOL(skb_copy_datagram_iovec); -EXPORT_SYMBOL(skb_recv_datagram); diff --git a/net/core/dev.c b/net/core/dev.c index 1f466e82ac339359ffa0587f0e1e1f324c0f5dd7..e1c1cdcc2bb0429d65fea408625e9dec0c317a59 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -101,8 +101,6 @@ #include #include #include -#include -#include #include #include #include @@ -803,35 +801,31 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) EXPORT_SYMBOL(dev_getfirstbyhwtype); /** - * dev_get_by_flags - find any device with given flags + * dev_get_by_flags_rcu - find any device with given flags * @net: the applicable net namespace * @if_flags: IFF_* values * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device - * is not found or a pointer to the device. The device returned has - * had a reference added and the pointer is safe until the user calls - * dev_put to indicate they have finished with it. + * is not found or a pointer to the device. Must be called inside + * rcu_read_lock(), and result refcount is unchanged. */ -struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, +struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, unsigned short mask) { struct net_device *dev, *ret; ret = NULL; - rcu_read_lock(); for_each_netdev_rcu(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { - dev_hold(dev); ret = dev; break; } } - rcu_read_unlock(); return ret; } -EXPORT_SYMBOL(dev_get_by_flags); +EXPORT_SYMBOL(dev_get_by_flags_rcu); /** * dev_valid_name - check if name is okay for network device @@ -1542,7 +1536,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) if (net_ratelimit()) printk(KERN_CRIT "protocol %04x is " "buggy, dev %s\n", - skb2->protocol, dev->name); + ntohs(skb2->protocol), + dev->name); skb_reset_network_header(skb2); } @@ -1924,6 +1919,22 @@ static inline void skb_orphan_try(struct sk_buff *skb) } } +/* + * Returns true if either: + * 1. skb has frag_list and the device doesn't support FRAGLIST, or + * 2. skb is fragmented and the device does not support SG, or if + * at least one of fragments is in highmem and device does not + * support DMA from it. + */ +static inline int skb_needs_linearize(struct sk_buff *skb, + struct net_device *dev) +{ + return skb_is_nonlinear(skb) && + ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || + (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || + illegal_highdma(dev, skb)))); +} + int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { @@ -1948,6 +1959,22 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, goto out_kfree_skb; if (skb->next) goto gso; + } else { + if (skb_needs_linearize(skb, dev) && + __skb_linearize(skb)) + goto out_kfree_skb; + + /* If packet is not checksummed and device does not + * support checksumming for this protocol, complete + * checksumming here. 
+ */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_set_transport_header(skb, skb->csum_start - + skb_headroom(skb)); + if (!dev_can_checksum(dev, skb) && + skb_checksum_help(skb)) + goto out_kfree_skb; + } } rc = ops->ndo_start_xmit(skb, dev); @@ -2063,14 +2090,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct netdev_queue *txq) { spinlock_t *root_lock = qdisc_lock(q); + bool contended = qdisc_is_running(q); int rc; + /* + * Heuristic to force contended enqueues to serialize on a + * separate lock before trying to get qdisc main lock. + * This permits __QDISC_STATE_RUNNING owner to get the lock more often + * and dequeue packets faster. + */ + if (unlikely(contended)) + spin_lock(&q->busylock); + spin_lock(root_lock); if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { kfree_skb(skb); rc = NET_XMIT_DROP; } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && - !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) { + qdisc_run_begin(q)) { /* * This is a work-conserving queue; there are no old skbs * waiting to be sent out; and the qdisc is not running - @@ -2079,37 +2116,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) skb_dst_force(skb); __qdisc_update_bstats(q, skb->len); - if (sch_direct_xmit(skb, q, dev, txq, root_lock)) + if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { + if (unlikely(contended)) { + spin_unlock(&q->busylock); + contended = false; + } __qdisc_run(q); - else - clear_bit(__QDISC_STATE_RUNNING, &q->state); + } else + qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { skb_dst_force(skb); rc = qdisc_enqueue_root(skb, q); - qdisc_run(q); + if (qdisc_run_begin(q)) { + if (unlikely(contended)) { + spin_unlock(&q->busylock); + contended = false; + } + __qdisc_run(q); + } } spin_unlock(root_lock); - + if (unlikely(contended)) + spin_unlock(&q->busylock); return rc; } -/* - * Returns true if either: - * 1. skb has frag_list and the device doesn't support FRAGLIST, or - * 2. skb is fragmented and the device does not support SG, or if - * at least one of fragments is in highmem and device does not - * support DMA from it. - */ -static inline int skb_needs_linearize(struct sk_buff *skb, - struct net_device *dev) -{ - return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || - (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || - illegal_highdma(dev, skb))); -} - /** * dev_queue_xmit - transmit a buffer * @skb: buffer to transmit @@ -2142,25 +2175,6 @@ int dev_queue_xmit(struct sk_buff *skb) struct Qdisc *q; int rc = -ENOMEM; - /* GSO will handle the following emulations directly. */ - if (netif_needs_gso(dev, skb)) - goto gso; - - /* Convert a paged skb to linear, if required */ - if (skb_needs_linearize(skb, dev) && __skb_linearize(skb)) - goto out_kfree_skb; - - /* If packet is not checksummed and device does not support - * checksumming for this protocol, complete checksumming here. - */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - skb_set_transport_header(skb, skb->csum_start - - skb_headroom(skb)); - if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb)) - goto out_kfree_skb; - } - -gso: /* Disable soft irqs for various locks below. Also * stops preemption for RCU. 
*/ @@ -2219,7 +2233,6 @@ int dev_queue_xmit(struct sk_buff *skb) rc = -ENETDOWN; rcu_read_unlock_bh(); -out_kfree_skb: kfree_skb(skb); return rc; out: @@ -2604,70 +2617,14 @@ static inline int deliver_skb(struct sk_buff *skb, return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } -#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) - -#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) +#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ + (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) /* This hook is defined here for ATM LANE */ int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr) __read_mostly; EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); #endif -/* - * If bridge module is loaded call bridging hook. - * returns NULL if packet was consumed. - */ -struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, - struct sk_buff *skb) __read_mostly; -EXPORT_SYMBOL_GPL(br_handle_frame_hook); - -static inline struct sk_buff *handle_bridge(struct sk_buff *skb, - struct packet_type **pt_prev, int *ret, - struct net_device *orig_dev) -{ - struct net_bridge_port *port; - - if (skb->pkt_type == PACKET_LOOPBACK || - (port = rcu_dereference(skb->dev->br_port)) == NULL) - return skb; - - if (*pt_prev) { - *ret = deliver_skb(skb, *pt_prev, orig_dev); - *pt_prev = NULL; - } - - return br_handle_frame_hook(port, skb); -} -#else -#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb) -#endif - -#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) -struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p, - struct sk_buff *skb) __read_mostly; -EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); - -static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, - struct packet_type **pt_prev, - int *ret, - struct net_device *orig_dev) -{ - struct macvlan_port *port; - - port = rcu_dereference(skb->dev->macvlan_port); - if (!port) - return skb; - - if (*pt_prev) { - *ret = deliver_skb(skb, *pt_prev, orig_dev); - *pt_prev = NULL; - } - return macvlan_handle_frame_hook(port, skb); -} -#else -#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) -#endif - #ifdef CONFIG_NET_CLS_ACT /* TODO: Maybe we should just force sch_ingress to be compiled in * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions @@ -2685,10 +2642,10 @@ static int ing_filter(struct sk_buff *skb) int result = TC_ACT_OK; struct Qdisc *q; - if (MAX_RED_LOOP < ttl++) { - printk(KERN_WARNING - "Redir loop detected Dropping packet (%d->%d)\n", - skb->skb_iif, dev->ifindex); + if (unlikely(MAX_RED_LOOP < ttl++)) { + if (net_ratelimit()) + pr_warning( "Redir loop detected Dropping packet (%d->%d)\n", + skb->skb_iif, dev->ifindex); return TC_ACT_SHOT; } @@ -2718,9 +2675,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; - } else { - /* Huh? Why does turning on AF_PACKET affect this? */ - skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); } switch (ing_filter(skb)) { @@ -2763,6 +2717,51 @@ void netif_nit_deliver(struct sk_buff *skb) rcu_read_unlock(); } +/** + * netdev_rx_handler_register - register receive handler + * @dev: device to register a handler for + * @rx_handler: receive handler to register + * @rx_handler_data: data pointer that is used by rx handler + * + * Register a receive hander for a device. This handler will then be + * called from __netif_receive_skb. A negative errno code is returned + * on a failure. 
+ * + * The caller must hold the rtnl_mutex. + */ +int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data) +{ + ASSERT_RTNL(); + + if (dev->rx_handler) + return -EBUSY; + + rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); + rcu_assign_pointer(dev->rx_handler, rx_handler); + + return 0; +} +EXPORT_SYMBOL_GPL(netdev_rx_handler_register); + +/** + * netdev_rx_handler_unregister - unregister receive handler + * @dev: device to unregister a handler from + * + * Unregister a receive hander from a device. + * + * The caller must hold the rtnl_mutex. + */ +void netdev_rx_handler_unregister(struct net_device *dev) +{ + + ASSERT_RTNL(); + rcu_assign_pointer(dev->rx_handler, NULL); + rcu_assign_pointer(dev->rx_handler_data, NULL); +} +EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); + static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, struct net_device *master) { @@ -2784,7 +2783,8 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master) if (master->priv_flags & IFF_MASTER_ARPMON) dev->last_rx = jiffies; - if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { + if ((master->priv_flags & IFF_MASTER_ALB) && + (master->priv_flags & IFF_BRIDGE_PORT)) { /* Do address unmangle. The local destination address * will be always the one master has. Provides the right * functionality in a bridge. @@ -2815,6 +2815,7 @@ EXPORT_SYMBOL(__skb_bond_should_drop); static int __netif_receive_skb(struct sk_buff *skb) { struct packet_type *ptype, *pt_prev; + rx_handler_func_t *rx_handler; struct net_device *orig_dev; struct net_device *master; struct net_device *null_or_orig; @@ -2856,8 +2857,7 @@ static int __netif_receive_skb(struct sk_buff *skb) skb->dev = master; } - __get_cpu_var(softnet_data).processed++; - + __this_cpu_inc(softnet_data.processed); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->mac_len = skb->network_header - skb->mac_header; @@ -2889,12 +2889,17 @@ static int __netif_receive_skb(struct sk_buff *skb) ncls: #endif - skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); - if (!skb) - goto out; - skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); - if (!skb) - goto out; + /* Handle special case of bridge or macvlan */ + rx_handler = rcu_dereference(skb->dev->rx_handler); + if (rx_handler) { + if (pt_prev) { + ret = deliver_skb(skb, pt_prev, orig_dev); + pt_prev = NULL; + } + skb = rx_handler(skb); + if (!skb) + goto out; + } /* * Make sure frames received on VLAN interfaces stacked on @@ -2955,6 +2960,9 @@ int netif_receive_skb(struct sk_buff *skb) if (netdev_tstamp_prequeue) net_timestamp_check(skb); + if (skb_defer_rx_timestamp(skb)) + return NET_RX_SUCCESS; + #ifdef CONFIG_RPS { struct rps_dev_flow voidflow, *rflow = &voidflow; @@ -3719,10 +3727,11 @@ void dev_seq_stop(struct seq_file *seq, void *v) static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { - const struct net_device_stats *stats = dev_get_stats(dev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); - seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " - "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", + seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " + "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", dev->name, stats->rx_bytes, stats->rx_packets, stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors, @@ -5271,20 +5280,22 @@ void netdev_run_todo(void) /** * dev_txq_stats_fold - 
fold tx_queues stats * @dev: device to get statistics from - * @stats: struct net_device_stats to hold results + * @stats: struct rtnl_link_stats64 to hold results */ void dev_txq_stats_fold(const struct net_device *dev, - struct net_device_stats *stats) + struct rtnl_link_stats64 *stats) { - unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; + u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0; unsigned int i; struct netdev_queue *txq; for (i = 0; i < dev->num_tx_queues; i++) { txq = netdev_get_tx_queue(dev, i); + spin_lock_bh(&txq->_xmit_lock); tx_bytes += txq->tx_bytes; tx_packets += txq->tx_packets; tx_dropped += txq->tx_dropped; + spin_unlock_bh(&txq->_xmit_lock); } if (tx_bytes || tx_packets || tx_dropped) { stats->tx_bytes = tx_bytes; @@ -5294,23 +5305,53 @@ void dev_txq_stats_fold(const struct net_device *dev, } EXPORT_SYMBOL(dev_txq_stats_fold); +/* Convert net_device_stats to rtnl_link_stats64. They have the same + * fields in the same order, with only the type differing. + */ +static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, + const struct net_device_stats *netdev_stats) +{ +#if BITS_PER_LONG == 64 + BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); + memcpy(stats64, netdev_stats, sizeof(*stats64)); +#else + size_t i, n = sizeof(*stats64) / sizeof(u64); + const unsigned long *src = (const unsigned long *)netdev_stats; + u64 *dst = (u64 *)stats64; + + BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != + sizeof(*stats64) / sizeof(u64)); + for (i = 0; i < n; i++) + dst[i] = src[i]; +#endif +} + /** * dev_get_stats - get network device statistics * @dev: device to get statistics from + * @storage: place to store stats * - * Get network statistics from device. The device driver may provide - * its own method by setting dev->netdev_ops->get_stats; otherwise - * the internal statistics structure is used. + * Get network statistics from device. Return @storage. + * The device driver may provide its own method by setting + * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; + * otherwise the internal statistics structure is used. */ -const struct net_device_stats *dev_get_stats(struct net_device *dev) +struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *storage) { const struct net_device_ops *ops = dev->netdev_ops; - if (ops->ndo_get_stats) - return ops->ndo_get_stats(dev); - - dev_txq_stats_fold(dev, &dev->stats); - return &dev->stats; + if (ops->ndo_get_stats64) { + memset(storage, 0, sizeof(*storage)); + return ops->ndo_get_stats64(dev, storage); + } + if (ops->ndo_get_stats) { + netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); + return storage; + } + netdev_stats_to_stats64(storage, &dev->stats); + dev_txq_stats_fold(dev, storage); + return storage; } EXPORT_SYMBOL(dev_get_stats); @@ -5815,6 +5856,68 @@ char *netdev_drivername(const struct net_device *dev, char *buffer, int len) return buffer; } +static int __netdev_printk(const char *level, const struct net_device *dev, + struct va_format *vaf) +{ + int r; + + if (dev && dev->dev.parent) + r = dev_printk(level, dev->dev.parent, "%s: %pV", + netdev_name(dev), vaf); + else if (dev) + r = printk("%s%s: %pV", level, netdev_name(dev), vaf); + else + r = printk("%s(NULL net_device): %pV", level, vaf); + + return r; +} + +int netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...) 
+{ + struct va_format vaf; + va_list args; + int r; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + r = __netdev_printk(level, dev, &vaf); + va_end(args); + + return r; +} +EXPORT_SYMBOL(netdev_printk); + +#define define_netdev_printk_level(func, level) \ +int func(const struct net_device *dev, const char *fmt, ...) \ +{ \ + int r; \ + struct va_format vaf; \ + va_list args; \ + \ + va_start(args, fmt); \ + \ + vaf.fmt = fmt; \ + vaf.va = &args; \ + \ + r = __netdev_printk(level, dev, &vaf); \ + va_end(args); \ + \ + return r; \ +} \ +EXPORT_SYMBOL(func); + +define_netdev_printk_level(netdev_emerg, KERN_EMERG); +define_netdev_printk_level(netdev_alert, KERN_ALERT); +define_netdev_printk_level(netdev_crit, KERN_CRIT); +define_netdev_printk_level(netdev_err, KERN_ERR); +define_netdev_printk_level(netdev_warn, KERN_WARNING); +define_netdev_printk_level(netdev_notice, KERN_NOTICE); +define_netdev_printk_level(netdev_info, KERN_INFO); + static void __net_exit netdev_exit(struct net *net) { kfree(net->dev_name_head); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index ad41529fb60f766ad095de5c40b9d537473314f1..36e603c78ce9fd3a5f989716d59bff20a2699293 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -223,6 +223,11 @@ static int set_all_monitor_traces(int state) spin_lock(&trace_state_lock); + if (state == trace_state) { + rc = -EAGAIN; + goto out_unlock; + } + switch (state) { case TRACE_ON: rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); @@ -251,11 +256,12 @@ static int set_all_monitor_traces(int state) if (!rc) trace_state = state; + else + rc = -EINPROGRESS; +out_unlock: spin_unlock(&trace_state_lock); - if (rc) - return -EINPROGRESS; return rc; } @@ -341,9 +347,9 @@ static struct notifier_block dropmon_net_notifier = { static int __init init_net_drop_monitor(void) { - int cpu; - int rc, i, ret; struct per_cpu_dm_data *data; + int cpu, rc; + printk(KERN_INFO "Initalizing network drop monitor service\n"); if (sizeof(void *) > 8) { @@ -351,21 +357,12 @@ static int __init init_net_drop_monitor(void) return -ENOSPC; } - if (genl_register_family(&net_drop_monitor_family) < 0) { + rc = genl_register_family_with_ops(&net_drop_monitor_family, + dropmon_ops, + ARRAY_SIZE(dropmon_ops)); + if (rc) { printk(KERN_ERR "Could not create drop monitor netlink family\n"); - return -EFAULT; - } - - rc = -EFAULT; - - for (i = 0; i < ARRAY_SIZE(dropmon_ops); i++) { - ret = genl_register_ops(&net_drop_monitor_family, - &dropmon_ops[i]); - if (ret) { - printk(KERN_CRIT "Failed to register operation %d\n", - dropmon_ops[i].cmd); - goto out_unreg; - } + return rc; } rc = register_netdevice_notifier(&dropmon_net_notifier); diff --git a/net/core/dst.c b/net/core/dst.c index 9920722cc82b84490cc75d2010976d5652ae4f4b..6c41b1fac3db56a62f5e0f24f3cb76f34e978800 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -197,7 +197,6 @@ static void ___dst_free(struct dst_entry *dst) dst->input = dst->output = dst_discard; dst->obsolete = 2; } -EXPORT_SYMBOL(__dst_free); void __dst_free(struct dst_entry *dst) { @@ -213,6 +212,7 @@ void __dst_free(struct dst_entry *dst) } spin_unlock_bh(&dst_garbage.lock); } +EXPORT_SYMBOL(__dst_free); struct dst_entry *dst_destroy(struct dst_entry * dst) { diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 75e4ffeb8cc99dae9c03c07c39962b17c4453f43..7a85367b3c2f8010af24bbd6b6f4249698f9d78d 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -144,31 +144,13 @@ u32 ethtool_op_get_flags(struct net_device *dev) } 
EXPORT_SYMBOL(ethtool_op_get_flags); -int ethtool_op_set_flags(struct net_device *dev, u32 data) +int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) { - const struct ethtool_ops *ops = dev->ethtool_ops; - unsigned long features = dev->features; - - if (data & ETH_FLAG_LRO) - features |= NETIF_F_LRO; - else - features &= ~NETIF_F_LRO; - - if (data & ETH_FLAG_NTUPLE) { - if (!ops->set_rx_ntuple) - return -EOPNOTSUPP; - features |= NETIF_F_NTUPLE; - } else { - /* safe to clear regardless */ - features &= ~NETIF_F_NTUPLE; - } - - if (data & ETH_FLAG_RXHASH) - features |= NETIF_F_RXHASH; - else - features &= ~NETIF_F_RXHASH; + if (data & ~supported) + return -EINVAL; - dev->features = features; + dev->features = ((dev->features & ~flags_dup_features) | + (data & flags_dup_features)); return 0; } EXPORT_SYMBOL(ethtool_op_set_flags); @@ -395,6 +377,80 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, return ret; } +static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_rxfh_indir *indir; + u32 table_size; + size_t full_size; + int ret; + + if (!dev->ethtool_ops->get_rxfh_indir) + return -EOPNOTSUPP; + + if (copy_from_user(&table_size, + useraddr + offsetof(struct ethtool_rxfh_indir, size), + sizeof(table_size))) + return -EFAULT; + + if (table_size > + (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) + return -ENOMEM; + full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; + indir = kmalloc(full_size, GFP_USER); + if (!indir) + return -ENOMEM; + + indir->cmd = ETHTOOL_GRXFHINDIR; + indir->size = table_size; + ret = dev->ethtool_ops->get_rxfh_indir(dev, indir); + if (ret) + goto out; + + if (copy_to_user(useraddr, indir, full_size)) + ret = -EFAULT; + +out: + kfree(indir); + return ret; +} + +static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, + void __user *useraddr) +{ + struct ethtool_rxfh_indir *indir; + u32 table_size; + size_t full_size; + int ret; + + if (!dev->ethtool_ops->set_rxfh_indir) + return -EOPNOTSUPP; + + if (copy_from_user(&table_size, + useraddr + offsetof(struct ethtool_rxfh_indir, size), + sizeof(table_size))) + return -EFAULT; + + if (table_size > + (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) + return -ENOMEM; + full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; + indir = kmalloc(full_size, GFP_USER); + if (!indir) + return -ENOMEM; + + if (copy_from_user(indir, useraddr, full_size)) { + ret = -EFAULT; + goto out; + } + + ret = dev->ethtool_ops->set_rxfh_indir(dev, indir); + +out: + kfree(indir); + return ret; +} + static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, struct ethtool_rx_ntuple_flow_spec *spec, struct ethtool_rx_ntuple_flow_spec_container *fsc) @@ -1563,6 +1619,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GSSET_INFO: rc = ethtool_get_sset_info(dev, useraddr); break; + case ETHTOOL_GRXFHINDIR: + rc = ethtool_get_rxfh_indir(dev, useraddr); + break; + case ETHTOOL_SRXFHINDIR: + rc = ethtool_set_rxfh_indir(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } diff --git a/net/core/filter.c b/net/core/filter.c index da69fb728d328f1b47236cebaaea7ae26f3bcd47..52b051f82a016e37c792efc8bc30dca3d71fd537 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -128,87 +128,87 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int fentry = &filter[pc]; switch (fentry->code) { - case 
BPF_ALU|BPF_ADD|BPF_X: + case BPF_S_ALU_ADD_X: A += X; continue; - case BPF_ALU|BPF_ADD|BPF_K: + case BPF_S_ALU_ADD_K: A += fentry->k; continue; - case BPF_ALU|BPF_SUB|BPF_X: + case BPF_S_ALU_SUB_X: A -= X; continue; - case BPF_ALU|BPF_SUB|BPF_K: + case BPF_S_ALU_SUB_K: A -= fentry->k; continue; - case BPF_ALU|BPF_MUL|BPF_X: + case BPF_S_ALU_MUL_X: A *= X; continue; - case BPF_ALU|BPF_MUL|BPF_K: + case BPF_S_ALU_MUL_K: A *= fentry->k; continue; - case BPF_ALU|BPF_DIV|BPF_X: + case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; - case BPF_ALU|BPF_DIV|BPF_K: + case BPF_S_ALU_DIV_K: A /= fentry->k; continue; - case BPF_ALU|BPF_AND|BPF_X: + case BPF_S_ALU_AND_X: A &= X; continue; - case BPF_ALU|BPF_AND|BPF_K: + case BPF_S_ALU_AND_K: A &= fentry->k; continue; - case BPF_ALU|BPF_OR|BPF_X: + case BPF_S_ALU_OR_X: A |= X; continue; - case BPF_ALU|BPF_OR|BPF_K: + case BPF_S_ALU_OR_K: A |= fentry->k; continue; - case BPF_ALU|BPF_LSH|BPF_X: + case BPF_S_ALU_LSH_X: A <<= X; continue; - case BPF_ALU|BPF_LSH|BPF_K: + case BPF_S_ALU_LSH_K: A <<= fentry->k; continue; - case BPF_ALU|BPF_RSH|BPF_X: + case BPF_S_ALU_RSH_X: A >>= X; continue; - case BPF_ALU|BPF_RSH|BPF_K: + case BPF_S_ALU_RSH_K: A >>= fentry->k; continue; - case BPF_ALU|BPF_NEG: + case BPF_S_ALU_NEG: A = -A; continue; - case BPF_JMP|BPF_JA: + case BPF_S_JMP_JA: pc += fentry->k; continue; - case BPF_JMP|BPF_JGT|BPF_K: + case BPF_S_JMP_JGT_K: pc += (A > fentry->k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JGE|BPF_K: + case BPF_S_JMP_JGE_K: pc += (A >= fentry->k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JEQ|BPF_K: + case BPF_S_JMP_JEQ_K: pc += (A == fentry->k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JSET|BPF_K: + case BPF_S_JMP_JSET_K: pc += (A & fentry->k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JGT|BPF_X: + case BPF_S_JMP_JGT_X: pc += (A > X) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JGE|BPF_X: + case BPF_S_JMP_JGE_X: pc += (A >= X) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JEQ|BPF_X: + case BPF_S_JMP_JEQ_X: pc += (A == X) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JSET|BPF_X: + case BPF_S_JMP_JSET_X: pc += (A & X) ? 
fentry->jt : fentry->jf; continue; - case BPF_LD|BPF_W|BPF_ABS: + case BPF_S_LD_W_ABS: k = fentry->k; load_w: ptr = load_pointer(skb, k, 4, &tmp); @@ -217,7 +217,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int continue; } break; - case BPF_LD|BPF_H|BPF_ABS: + case BPF_S_LD_H_ABS: k = fentry->k; load_h: ptr = load_pointer(skb, k, 2, &tmp); @@ -226,7 +226,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int continue; } break; - case BPF_LD|BPF_B|BPF_ABS: + case BPF_S_LD_B_ABS: k = fentry->k; load_b: ptr = load_pointer(skb, k, 1, &tmp); @@ -235,54 +235,54 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int continue; } break; - case BPF_LD|BPF_W|BPF_LEN: + case BPF_S_LD_W_LEN: A = skb->len; continue; - case BPF_LDX|BPF_W|BPF_LEN: + case BPF_S_LDX_W_LEN: X = skb->len; continue; - case BPF_LD|BPF_W|BPF_IND: + case BPF_S_LD_W_IND: k = X + fentry->k; goto load_w; - case BPF_LD|BPF_H|BPF_IND: + case BPF_S_LD_H_IND: k = X + fentry->k; goto load_h; - case BPF_LD|BPF_B|BPF_IND: + case BPF_S_LD_B_IND: k = X + fentry->k; goto load_b; - case BPF_LDX|BPF_B|BPF_MSH: + case BPF_S_LDX_B_MSH: ptr = load_pointer(skb, fentry->k, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; - case BPF_LD|BPF_IMM: + case BPF_S_LD_IMM: A = fentry->k; continue; - case BPF_LDX|BPF_IMM: + case BPF_S_LDX_IMM: X = fentry->k; continue; - case BPF_LD|BPF_MEM: + case BPF_S_LD_MEM: A = mem[fentry->k]; continue; - case BPF_LDX|BPF_MEM: + case BPF_S_LDX_MEM: X = mem[fentry->k]; continue; - case BPF_MISC|BPF_TAX: + case BPF_S_MISC_TAX: X = A; continue; - case BPF_MISC|BPF_TXA: + case BPF_S_MISC_TXA: A = X; continue; - case BPF_RET|BPF_K: + case BPF_S_RET_K: return fentry->k; - case BPF_RET|BPF_A: + case BPF_S_RET_A: return A; - case BPF_ST: + case BPF_S_ST: mem[fentry->k] = A; continue; - case BPF_STX: + case BPF_S_STX: mem[fentry->k] = X; continue; default: @@ -390,53 +390,128 @@ int sk_chk_filter(struct sock_filter *filter, int flen) /* Only allow valid instructions */ switch (ftest->code) { case BPF_ALU|BPF_ADD|BPF_K: + ftest->code = BPF_S_ALU_ADD_K; + break; case BPF_ALU|BPF_ADD|BPF_X: + ftest->code = BPF_S_ALU_ADD_X; + break; case BPF_ALU|BPF_SUB|BPF_K: + ftest->code = BPF_S_ALU_SUB_K; + break; case BPF_ALU|BPF_SUB|BPF_X: + ftest->code = BPF_S_ALU_SUB_X; + break; case BPF_ALU|BPF_MUL|BPF_K: + ftest->code = BPF_S_ALU_MUL_K; + break; case BPF_ALU|BPF_MUL|BPF_X: + ftest->code = BPF_S_ALU_MUL_X; + break; case BPF_ALU|BPF_DIV|BPF_X: + ftest->code = BPF_S_ALU_DIV_X; + break; case BPF_ALU|BPF_AND|BPF_K: + ftest->code = BPF_S_ALU_AND_K; + break; case BPF_ALU|BPF_AND|BPF_X: + ftest->code = BPF_S_ALU_AND_X; + break; case BPF_ALU|BPF_OR|BPF_K: + ftest->code = BPF_S_ALU_OR_K; + break; case BPF_ALU|BPF_OR|BPF_X: + ftest->code = BPF_S_ALU_OR_X; + break; case BPF_ALU|BPF_LSH|BPF_K: + ftest->code = BPF_S_ALU_LSH_K; + break; case BPF_ALU|BPF_LSH|BPF_X: + ftest->code = BPF_S_ALU_LSH_X; + break; case BPF_ALU|BPF_RSH|BPF_K: + ftest->code = BPF_S_ALU_RSH_K; + break; case BPF_ALU|BPF_RSH|BPF_X: + ftest->code = BPF_S_ALU_RSH_X; + break; case BPF_ALU|BPF_NEG: + ftest->code = BPF_S_ALU_NEG; + break; case BPF_LD|BPF_W|BPF_ABS: + ftest->code = BPF_S_LD_W_ABS; + break; case BPF_LD|BPF_H|BPF_ABS: + ftest->code = BPF_S_LD_H_ABS; + break; case BPF_LD|BPF_B|BPF_ABS: + ftest->code = BPF_S_LD_B_ABS; + break; case BPF_LD|BPF_W|BPF_LEN: + ftest->code = BPF_S_LD_W_LEN; + break; case BPF_LD|BPF_W|BPF_IND: + ftest->code = BPF_S_LD_W_IND; + 
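For illustration only (not part of the patch): the switch above translates the classic BPF opcode encodings that user space still submits into the internal BPF_S_* codes, so existing filters keep working unchanged. A minimal sketch of such a filter, attached with SO_ATTACH_FILTER and accepting only IPv4 frames (the socket descriptor sock is assumed to be an already-open packet socket):

#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

static int attach_ipv4_only_filter(int sock)
{
	struct sock_filter code[] = {
		/* load the EtherType half-word at offset 12 */
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
		/* accept if it is ETH_P_IP, otherwise jump to the drop */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};

	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}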
break; case BPF_LD|BPF_H|BPF_IND: + ftest->code = BPF_S_LD_H_IND; + break; case BPF_LD|BPF_B|BPF_IND: + ftest->code = BPF_S_LD_B_IND; + break; case BPF_LD|BPF_IMM: + ftest->code = BPF_S_LD_IMM; + break; case BPF_LDX|BPF_W|BPF_LEN: + ftest->code = BPF_S_LDX_W_LEN; + break; case BPF_LDX|BPF_B|BPF_MSH: + ftest->code = BPF_S_LDX_B_MSH; + break; case BPF_LDX|BPF_IMM: + ftest->code = BPF_S_LDX_IMM; + break; case BPF_MISC|BPF_TAX: + ftest->code = BPF_S_MISC_TAX; + break; case BPF_MISC|BPF_TXA: + ftest->code = BPF_S_MISC_TXA; + break; case BPF_RET|BPF_K: + ftest->code = BPF_S_RET_K; + break; case BPF_RET|BPF_A: + ftest->code = BPF_S_RET_A; break; /* Some instructions need special checks */ - case BPF_ALU|BPF_DIV|BPF_K: /* check for division by zero */ + case BPF_ALU|BPF_DIV|BPF_K: if (ftest->k == 0) return -EINVAL; + ftest->code = BPF_S_ALU_DIV_K; break; + /* check for invalid memory addresses */ case BPF_LD|BPF_MEM: + if (ftest->k >= BPF_MEMWORDS) + return -EINVAL; + ftest->code = BPF_S_LD_MEM; + break; case BPF_LDX|BPF_MEM: + if (ftest->k >= BPF_MEMWORDS) + return -EINVAL; + ftest->code = BPF_S_LDX_MEM; + break; case BPF_ST: + if (ftest->k >= BPF_MEMWORDS) + return -EINVAL; + ftest->code = BPF_S_ST; + break; case BPF_STX: - /* check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; + ftest->code = BPF_S_STX; break; case BPF_JMP|BPF_JA: @@ -447,28 +522,63 @@ int sk_chk_filter(struct sock_filter *filter, int flen) */ if (ftest->k >= (unsigned)(flen-pc-1)) return -EINVAL; + ftest->code = BPF_S_JMP_JA; break; case BPF_JMP|BPF_JEQ|BPF_K: + ftest->code = BPF_S_JMP_JEQ_K; + break; case BPF_JMP|BPF_JEQ|BPF_X: + ftest->code = BPF_S_JMP_JEQ_X; + break; case BPF_JMP|BPF_JGE|BPF_K: + ftest->code = BPF_S_JMP_JGE_K; + break; case BPF_JMP|BPF_JGE|BPF_X: + ftest->code = BPF_S_JMP_JGE_X; + break; case BPF_JMP|BPF_JGT|BPF_K: + ftest->code = BPF_S_JMP_JGT_K; + break; case BPF_JMP|BPF_JGT|BPF_X: + ftest->code = BPF_S_JMP_JGT_X; + break; case BPF_JMP|BPF_JSET|BPF_K: + ftest->code = BPF_S_JMP_JSET_K; + break; case BPF_JMP|BPF_JSET|BPF_X: + ftest->code = BPF_S_JMP_JSET_X; + break; + + default: + return -EINVAL; + } + /* for conditionals both must be safe */ + switch (ftest->code) { + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JEQ_X: + case BPF_S_JMP_JGE_K: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JSET_X: + case BPF_S_JMP_JSET_K: if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; - break; + } + } + /* last instruction must be a RET code */ + switch (filter[flen - 1].code) { + case BPF_S_RET_K: + case BPF_S_RET_A: + return 0; + break; default: return -EINVAL; } - } - - return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 
0 : -EINVAL; } EXPORT_SYMBOL(sk_chk_filter); diff --git a/net/core/flow.c b/net/core/flow.c index 161900674009d832752a2b3929586b53522f064f..f67dcbfe54efa91cbc154c093f076540ded5c23f 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -62,6 +62,7 @@ struct flow_cache { }; atomic_t flow_cache_genid = ATOMIC_INIT(0); +EXPORT_SYMBOL(flow_cache_genid); static struct flow_cache flow_cache_global; static struct kmem_cache *flow_cachep; @@ -222,7 +223,7 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, unsigned int hash; local_bh_disable(); - fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); + fcp = this_cpu_ptr(fc->percpu); fle = NULL; flo = NULL; @@ -291,6 +292,7 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, local_bh_enable(); return flo; } +EXPORT_SYMBOL(flow_cache_lookup); static void flow_cache_flush_tasklet(unsigned long data) { @@ -302,7 +304,7 @@ static void flow_cache_flush_tasklet(unsigned long data) LIST_HEAD(gc_list); int i, deleted = 0; - fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); + fcp = this_cpu_ptr(fc->percpu); for (i = 0; i < flow_cache_hash_size(fc); i++) { hlist_for_each_entry_safe(fle, entry, tmp, &fcp->hash_table[i], u.hlist) { @@ -424,6 +426,3 @@ static int __init flow_cache_init_global(void) } module_init(flow_cache_init_global); - -EXPORT_SYMBOL(flow_cache_genid); -EXPORT_SYMBOL(flow_cache_lookup); diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 785e5276a300e6f14c634e785eb1be8e0633c18e..9fbe7f7429b0efd036cc9e4a7df06293f85a8f0b 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -263,6 +263,7 @@ static void __gen_kill_estimator(struct rcu_head *head) * * Removes the rate estimator specified by &bstats and &rate_est. * + * Note : Caller should respect an RCU grace period before freeing stats_lock */ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est) diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 393b1d8618e22d412625c96308bcdf3be8f5a43a..0452eb27a2724dcd3782f5acb15be04e7efba47e 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -73,6 +73,7 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, return 0; } +EXPORT_SYMBOL(gnet_stats_start_copy_compat); /** * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode @@ -93,6 +94,7 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, { return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d); } +EXPORT_SYMBOL(gnet_stats_start_copy); /** * gnet_stats_copy_basic - copy basic statistics into statistic TLV @@ -123,6 +125,7 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b) } return 0; } +EXPORT_SYMBOL(gnet_stats_copy_basic); /** * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV @@ -154,6 +157,7 @@ gnet_stats_copy_rate_est(struct gnet_dump *d, return 0; } +EXPORT_SYMBOL(gnet_stats_copy_rate_est); /** * gnet_stats_copy_queue - copy queue statistics into statistics TLV @@ -181,6 +185,7 @@ gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q) return 0; } +EXPORT_SYMBOL(gnet_stats_copy_queue); /** * gnet_stats_copy_app - copy application specific statistics into statistics TLV @@ -208,6 +213,7 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) return 0; } +EXPORT_SYMBOL(gnet_stats_copy_app); /** * gnet_stats_finish_copy - finish dumping procedure @@ -241,12 +247,4 @@ 
gnet_stats_finish_copy(struct gnet_dump *d) spin_unlock_bh(d->lock); return 0; } - - -EXPORT_SYMBOL(gnet_stats_start_copy); -EXPORT_SYMBOL(gnet_stats_start_copy_compat); -EXPORT_SYMBOL(gnet_stats_copy_basic); -EXPORT_SYMBOL(gnet_stats_copy_rate_est); -EXPORT_SYMBOL(gnet_stats_copy_queue); -EXPORT_SYMBOL(gnet_stats_copy_app); EXPORT_SYMBOL(gnet_stats_finish_copy); diff --git a/net/core/iovec.c b/net/core/iovec.c index 1e7f4e91a935f65b8fbb1ff60efd1663a39686cc..1cd98df412dfd52daee9cc9105ceddfea0d87a59 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -95,6 +95,7 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) return 0; } +EXPORT_SYMBOL(memcpy_toiovec); /* * Copy kernel to iovec. Returns -EFAULT on error. @@ -120,6 +121,7 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, return 0; } +EXPORT_SYMBOL(memcpy_toiovecend); /* * Copy iovec to kernel. Returns -EFAULT on error. @@ -144,6 +146,7 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) return 0; } +EXPORT_SYMBOL(memcpy_fromiovec); /* * Copy iovec from kernel. Returns -EFAULT on error. @@ -172,6 +175,7 @@ int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, return 0; } +EXPORT_SYMBOL(memcpy_fromiovecend); /* * And now for the all-in-one: copy and checksum from a user iovec @@ -256,9 +260,4 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, err = -EFAULT; goto out; } - EXPORT_SYMBOL(csum_partial_copy_fromiovecend); -EXPORT_SYMBOL(memcpy_fromiovec); -EXPORT_SYMBOL(memcpy_fromiovecend); -EXPORT_SYMBOL(memcpy_toiovec); -EXPORT_SYMBOL(memcpy_toiovecend); diff --git a/net/core/link_watch.c b/net/core/link_watch.c index bdbce2f5875be574cf367a2493bf290e9b99bcd8..01a1101b5936a7d4b87a3542fe0c9192c550b717 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -243,5 +243,4 @@ void linkwatch_fire_event(struct net_device *dev) linkwatch_schedule_work(urgent); } - EXPORT_SYMBOL(linkwatch_fire_event); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 99e7052d7323715a4c1e65ecfd7977785a686086..af4dfbadf2a09ea496653990ae8e1da823a059e1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -29,6 +29,7 @@ static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; static const char fmt_ulong[] = "%lu\n"; +static const char fmt_u64[] = "%llu\n"; static inline int dev_isalive(const struct net_device *dev) { @@ -94,6 +95,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, } NETDEVICE_SHOW(dev_id, fmt_hex); +NETDEVICE_SHOW(addr_assign_type, fmt_dec); NETDEVICE_SHOW(addr_len, fmt_dec); NETDEVICE_SHOW(iflink, fmt_dec); NETDEVICE_SHOW(ifindex, fmt_dec); @@ -294,6 +296,7 @@ static ssize_t show_ifalias(struct device *dev, } static struct device_attribute net_class_attributes[] = { + __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL), __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), @@ -324,14 +327,15 @@ static ssize_t netstat_show(const struct device *d, struct net_device *dev = to_net_dev(d); ssize_t ret = -EINVAL; - WARN_ON(offset > sizeof(struct net_device_stats) || - offset % sizeof(unsigned long) != 0); + WARN_ON(offset > sizeof(struct rtnl_link_stats64) || + offset % sizeof(u64) != 0); read_lock(&dev_base_lock); if (dev_isalive(dev)) { - const struct net_device_stats *stats = dev_get_stats(dev); - ret = 
sprintf(buf, fmt_ulong, - *(unsigned long *)(((u8 *) stats) + offset)); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); + + ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset)); } read_unlock(&dev_base_lock); return ret; @@ -343,7 +347,7 @@ static ssize_t show_##name(struct device *d, \ struct device_attribute *attr, char *buf) \ { \ return netstat_show(d, attr, buf, \ - offsetof(struct net_device_stats, name)); \ + offsetof(struct rtnl_link_stats64, name)); \ } \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) @@ -922,13 +926,12 @@ int netdev_class_create_file(struct class_attribute *class_attr) { return class_create_file(&net_class, class_attr); } +EXPORT_SYMBOL(netdev_class_create_file); void netdev_class_remove_file(struct class_attribute *class_attr) { class_remove_file(&net_class, class_attr); } - -EXPORT_SYMBOL(netdev_class_create_file); EXPORT_SYMBOL(netdev_class_remove_file); int netdev_kobject_init(void) diff --git a/net/core/netevent.c b/net/core/netevent.c index 95f81de87502ee13cae8c4664c1cb65e949ff159..865f0ceb81fbbb0e307f4f1081a1de7edc00b40d 100644 --- a/net/core/netevent.c +++ b/net/core/netevent.c @@ -35,6 +35,7 @@ int register_netevent_notifier(struct notifier_block *nb) err = atomic_notifier_chain_register(&netevent_notif_chain, nb); return err; } +EXPORT_SYMBOL_GPL(register_netevent_notifier); /** * netevent_unregister_notifier - unregister a netevent notifier block @@ -50,6 +51,7 @@ int unregister_netevent_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&netevent_notif_chain, nb); } +EXPORT_SYMBOL_GPL(unregister_netevent_notifier); /** * call_netevent_notifiers - call all netevent notifier blocks @@ -64,7 +66,4 @@ int call_netevent_notifiers(unsigned long val, void *v) { return atomic_notifier_call_chain(&netevent_notif_chain, val, v); } - -EXPORT_SYMBOL_GPL(register_netevent_notifier); -EXPORT_SYMBOL_GPL(unregister_netevent_notifier); EXPORT_SYMBOL_GPL(call_netevent_notifiers); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 94825b109551e81b1c22a09459b5e0262a97d5e4..537e01afd81baf1e9bc7269e0c97ca3fb3844ffe 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -199,11 +199,13 @@ void netpoll_poll_dev(struct net_device *dev) zap_completion_queue(); } +EXPORT_SYMBOL(netpoll_poll_dev); void netpoll_poll(struct netpoll *np) { netpoll_poll_dev(np->dev); } +EXPORT_SYMBOL(netpoll_poll); static void refill_skbs(void) { @@ -292,6 +294,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) unsigned long tries; struct net_device *dev = np->dev; const struct net_device_ops *ops = dev->netdev_ops; + /* It is up to the caller to keep npinfo alive. 
*/ struct netpoll_info *npinfo = np->dev->npinfo; if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { @@ -343,6 +346,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) schedule_delayed_work(&npinfo->tx_work,0); } } +EXPORT_SYMBOL(netpoll_send_skb); void netpoll_send_udp(struct netpoll *np, const char *msg, int len) { @@ -404,6 +408,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) netpoll_send_skb(np, skb); } +EXPORT_SYMBOL(netpoll_send_udp); static void arp_reply(struct sk_buff *skb) { @@ -630,6 +635,7 @@ void netpoll_print_options(struct netpoll *np) printk(KERN_INFO "%s: remote ethernet address %pM\n", np->name, np->remote_mac); } +EXPORT_SYMBOL(netpoll_print_options); int netpoll_parse_options(struct netpoll *np, char *opt) { @@ -722,30 +728,29 @@ int netpoll_parse_options(struct netpoll *np, char *opt) np->name, cur); return -1; } +EXPORT_SYMBOL(netpoll_parse_options); -int netpoll_setup(struct netpoll *np) +int __netpoll_setup(struct netpoll *np) { - struct net_device *ndev = NULL; - struct in_device *in_dev; + struct net_device *ndev = np->dev; struct netpoll_info *npinfo; - struct netpoll *npe, *tmp; + const struct net_device_ops *ops; unsigned long flags; int err; - if (np->dev_name) - ndev = dev_get_by_name(&init_net, np->dev_name); - if (!ndev) { - printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", + if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || + !ndev->netdev_ops->ndo_poll_controller) { + printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", np->name, np->dev_name); - return -ENODEV; + err = -ENOTSUPP; + goto out; } - np->dev = ndev; if (!ndev->npinfo) { npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); if (!npinfo) { err = -ENOMEM; - goto put; + goto out; } npinfo->rx_flags = 0; @@ -757,6 +762,13 @@ int netpoll_setup(struct netpoll *np) INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); atomic_set(&npinfo->refcnt, 1); + + ops = np->dev->netdev_ops; + if (ops->ndo_netpoll_setup) { + err = ops->ndo_netpoll_setup(ndev, npinfo); + if (err) + goto free_npinfo; + } } else { npinfo = ndev->npinfo; atomic_inc(&npinfo->refcnt); @@ -764,12 +776,37 @@ int netpoll_setup(struct netpoll *np) npinfo->netpoll = np; - if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || - !ndev->netdev_ops->ndo_poll_controller) { - printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", + if (np->rx_hook) { + spin_lock_irqsave(&npinfo->rx_lock, flags); + npinfo->rx_flags |= NETPOLL_RX_ENABLED; + list_add_tail(&np->rx, &npinfo->rx_np); + spin_unlock_irqrestore(&npinfo->rx_lock, flags); + } + + /* last thing to do is link it to the net device structure */ + rcu_assign_pointer(ndev->npinfo, npinfo); + + return 0; + +free_npinfo: + kfree(npinfo); +out: + return err; +} +EXPORT_SYMBOL_GPL(__netpoll_setup); + +int netpoll_setup(struct netpoll *np) +{ + struct net_device *ndev = NULL; + struct in_device *in_dev; + int err; + + if (np->dev_name) + ndev = dev_get_by_name(&init_net, np->dev_name); + if (!ndev) { + printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", np->name, np->dev_name); - err = -ENOTSUPP; - goto release; + return -ENODEV; } if (!netif_running(ndev)) { @@ -785,7 +822,7 @@ int netpoll_setup(struct netpoll *np) if (err) { printk(KERN_ERR "%s: failed to open %s\n", np->name, ndev->name); - goto release; + goto put; } atleast = jiffies + HZ/10; @@ -822,7 +859,7 @@ int netpoll_setup(struct netpoll *np) printk(KERN_ERR "%s: no IP address for %s, aborting\n", np->name, np->dev_name); err = -EDESTADDRREQ; - goto release; + goto put; } 
np->local_ip = in_dev->ifa_list->ifa_local; @@ -830,38 +867,25 @@ int netpoll_setup(struct netpoll *np) printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); } - if (np->rx_hook) { - spin_lock_irqsave(&npinfo->rx_lock, flags); - npinfo->rx_flags |= NETPOLL_RX_ENABLED; - list_add_tail(&np->rx, &npinfo->rx_np); - spin_unlock_irqrestore(&npinfo->rx_lock, flags); - } + np->dev = ndev; /* fill up the skb queue */ refill_skbs(); - /* last thing to do is link it to the net device structure */ - ndev->npinfo = npinfo; + rtnl_lock(); + err = __netpoll_setup(np); + rtnl_unlock(); - /* avoid racing with NAPI reading npinfo */ - synchronize_rcu(); + if (err) + goto put; return 0; - release: - if (!ndev->npinfo) { - spin_lock_irqsave(&npinfo->rx_lock, flags); - list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) { - npe->dev = NULL; - } - spin_unlock_irqrestore(&npinfo->rx_lock, flags); - - kfree(npinfo); - } put: dev_put(ndev); return err; } +EXPORT_SYMBOL(netpoll_setup); static int __init netpoll_init(void) { @@ -870,49 +894,65 @@ static int __init netpoll_init(void) } core_initcall(netpoll_init); -void netpoll_cleanup(struct netpoll *np) +void __netpoll_cleanup(struct netpoll *np) { struct netpoll_info *npinfo; unsigned long flags; - if (np->dev) { - npinfo = np->dev->npinfo; - if (npinfo) { - if (!list_empty(&npinfo->rx_np)) { - spin_lock_irqsave(&npinfo->rx_lock, flags); - list_del(&np->rx); - if (list_empty(&npinfo->rx_np)) - npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; - spin_unlock_irqrestore(&npinfo->rx_lock, flags); - } + npinfo = np->dev->npinfo; + if (!npinfo) + return; - if (atomic_dec_and_test(&npinfo->refcnt)) { - const struct net_device_ops *ops; - skb_queue_purge(&npinfo->arp_tx); - skb_queue_purge(&npinfo->txq); - cancel_rearming_delayed_work(&npinfo->tx_work); - - /* clean after last, unfinished work */ - __skb_queue_purge(&npinfo->txq); - kfree(npinfo); - ops = np->dev->netdev_ops; - if (ops->ndo_netpoll_cleanup) - ops->ndo_netpoll_cleanup(np->dev); - else - np->dev->npinfo = NULL; - } - } + if (!list_empty(&npinfo->rx_np)) { + spin_lock_irqsave(&npinfo->rx_lock, flags); + list_del(&np->rx); + if (list_empty(&npinfo->rx_np)) + npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; + spin_unlock_irqrestore(&npinfo->rx_lock, flags); + } + + if (atomic_dec_and_test(&npinfo->refcnt)) { + const struct net_device_ops *ops; + + ops = np->dev->netdev_ops; + if (ops->ndo_netpoll_cleanup) + ops->ndo_netpoll_cleanup(np->dev); + + rcu_assign_pointer(np->dev->npinfo, NULL); + + /* avoid racing with NAPI reading npinfo */ + synchronize_rcu_bh(); + + skb_queue_purge(&npinfo->arp_tx); + skb_queue_purge(&npinfo->txq); + cancel_rearming_delayed_work(&npinfo->tx_work); - dev_put(np->dev); + /* clean after last, unfinished work */ + __skb_queue_purge(&npinfo->txq); + kfree(npinfo); } +} +EXPORT_SYMBOL_GPL(__netpoll_cleanup); + +void netpoll_cleanup(struct netpoll *np) +{ + if (!np->dev) + return; + rtnl_lock(); + __netpoll_cleanup(np); + rtnl_unlock(); + + dev_put(np->dev); np->dev = NULL; } +EXPORT_SYMBOL(netpoll_cleanup); int netpoll_trap(void) { return atomic_read(&trapped); } +EXPORT_SYMBOL(netpoll_trap); void netpoll_set_trap(int trap) { @@ -921,14 +961,4 @@ void netpoll_set_trap(int trap) else atomic_dec(&trapped); } - -EXPORT_SYMBOL(netpoll_send_skb); EXPORT_SYMBOL(netpoll_set_trap); -EXPORT_SYMBOL(netpoll_trap); -EXPORT_SYMBOL(netpoll_print_options); -EXPORT_SYMBOL(netpoll_parse_options); -EXPORT_SYMBOL(netpoll_setup); -EXPORT_SYMBOL(netpoll_cleanup); -EXPORT_SYMBOL(netpoll_send_udp); 
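For illustration only (not part of the patch): splitting out __netpoll_setup() and __netpoll_cleanup() lets a caller that already holds the RTNL lock and has already picked the target device reuse the core logic without the name lookup, device open and IP autodetection done by netpoll_setup(). A minimal sketch under those assumptions (the wrapper names are hypothetical):

#include <linux/netpoll.h>
#include <linux/rtnetlink.h>

static int example_enable_netpoll(struct net_device *dev, struct netpoll *np)
{
	int err;

	ASSERT_RTNL();		/* __netpoll_setup() runs under rtnl_lock() */

	np->dev = dev;		/* the caller supplies the device directly */
	err = __netpoll_setup(np);
	if (err)
		np->dev = NULL;
	return err;
}

static void example_disable_netpoll(struct netpoll *np)
{
	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	np->dev = NULL;
}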
-EXPORT_SYMBOL(netpoll_poll_dev); -EXPORT_SYMBOL(netpoll_poll); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 1dacd7ba8dbb1d4c2e5e6e9f623b3806e202f0ee..10a1ea72010d329295ce3936228aa5bffe7dc745 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -115,6 +115,9 @@ * command by Adit Ranadive * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -169,11 +172,13 @@ #include #include /* do_div */ -#define VERSION "2.73" +#define VERSION "2.74" #define IP_NAME_SZ 32 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ #define MPLS_STACK_BOTTOM htonl(0x00000100) +#define func_enter() pr_debug("entering %s\n", __func__); + /* Device flag bits */ #define F_IPSRC_RND (1<<0) /* IP-Src Random */ #define F_IPDST_RND (1<<1) /* IP-Dst Random */ @@ -424,7 +429,8 @@ static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2) } static const char version[] = - "pktgen " VERSION ": Packet Generator for packet performance testing.\n"; + "Packet Generator for packet performance testing. " + "Version: " VERSION "\n"; static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); @@ -495,7 +501,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf, pktgen_reset_all_threads(); else - printk(KERN_WARNING "pktgen: Unknown command: %s\n", data); + pr_warning("Unknown command: %s\n", data); err = count; @@ -840,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file, const char __user * user_buffer, size_t count, loff_t * offset) { - struct seq_file *seq = (struct seq_file *)file->private_data; + struct seq_file *seq = file->private_data; struct pktgen_dev *pkt_dev = seq->private; int i = 0, max, len; char name[16], valstr[32]; @@ -852,14 +858,14 @@ static ssize_t pktgen_if_write(struct file *file, pg_result = &(pkt_dev->result[0]); if (count < 1) { - printk(KERN_WARNING "pktgen: wrong command format\n"); + pr_warning("wrong command format\n"); return -EINVAL; } max = count - i; tmp = count_trail_chars(&user_buffer[i], max); if (tmp < 0) { - printk(KERN_WARNING "pktgen: illegal format\n"); + pr_warning("illegal format\n"); return tmp; } i += tmp; @@ -980,6 +986,36 @@ static ssize_t pktgen_if_write(struct file *file, (unsigned long long) pkt_dev->delay); return count; } + if (!strcmp(name, "rate")) { + len = num_arg(&user_buffer[i], 10, &value); + if (len < 0) + return len; + + i += len; + if (!value) + return len; + pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value; + if (debug) + pr_info("Delay set at: %llu ns\n", pkt_dev->delay); + + sprintf(pg_result, "OK: rate=%lu", value); + return count; + } + if (!strcmp(name, "ratep")) { + len = num_arg(&user_buffer[i], 10, &value); + if (len < 0) + return len; + + i += len; + if (!value) + return len; + pkt_dev->delay = NSEC_PER_SEC/value; + if (debug) + pr_info("Delay set at: %llu ns\n", pkt_dev->delay); + + sprintf(pg_result, "OK: rate=%lu", value); + return count; + } if (!strcmp(name, "udp_src_min")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) @@ -1398,18 +1434,12 @@ static ssize_t pktgen_if_write(struct file *file, i += len; for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { - if (*v >= '0' && *v <= '9') { - *m *= 16; - *m += *v - '0'; - } - if (*v >= 'A' && *v <= 'F') { - *m *= 16; - *m += *v - 'A' + 10; - } - if (*v >= 'a' && *v <= 'f') { - *m *= 16; - *m += *v - 'a' + 10; - } + int value; + + value = hex_to_bin(*v); + if (value >= 0) + *m = *m * 16 + value; + 
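For illustration only (not part of the patch): the new "rate" and "ratep" keywords are written to a device's pktgen procfs file like any other pktgen command. From the delay formulas in the handlers above, "rate" takes a bandwidth in Mbit/s and "ratep" a packet rate in packets per second. A minimal sketch, assuming the interface has already been added as /proc/net/pktgen/eth0 and that the chosen values are only examples:

#include <stdio.h>

static void pg_write(const char *path, const char *cmd)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return;
	fprintf(f, "%s\n", cmd);
	fclose(f);
}

int main(void)
{
	/* delay = min_pkt_size * 8 * NSEC_PER_USEC / value => value in Mbit/s */
	pg_write("/proc/net/pktgen/eth0", "rate 300");

	/* delay = NSEC_PER_SEC / value => value in packets per second */
	pg_write("/proc/net/pktgen/eth0", "ratep 100000");

	return 0;
}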
if (*v == ':') { m++; *m = 0; @@ -1440,18 +1470,12 @@ static ssize_t pktgen_if_write(struct file *file, i += len; for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { - if (*v >= '0' && *v <= '9') { - *m *= 16; - *m += *v - '0'; - } - if (*v >= 'A' && *v <= 'F') { - *m *= 16; - *m += *v - 'A' + 10; - } - if (*v >= 'a' && *v <= 'f') { - *m *= 16; - *m += *v - 'a' + 10; - } + int value; + + value = hex_to_bin(*v); + if (value >= 0) + *m = *m * 16 + value; + if (*v == ':') { m++; *m = 0; @@ -1740,7 +1764,7 @@ static ssize_t pktgen_thread_write(struct file *file, const char __user * user_buffer, size_t count, loff_t * offset) { - struct seq_file *seq = (struct seq_file *)file->private_data; + struct seq_file *seq = file->private_data; struct pktgen_thread *t = seq->private; int i = 0, max, len, ret; char name[40]; @@ -1781,7 +1805,7 @@ static ssize_t pktgen_thread_write(struct file *file, name, (unsigned long)count); if (!t) { - printk(KERN_ERR "pktgen: ERROR: No thread\n"); + pr_err("ERROR: No thread\n"); ret = -EINVAL; goto out; } @@ -1874,7 +1898,7 @@ static void pktgen_mark_device(const char *ifname) int i = 0; mutex_lock(&pktgen_thread_lock); - pr_debug("pktgen: pktgen_mark_device marking %s for removal\n", ifname); + pr_debug("%s: marking %s for removal\n", __func__, ifname); while (1) { @@ -1883,15 +1907,14 @@ static void pktgen_mark_device(const char *ifname) break; /* success */ mutex_unlock(&pktgen_thread_lock); - pr_debug("pktgen: pktgen_mark_device waiting for %s " - "to disappear....\n", ifname); + pr_debug("%s: waiting for %s to disappear....\n", + __func__, ifname); schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); mutex_lock(&pktgen_thread_lock); if (++i >= max_tries) { - printk(KERN_ERR "pktgen_mark_device: timed out after " - "waiting %d msec for device %s to be removed\n", - msec_per_try * i, ifname); + pr_err("%s: timed out after waiting %d msec for device %s to be removed\n", + __func__, msec_per_try * i, ifname); break; } @@ -1918,8 +1941,8 @@ static void pktgen_change_name(struct net_device *dev) &pktgen_if_fops, pkt_dev); if (!pkt_dev->entry) - printk(KERN_ERR "pktgen: can't move proc " - " entry for '%s'\n", dev->name); + pr_err("can't move proc entry for '%s'\n", + dev->name); break; } } @@ -1983,15 +2006,15 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) odev = pktgen_dev_get_by_name(pkt_dev, ifname); if (!odev) { - printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname); + pr_err("no such netdevice: \"%s\"\n", ifname); return -ENODEV; } if (odev->type != ARPHRD_ETHER) { - printk(KERN_ERR "pktgen: not an ethernet device: \"%s\"\n", ifname); + pr_err("not an ethernet device: \"%s\"\n", ifname); err = -EINVAL; } else if (!netif_running(odev)) { - printk(KERN_ERR "pktgen: device is down: \"%s\"\n", ifname); + pr_err("device is down: \"%s\"\n", ifname); err = -ENETDOWN; } else { pkt_dev->odev = odev; @@ -2010,8 +2033,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) int ntxq; if (!pkt_dev->odev) { - printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in " - "setup_inject.\n"); + pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n"); sprintf(pkt_dev->result, "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); return; @@ -2021,19 +2043,15 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) ntxq = pkt_dev->odev->real_num_tx_queues; if (ntxq <= pkt_dev->queue_map_min) { - printk(KERN_WARNING "pktgen: WARNING: Requested " - "queue_map_min (zero-based) (%d) exceeds valid range " - "[0 - %d] for 
(%d) queues on %s, resetting\n", - pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, - pkt_dev->odevname); + pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, + pkt_dev->odevname); pkt_dev->queue_map_min = ntxq - 1; } if (pkt_dev->queue_map_max >= ntxq) { - printk(KERN_WARNING "pktgen: WARNING: Requested " - "queue_map_max (zero-based) (%d) exceeds valid range " - "[0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, - pkt_dev->odevname); + pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, + pkt_dev->odevname); pkt_dev->queue_map_max = ntxq - 1; } @@ -2093,8 +2111,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) } rcu_read_unlock(); if (err) - printk(KERN_ERR "pktgen: ERROR: IPv6 link " - "address not availble.\n"); + pr_err("ERROR: IPv6 link address not available\n"); } #endif } else { @@ -2142,15 +2159,15 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_set_expires(&t.timer, spin_until); - remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); + remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); if (remaining <= 0) { pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); return; } start_time = ktime_now(); - if (remaining < 100) - udelay(remaining); /* really small just spin */ + if (remaining < 100000) + ndelay(remaining); /* really small just spin */ else { /* see do_nanosleep */ hrtimer_init_sleeper(&t, current); @@ -2528,8 +2545,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev, if (nhead > 0) { ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); if (ret < 0) { - printk(KERN_ERR "Error expanding " - "ipsec packet %d\n", ret); + pr_err("Error expanding ipsec packet %d\n", + ret); goto err; } } @@ -2538,8 +2555,7 @@ static int process_ipsec(struct pktgen_dev *pkt_dev, skb_pull(skb, ETH_HLEN); ret = pktgen_output_ipsec(skb, pkt_dev); if (ret) { - printk(KERN_ERR "Error creating ipsec " - "packet %d\n", ret); + pr_err("Error creating ipsec packet %d\n", ret); goto err; } /* restore ll */ @@ -3015,8 +3031,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, if (datalen < sizeof(struct pktgen_hdr)) { datalen = sizeof(struct pktgen_hdr); if (net_ratelimit()) - printk(KERN_INFO "pktgen: increased datalen to %d\n", - datalen); + pr_info("increased datalen to %d\n", datalen); } udph->source = htons(pkt_dev->cur_udp_src); @@ -3143,7 +3158,7 @@ static void pktgen_run(struct pktgen_thread *t) struct pktgen_dev *pkt_dev; int started = 0; - pr_debug("pktgen: entering pktgen_run. 
%p\n", t); + func_enter(); if_lock(t); list_for_each_entry(pkt_dev, &t->if_list, list) { @@ -3176,7 +3191,7 @@ static void pktgen_stop_all_threads_ifs(void) { struct pktgen_thread *t; - pr_debug("pktgen: entering pktgen_stop_all_threads_ifs.\n"); + func_enter(); mutex_lock(&pktgen_thread_lock); @@ -3241,7 +3256,7 @@ static void pktgen_run_all_threads(void) { struct pktgen_thread *t; - pr_debug("pktgen: entering pktgen_run_all_threads.\n"); + func_enter(); mutex_lock(&pktgen_thread_lock); @@ -3260,7 +3275,7 @@ static void pktgen_reset_all_threads(void) { struct pktgen_thread *t; - pr_debug("pktgen: entering pktgen_reset_all_threads.\n"); + func_enter(); mutex_lock(&pktgen_thread_lock); @@ -3310,8 +3325,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev) int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; if (!pkt_dev->running) { - printk(KERN_WARNING "pktgen: interface: %s is already " - "stopped\n", pkt_dev->odevname); + pr_warning("interface: %s is already stopped\n", + pkt_dev->odevname); return -EINVAL; } @@ -3347,7 +3362,7 @@ static void pktgen_stop(struct pktgen_thread *t) { struct pktgen_dev *pkt_dev; - pr_debug("pktgen: entering pktgen_stop\n"); + func_enter(); if_lock(t); @@ -3367,7 +3382,7 @@ static void pktgen_rem_one_if(struct pktgen_thread *t) struct list_head *q, *n; struct pktgen_dev *cur; - pr_debug("pktgen: entering pktgen_rem_one_if\n"); + func_enter(); if_lock(t); @@ -3393,9 +3408,10 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t) struct list_head *q, *n; struct pktgen_dev *cur; + func_enter(); + /* Remove all devices, free mem */ - pr_debug("pktgen: entering pktgen_rem_all_ifs\n"); if_lock(t); list_for_each_safe(q, n, &t->if_list) { @@ -3477,8 +3493,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) pkt_dev->skb = fill_packet(odev, pkt_dev); if (pkt_dev->skb == NULL) { - printk(KERN_ERR "pktgen: ERROR: couldn't " - "allocate skb in fill_packet.\n"); + pr_err("ERROR: couldn't allocate skb in fill_packet\n"); schedule(); pkt_dev->clone_count--; /* back out increment, OOM */ return; @@ -3558,8 +3573,7 @@ static int pktgen_thread_worker(void *arg) init_waitqueue_head(&t->queue); complete(&t->start_done); - pr_debug("pktgen: starting pktgen/%d: pid=%d\n", - cpu, task_pid_nr(current)); + pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); set_current_state(TASK_INTERRUPTIBLE); @@ -3612,13 +3626,13 @@ static int pktgen_thread_worker(void *arg) set_current_state(TASK_INTERRUPTIBLE); } - pr_debug("pktgen: %s stopping all device\n", t->tsk->comm); + pr_debug("%s stopping all device\n", t->tsk->comm); pktgen_stop(t); - pr_debug("pktgen: %s removing all device\n", t->tsk->comm); + pr_debug("%s removing all device\n", t->tsk->comm); pktgen_rem_all_ifs(t); - pr_debug("pktgen: %s removing thread.\n", t->tsk->comm); + pr_debug("%s removing thread\n", t->tsk->comm); pktgen_rem_thread(t); return 0; @@ -3642,7 +3656,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, } if_unlock(t); - pr_debug("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev); + pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev); return pkt_dev; } @@ -3658,8 +3672,7 @@ static int add_dev_to_thread(struct pktgen_thread *t, if_lock(t); if (pkt_dev->pg_thread) { - printk(KERN_ERR "pktgen: ERROR: already assigned " - "to a thread.\n"); + pr_err("ERROR: already assigned to a thread\n"); rv = -EBUSY; goto out; } @@ -3685,7 +3698,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) pkt_dev = 
__pktgen_NN_threads(ifname, FIND); if (pkt_dev) { - printk(KERN_ERR "pktgen: ERROR: interface already used.\n"); + pr_err("ERROR: interface already used\n"); return -EBUSY; } @@ -3730,7 +3743,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir, &pktgen_if_fops, pkt_dev); if (!pkt_dev->entry) { - printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n", + pr_err("cannot create %s/%s procfs entry\n", PG_PROC_DIR, ifname); err = -EINVAL; goto out2; @@ -3761,8 +3774,7 @@ static int __init pktgen_create_thread(int cpu) t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL, cpu_to_node(cpu)); if (!t) { - printk(KERN_ERR "pktgen: ERROR: out of memory, can't " - "create new thread.\n"); + pr_err("ERROR: out of memory, can't create new thread\n"); return -ENOMEM; } @@ -3776,8 +3788,7 @@ static int __init pktgen_create_thread(int cpu) p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu); if (IS_ERR(p)) { - printk(KERN_ERR "pktgen: kernel_thread() failed " - "for cpu %d\n", t->cpu); + pr_err("kernel_thread() failed for cpu %d\n", t->cpu); list_del(&t->th_list); kfree(t); return PTR_ERR(p); @@ -3788,7 +3799,7 @@ static int __init pktgen_create_thread(int cpu) pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir, &pktgen_thread_fops, t); if (!pe) { - printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n", + pr_err("cannot create %s/%s procfs entry\n", PG_PROC_DIR, t->tsk->comm); kthread_stop(p); list_del(&t->th_list); @@ -3822,11 +3833,10 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_dev) { - pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev); + pr_debug("remove_device pkt_dev=%p\n", pkt_dev); if (pkt_dev->running) { - printk(KERN_WARNING "pktgen: WARNING: trying to remove a " - "running interface, stopping it now.\n"); + pr_warning("WARNING: trying to remove a running interface, stopping it now\n"); pktgen_stop_device(pkt_dev); } @@ -3857,7 +3867,7 @@ static int __init pg_init(void) int cpu; struct proc_dir_entry *pe; - printk(KERN_INFO "%s", version); + pr_info("%s", version); pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net); if (!pg_proc_dir) @@ -3865,8 +3875,7 @@ static int __init pg_init(void) pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); if (pe == NULL) { - printk(KERN_ERR "pktgen: ERROR: cannot create %s " - "procfs entry.\n", PGCTRL); + pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL); proc_net_remove(&init_net, PG_PROC_DIR); return -EINVAL; } @@ -3879,13 +3888,12 @@ static int __init pg_init(void) err = pktgen_create_thread(cpu); if (err) - printk(KERN_WARNING "pktgen: WARNING: Cannot create " - "thread for cpu %d (%d)\n", cpu, err); + pr_warning("WARNING: Cannot create thread for cpu %d (%d)\n", + cpu, err); } if (list_empty(&pktgen_threads)) { - printk(KERN_ERR "pktgen: ERROR: Initialization failed for " - "all threads\n"); + pr_err("ERROR: Initialization failed for all threads\n"); unregister_netdevice_notifier(&pktgen_notifier_block); remove_proc_entry(PGCTRL, pg_proc_dir); proc_net_remove(&init_net, PG_PROC_DIR); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a2af24e9e3d2b3754fa032e261a9480b4d0b7ac..f78d821bd9357dc3a88feb1259219478268858a3 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -579,7 +579,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, } static void copy_rtnl_link_stats(struct rtnl_link_stats *a, - const struct net_device_stats *b) + 
const struct rtnl_link_stats64 *b) { a->rx_packets = b->rx_packets; a->tx_packets = b->tx_packets; @@ -610,7 +610,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, a->tx_compressed = b->tx_compressed; } -static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) +static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) { struct rtnl_link_stats64 a; @@ -686,7 +686,7 @@ static size_t rtnl_port_size(const struct net_device *dev) return port_self_size; } -static inline size_t if_nlmsg_size(const struct net_device *dev) +static noinline size_t if_nlmsg_size(const struct net_device *dev) { return NLMSG_ALIGN(sizeof(struct ifinfomsg)) + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ @@ -791,7 +791,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, { struct ifinfomsg *ifm; struct nlmsghdr *nlh; - const struct net_device_stats *stats; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats; struct nlattr *attr; nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); @@ -847,7 +848,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, if (attr == NULL) goto nla_put_failure; - stats = dev_get_stats(dev); + stats = dev_get_stats(dev, &temp); copy_rtnl_link_stats(nla_data(attr), stats); attr = nla_reserve(skb, IFLA_STATS64, diff --git a/net/core/scm.c b/net/core/scm.c index b88f6f9d0b97503ddd353e163e73680704cde79a..413cab89017d0818c997b8d6a807997fa47c92d0 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -130,6 +130,7 @@ void __scm_destroy(struct scm_cookie *scm) } } } +EXPORT_SYMBOL(__scm_destroy); int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) { @@ -170,6 +171,30 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) err = scm_check_creds(&p->creds); if (err) goto error; + + if (pid_vnr(p->pid) != p->creds.pid) { + struct pid *pid; + err = -ESRCH; + pid = find_get_pid(p->creds.pid); + if (!pid) + goto error; + put_pid(p->pid); + p->pid = pid; + } + + if ((p->cred->euid != p->creds.uid) || + (p->cred->egid != p->creds.gid)) { + struct cred *cred; + err = -ENOMEM; + cred = prepare_creds(); + if (!cred) + goto error; + + cred->uid = cred->euid = p->creds.uid; + cred->gid = cred->egid = p->creds.uid; + put_cred(p->cred); + p->cred = cred; + } break; default: goto error; @@ -187,6 +212,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) scm_destroy(p); return err; } +EXPORT_SYMBOL(__scm_send); int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) { @@ -225,6 +251,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) out: return err; } +EXPORT_SYMBOL(put_cmsg); void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) { @@ -294,6 +321,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) */ __scm_destroy(scm); } +EXPORT_SYMBOL(scm_detach_fds); struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) { @@ -311,9 +339,4 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) } return new_fpl; } - -EXPORT_SYMBOL(__scm_destroy); -EXPORT_SYMBOL(__scm_send); -EXPORT_SYMBOL(put_cmsg); -EXPORT_SYMBOL(scm_detach_fds); EXPORT_SYMBOL(scm_fp_dup); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ce88293a34e263000dd93c0df5690885a857d797..3a2513f0d0c3036bf6c3f688f7e94b3e1d8f197c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -817,7 +817,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, memcpy(data + 
nhead, skb->head, skb->tail - skb->head); #endif memcpy(data + size, skb_end_pointer(skb), - sizeof(struct skb_shared_info)); + offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) get_page(skb_shinfo(skb)->frags[i].page); @@ -2486,7 +2486,6 @@ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) skb_postpull_rcsum(skb, skb->data, len); return skb->data += len; } - EXPORT_SYMBOL_GPL(skb_pull_rcsum); /** diff --git a/net/core/sock.c b/net/core/sock.c index 2cf7f9f7e775105ceef0bd630fd981dab2262952..b05b9b6ddb8700989e63e8597f6946ffd205bdba 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -110,6 +110,7 @@ #include #include #include +#include #include #include @@ -156,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , - "sk_lock-AF_IEEE802154", + "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { @@ -172,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-27" , "slock-28" , "slock-AF_CAN" , "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , - "slock-AF_IEEE802154", + "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { @@ -188,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-27" , "clock-28" , "clock-AF_CAN" , "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , - "clock-AF_IEEE802154", + "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_MAX" }; @@ -749,6 +750,20 @@ int sock_setsockopt(struct socket *sock, int level, int optname, EXPORT_SYMBOL(sock_setsockopt); +void cred_to_ucred(struct pid *pid, const struct cred *cred, + struct ucred *ucred) +{ + ucred->pid = pid_vnr(pid); + ucred->uid = ucred->gid = -1; + if (cred) { + struct user_namespace *current_ns = current_user_ns(); + + ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); + ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); + } +} +EXPORT_SYMBOL_GPL(cred_to_ucred); + int sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -901,11 +916,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_PEERCRED: - if (len > sizeof(sk->sk_peercred)) - len = sizeof(sk->sk_peercred); - if (copy_to_user(optval, &sk->sk_peercred, len)) + { + struct ucred peercred; + if (len > sizeof(peercred)) + len = sizeof(peercred); + cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); + if (copy_to_user(optval, &peercred, len)) return -EFAULT; goto lenout; + } case SO_PEERNAME: { @@ -1119,6 +1138,9 @@ static void __sk_free(struct sock *sk) printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", __func__, atomic_read(&sk->sk_omem_alloc)); + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); + put_pid(sk->sk_peer_pid); put_net(sock_net(sk)); sk_prot_free(sk->sk_prot_creator, sk); } @@ -1317,9 +1339,10 @@ EXPORT_SYMBOL(sock_wfree); void sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; + unsigned int len = skb->truesize; - atomic_sub(skb->truesize, &sk->sk_rmem_alloc); - sk_mem_uncharge(skb->sk, skb->truesize); + 
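For illustration only (not part of the patch): user space is unaffected by the switch from the cached sk_peercred to sk_peer_pid/sk_peer_cred; SO_PEERCRED still returns a struct ucred, now assembled on demand by cred_to_ucred(). A minimal user-space sketch, assuming fd is a connected AF_UNIX socket:

#define _GNU_SOURCE		/* for struct ucred */
#include <stdio.h>
#include <sys/socket.h>

static void print_peer_creds(int fd)
{
	struct ucred peer;
	socklen_t len = sizeof(peer);

	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
		printf("peer pid=%d uid=%d gid=%d\n",
		       (int)peer.pid, (int)peer.uid, (int)peer.gid);
}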
atomic_sub(len, &sk->sk_rmem_alloc); + sk_mem_uncharge(sk, len); } EXPORT_SYMBOL(sock_rfree); @@ -1954,9 +1977,8 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_sndmsg_page = NULL; sk->sk_sndmsg_off = 0; - sk->sk_peercred.pid = 0; - sk->sk_peercred.uid = -1; - sk->sk_peercred.gid = -1; + sk->sk_peer_pid = NULL; + sk->sk_peer_cred = NULL; sk->sk_write_pending = 0; sk->sk_rcvlowat = 1; sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; @@ -2210,8 +2232,7 @@ static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); #ifdef CONFIG_NET_NS void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { - int cpu = smp_processor_id(); - per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val; + __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); @@ -2257,7 +2278,7 @@ static DEFINE_PER_CPU(struct prot_inuse, prot_inuse); void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { - __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val; + __this_cpu_add(prot_inuse.val[prot->inuse_idx], val); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); diff --git a/net/core/stream.c b/net/core/stream.c index cc196f42b8d89a24dd752d11879338d72e80346e..d959e0f41528ce71d69f4aafea0f6d028d72607a 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -43,7 +43,6 @@ void sk_stream_write_space(struct sock *sk) rcu_read_unlock(); } } - EXPORT_SYMBOL(sk_stream_write_space); /** @@ -81,7 +80,6 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) } while (!done); return 0; } - EXPORT_SYMBOL(sk_stream_wait_connect); /** @@ -109,7 +107,6 @@ void sk_stream_wait_close(struct sock *sk, long timeout) finish_wait(sk_sleep(sk), &wait); } } - EXPORT_SYMBOL(sk_stream_wait_close); /** @@ -174,7 +171,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) err = sock_intr_errno(*timeo_p); goto out; } - EXPORT_SYMBOL(sk_stream_wait_memory); int sk_stream_error(struct sock *sk, int flags, int err) @@ -185,7 +181,6 @@ int sk_stream_error(struct sock *sk, int flags, int err) send_sig(SIGPIPE, current, 0); return err; } - EXPORT_SYMBOL(sk_stream_error); void sk_stream_kill_queues(struct sock *sk) @@ -210,5 +205,4 @@ void sk_stream_kill_queues(struct sock *sk) * have gone away, only the net layer knows can touch it. */ } - EXPORT_SYMBOL(sk_stream_kill_queues); diff --git a/net/core/timestamping.c b/net/core/timestamping.c new file mode 100644 index 0000000000000000000000000000000000000000..0ae6c22da85b2a3516980691c8fbcf2f7228e3d6 --- /dev/null +++ b/net/core/timestamping.c @@ -0,0 +1,126 @@ +/* + * PTP 1588 clock support - support for timestamping in PHY devices + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include +#include +#include +#include + +static struct sock_filter ptp_filter[] = { + PTP_FILTER +}; + +static unsigned int classify(struct sk_buff *skb) +{ + if (likely(skb->dev && + skb->dev->phydev && + skb->dev->phydev->drv)) + return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter)); + else + return PTP_CLASS_NONE; +} + +void skb_clone_tx_timestamp(struct sk_buff *skb) +{ + struct phy_device *phydev; + struct sk_buff *clone; + struct sock *sk = skb->sk; + unsigned int type; + + if (!sk) + return; + + type = classify(skb); + + switch (type) { + case PTP_CLASS_V1_IPV4: + case PTP_CLASS_V1_IPV6: + case PTP_CLASS_V2_IPV4: + case PTP_CLASS_V2_IPV6: + case PTP_CLASS_V2_L2: + case PTP_CLASS_V2_VLAN: + phydev = skb->dev->phydev; + if (likely(phydev->drv->txtstamp)) { + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) + return; + clone->sk = sk; + phydev->drv->txtstamp(phydev, clone, type); + } + break; + default: + break; + } +} + +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock *sk = skb->sk; + struct sock_exterr_skb *serr; + int err; + + if (!hwtstamps) + return; + + *skb_hwtstamps(skb) = *hwtstamps; + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + skb->sk = NULL; + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); + +bool skb_defer_rx_timestamp(struct sk_buff *skb) +{ + struct phy_device *phydev; + unsigned int type; + + skb_push(skb, ETH_HLEN); + + type = classify(skb); + + skb_pull(skb, ETH_HLEN); + + switch (type) { + case PTP_CLASS_V1_IPV4: + case PTP_CLASS_V1_IPV6: + case PTP_CLASS_V2_IPV4: + case PTP_CLASS_V2_IPV6: + case PTP_CLASS_V2_L2: + case PTP_CLASS_V2_VLAN: + phydev = skb->dev->phydev; + if (likely(phydev->drv->rxtstamp)) + return phydev->drv->rxtstamp(phydev, skb, type); + break; + default: + break; + } + + return false; +} + +void __init skb_timestamping_init(void) +{ + BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter))); +} diff --git a/net/core/utils.c b/net/core/utils.c index 838250241d26221f0ade44451b21697174185cab..f41854470539855adee387a4c488f4be66a017a2 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -77,7 +77,6 @@ __be32 in_aton(const char *str) } return(htonl(l)); } - EXPORT_SYMBOL(in_aton); #define IN6PTON_XDIGIT 0x00010000 @@ -162,7 +161,6 @@ int in4_pton(const char *src, int srclen, *end = s; return ret; } - EXPORT_SYMBOL(in4_pton); int in6_pton(const char *src, int srclen, @@ -280,7 +278,6 @@ int in6_pton(const char *src, int srclen, *end = s; return ret; } - EXPORT_SYMBOL(in6_pton); void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index 01e4d39fa23274a3fe57c49342ab10db7af7a1f5..92a6fcb40d7daa4c3e1bc3b8e2b5a49c4948cf3d 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -82,7 +82,7 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) elapsed_time = delta / 10; if (elapsed_time != 0 && - dccp_insert_option_elapsed_time(sk, skb, elapsed_time)) + dccp_insert_option_elapsed_time(skb, elapsed_time)) return -1; avr = dccp_ackvec_record_new(); @@ -201,7 +201,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av, const unsigned int packets, const unsigned char state) { - unsigned int gap; + long gap; long new_head; if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN) diff --git a/net/dccp/ccids/ccid3.c 
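For illustration only (not part of the patch): skb_clone_tx_timestamp() and skb_defer_rx_timestamp() above hand matching PTP frames to txtstamp/rxtstamp callbacks in struct phy_driver. The sketch below mirrors how those callbacks are invoked in the new file; the driver name and the omitted fields are assumptions, and the exact phy_driver prototypes come from the accompanying phy.h changes, which are not shown here:

#include <linux/phy.h>
#include <linux/skbuff.h>

static void example_txtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	struct skb_shared_hwtstamps hwts = { };

	/* a real driver would read the hardware timestamp into hwts here */
	skb_complete_tx_timestamp(skb, &hwts);
}

static bool example_rxtstamp(struct phy_device *phydev,
			     struct sk_buff *skb, int type)
{
	/* return true only if the driver takes ownership of the skb */
	return false;
}

static struct phy_driver example_phy_driver = {
	.name		= "example PHY with PTP timestamping",
	.txtstamp	= example_txtstamp,
	.rxtstamp	= example_rxtstamp,
	/* other mandatory phy_driver fields omitted in this sketch */
};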
b/net/dccp/ccids/ccid3.c index d3235899c7e3392ea2ef24ecc0a60c74e87ed508..95f7529864972aa34be0ba8ab3fd66b6eb640e47 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -715,9 +715,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) x_recv = htonl(hc->rx_x_recv); pinv = htonl(hc->rx_pinv); - if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, + if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE, &pinv, sizeof(pinv)) || - dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE, + dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE, &x_recv, sizeof(x_recv))) return -1; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index a10a61a1ded2911ee4a18ab0f3d75e9380513b82..3ccef1b70feef57556f6b5a71c6ed84bc1a12f02 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -446,16 +446,12 @@ extern void dccp_feat_list_purge(struct list_head *fn_list); extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb); extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*); -extern int dccp_insert_option_elapsed_time(struct sock *sk, - struct sk_buff *skb, - u32 elapsed_time); +extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed); extern u32 dccp_timestamp(void); extern void dccp_timestamping_init(void); -extern int dccp_insert_option_timestamp(struct sock *sk, - struct sk_buff *skb); -extern int dccp_insert_option(struct sock *sk, struct sk_buff *skb, - unsigned char option, - const void *value, unsigned char len); +extern int dccp_insert_option_timestamp(struct sk_buff *skb); +extern int dccp_insert_option(struct sk_buff *skb, unsigned char option, + const void *value, unsigned char len); #ifdef CONFIG_SYSCTL extern int dccp_sysctl_init(void); diff --git a/net/dccp/input.c b/net/dccp/input.c index 6beb6a7d6fbab77ebd161e09a910959dc57805df..10c957a88f4f7c9c65da3cf2ba825686349c03f4 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -430,7 +430,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, if (dccp_parse_options(sk, NULL, skb)) return 1; - /* Obtain usec RTT sample from SYN exchange (used by CCID 3) */ + /* Obtain usec RTT sample from SYN exchange (used by TFRC). */ if (likely(dp->dccps_options_received.dccpor_timestamp_echo)) dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp - dp->dccps_options_received.dccpor_timestamp_echo)); @@ -535,6 +535,8 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, const struct dccp_hdr *dh, const unsigned len) { + struct dccp_sock *dp = dccp_sk(sk); + u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; int queued = 0; switch (dh->dccph_type) { @@ -559,7 +561,14 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, if (sk->sk_state == DCCP_PARTOPEN) inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); - dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq; + /* Obtain usec RTT sample from SYN exchange (used by TFRC). */ + if (likely(sample)) { + long delta = dccp_timestamp() - sample; + + dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta); + } + + dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_set_state(sk, DCCP_OPEN); if (dh->dccph_type == DCCP_PKT_DATAACK || diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index d9b11ef8694c1732b609d21782141eca71dcfcce..d4a166f0f391d6bfccd85471b2616057d808c848 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -105,7 +105,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) goto failure; /* OK, now commit destination to socket. 
*/ - sk_setup_caps(sk, &rt->u.dst); + sk_setup_caps(sk, &rt->dst); dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, inet->inet_daddr, @@ -475,7 +475,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, return NULL; } - return &rt->u.dst; + return &rt->dst; } static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 091698899594388bbfb65678ce2ca8d1061283aa..6e3f32575df78bbf75fc8e3cd46ea0cb52f4f771 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -248,7 +248,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; struct ipv6_txoptions *opt = NULL; - struct in6_addr *final_p = NULL, final; + struct in6_addr *final_p, final; struct flowi fl; int err = -1; struct dst_entry *dst; @@ -265,13 +265,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, opt = np->opt; - if (opt != NULL && opt->srcrt != NULL) { - const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; - - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, opt, &final); err = ip6_dst_lookup(sk, &dst, &fl); if (err) @@ -545,19 +539,13 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, goto out_overflow; if (dst == NULL) { - struct in6_addr *final_p = NULL, final; + struct in6_addr *final_p, final; struct flowi fl; memset(&fl, 0, sizeof(fl)); fl.proto = IPPROTO_DCCP; ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); - if (opt != NULL && opt->srcrt != NULL) { - const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; - - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, opt, &final); ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); fl.oif = sk->sk_bound_dev_if; fl.fl_ip_dport = inet_rsk(req)->rmt_port; @@ -885,7 +873,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct dccp_sock *dp = dccp_sk(sk); - struct in6_addr *saddr = NULL, *final_p = NULL, final; + struct in6_addr *saddr = NULL, *final_p, final; struct flowi fl; struct dst_entry *dst; int addr_type; @@ -988,13 +976,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl.fl_ip_sport = inet->inet_sport; security_sk_classify_flow(sk, &fl); - if (np->opt != NULL && np->opt->srcrt != NULL) { - const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; - - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, np->opt, &final); err = ip6_dst_lookup(sk, &dst, &fl); if (err) diff --git a/net/dccp/options.c b/net/dccp/options.c index 07395f861d35aa819e154ebc66114472687e7a6a..bfda087bd90dd792f8190840878379d304152c1d 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -299,9 +299,8 @@ static inline u8 dccp_ndp_len(const u64 ndp) return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); } -int dccp_insert_option(struct sock *sk, struct sk_buff *skb, - const unsigned char option, - const void *value, const unsigned char len) +int dccp_insert_option(struct sk_buff *skb, const unsigned char option, + const void *value, const unsigned char len) { unsigned char *to; @@ -354,8 +353,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time) return elapsed_time == 0 ? 
0 : elapsed_time <= 0xFFFF ? 2 : 4; } -int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb, - u32 elapsed_time) +int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time) { const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time); const int len = 2 + elapsed_time_len; @@ -386,13 +384,13 @@ int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb, EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time); -int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb) +int dccp_insert_option_timestamp(struct sk_buff *skb) { __be32 now = htonl(dccp_timestamp()); /* yes this will overflow but that is the point as we want a * 10 usec 32 bit timer which mean it wraps every 11.9 hours */ - return dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now)); + return dccp_insert_option(skb, DCCPO_TIMESTAMP, &now, sizeof(now)); } EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp); @@ -531,9 +529,9 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb) if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) { /* * Obtain RTT sample from Request/Response exchange. - * This is currently used in CCID 3 initialisation. + * This is currently used for TFRC initialisation. */ - if (dccp_insert_option_timestamp(sk, skb)) + if (dccp_insert_option_timestamp(skb)) return -1; } else if (dp->dccps_hc_rx_ackvec != NULL && @@ -564,6 +562,10 @@ int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb) if (dccp_feat_insert_opts(NULL, dreq, skb)) return -1; + /* Obtain RTT sample from Response/Ack exchange (used by TFRC). */ + if (dccp_insert_option_timestamp(skb)) + return -1; + if (dreq->dreq_timestamp_echo != 0 && dccp_insert_option_timestamp_echo(NULL, dreq, skb)) return -1; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b03ecf6b2bb052cf8c277c9d5ff3f5c3e96774ef..096250d1323b9960c0a73d449c9b6589077e7015 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -473,14 +473,9 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type, if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) return -EINVAL; - val = kmalloc(optlen, GFP_KERNEL); - if (val == NULL) - return -ENOMEM; - - if (copy_from_user(val, optval, optlen)) { - kfree(val); - return -EFAULT; - } + val = memdup_user(optval, optlen); + if (IS_ERR(val)) + return PTR_ERR(val); lock_sock(sk); if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID) @@ -1007,7 +1002,8 @@ EXPORT_SYMBOL_GPL(dccp_shutdown); static inline int dccp_mib_init(void) { return snmp_mib_init((void __percpu **)dccp_statistics, - sizeof(struct dccp_mib)); + sizeof(struct dccp_mib), + __alignof__(struct dccp_mib)); } static inline void dccp_mib_exit(void) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 812e6dff60675d5971b03d2d4b834ac530121e82..6585ea6d1182798ef399c61950e06044b5180027 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -146,13 +146,13 @@ static __inline__ unsigned dn_hash(__le16 src, __le16 dst) static inline void dnrt_free(struct dn_route *rt) { - call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); + call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } static inline void dnrt_drop(struct dn_route *rt) { - dst_release(&rt->u.dst); - call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); + dst_release(&rt->dst); + call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } static void dn_dst_check_expire(unsigned long dummy) @@ -167,13 +167,13 @@ static void dn_dst_check_expire(unsigned long dummy) spin_lock(&dn_rt_hash_table[i].lock); while((rt=*rtp) != NULL) { - if 
(atomic_read(&rt->u.dst.__refcnt) || - (now - rt->u.dst.lastuse) < expire) { - rtp = &rt->u.dst.dn_next; + if (atomic_read(&rt->dst.__refcnt) || + (now - rt->dst.lastuse) < expire) { + rtp = &rt->dst.dn_next; continue; } - *rtp = rt->u.dst.dn_next; - rt->u.dst.dn_next = NULL; + *rtp = rt->dst.dn_next; + rt->dst.dn_next = NULL; dnrt_free(rt); } spin_unlock(&dn_rt_hash_table[i].lock); @@ -198,13 +198,13 @@ static int dn_dst_gc(struct dst_ops *ops) rtp = &dn_rt_hash_table[i].chain; while((rt=*rtp) != NULL) { - if (atomic_read(&rt->u.dst.__refcnt) || - (now - rt->u.dst.lastuse) < expire) { - rtp = &rt->u.dst.dn_next; + if (atomic_read(&rt->dst.__refcnt) || + (now - rt->dst.lastuse) < expire) { + rtp = &rt->dst.dn_next; continue; } - *rtp = rt->u.dst.dn_next; - rt->u.dst.dn_next = NULL; + *rtp = rt->dst.dn_next; + rt->dst.dn_next = NULL; dnrt_drop(rt); break; } @@ -287,25 +287,25 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route * while((rth = *rthp) != NULL) { if (compare_keys(&rth->fl, &rt->fl)) { /* Put it first */ - *rthp = rth->u.dst.dn_next; - rcu_assign_pointer(rth->u.dst.dn_next, + *rthp = rth->dst.dn_next; + rcu_assign_pointer(rth->dst.dn_next, dn_rt_hash_table[hash].chain); rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); - dst_use(&rth->u.dst, now); + dst_use(&rth->dst, now); spin_unlock_bh(&dn_rt_hash_table[hash].lock); dnrt_drop(rt); *rp = rth; return 0; } - rthp = &rth->u.dst.dn_next; + rthp = &rth->dst.dn_next; } - rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain); + rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain); rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); - dst_use(&rt->u.dst, now); + dst_use(&rt->dst, now); spin_unlock_bh(&dn_rt_hash_table[hash].lock); *rp = rt; return 0; @@ -323,8 +323,8 @@ static void dn_run_flush(unsigned long dummy) goto nothing_to_declare; for(; rt; rt=next) { - next = rt->u.dst.dn_next; - rt->u.dst.dn_next = NULL; + next = rt->dst.dn_next; + rt->dst.dn_next = NULL; dst_free((struct dst_entry *)rt); } @@ -743,7 +743,7 @@ static int dn_forward(struct sk_buff *skb) /* Ensure that we have enough space for headers */ rt = (struct dn_route *)skb_dst(skb); header_len = dn_db->use_long ? 
21 : 6; - if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len)) + if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) goto drop; /* @@ -752,7 +752,7 @@ static int dn_forward(struct sk_buff *skb) if (++cb->hops > 30) goto drop; - skb->dev = rt->u.dst.dev; + skb->dev = rt->dst.dev; /* * If packet goes out same interface it came in on, then set @@ -792,7 +792,7 @@ static int dn_rt_bug(struct sk_buff *skb) static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) { struct dn_fib_info *fi = res->fi; - struct net_device *dev = rt->u.dst.dev; + struct net_device *dev = rt->dst.dev; struct neighbour *n; unsigned mss; @@ -800,25 +800,25 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) if (DN_FIB_RES_GW(*res) && DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) rt->rt_gateway = DN_FIB_RES_GW(*res); - memcpy(rt->u.dst.metrics, fi->fib_metrics, - sizeof(rt->u.dst.metrics)); + memcpy(rt->dst.metrics, fi->fib_metrics, + sizeof(rt->dst.metrics)); } rt->rt_type = res->type; - if (dev != NULL && rt->u.dst.neighbour == NULL) { + if (dev != NULL && rt->dst.neighbour == NULL) { n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); if (IS_ERR(n)) return PTR_ERR(n); - rt->u.dst.neighbour = n; + rt->dst.neighbour = n; } - if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 || - dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu) - rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; - mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst)); - if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 || - dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss) - rt->u.dst.metrics[RTAX_ADVMSS-1] = mss; + if (dst_metric(&rt->dst, RTAX_MTU) == 0 || + dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) + rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu; + mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); + if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 || + dst_metric(&rt->dst, RTAX_ADVMSS) > mss) + rt->dst.metrics[RTAX_ADVMSS-1] = mss; return 0; } @@ -1096,8 +1096,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old if (rt == NULL) goto e_nobufs; - atomic_set(&rt->u.dst.__refcnt, 1); - rt->u.dst.flags = DST_HOST; + atomic_set(&rt->dst.__refcnt, 1); + rt->dst.flags = DST_HOST; rt->fl.fld_src = oldflp->fld_src; rt->fl.fld_dst = oldflp->fld_dst; @@ -1113,17 +1113,17 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old rt->rt_dst_map = fl.fld_dst; rt->rt_src_map = fl.fld_src; - rt->u.dst.dev = dev_out; + rt->dst.dev = dev_out; dev_hold(dev_out); - rt->u.dst.neighbour = neigh; + rt->dst.neighbour = neigh; neigh = NULL; - rt->u.dst.lastuse = jiffies; - rt->u.dst.output = dn_output; - rt->u.dst.input = dn_rt_bug; + rt->dst.lastuse = jiffies; + rt->dst.output = dn_output; + rt->dst.input = dn_rt_bug; rt->rt_flags = flags; if (flags & RTCF_LOCAL) - rt->u.dst.input = dn_nsp_rx; + rt->dst.input = dn_nsp_rx; err = dn_rt_set_next_hop(rt, &res); if (err) @@ -1152,7 +1152,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old err = -ENOBUFS; goto done; e_neighbour: - dst_free(&rt->u.dst); + dst_free(&rt->dst); goto e_nobufs; } @@ -1168,15 +1168,15 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl if (!(flags & MSG_TRYHARD)) { rcu_read_lock_bh(); for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; - rt = rcu_dereference_bh(rt->u.dst.dn_next)) { + rt = rcu_dereference_bh(rt->dst.dn_next)) { if ((flp->fld_dst == rt->fl.fld_dst) && (flp->fld_src == rt->fl.fld_src) && 
(flp->mark == rt->fl.mark) && (rt->fl.iif == 0) && (rt->fl.oif == flp->oif)) { - dst_use(&rt->u.dst, jiffies); + dst_use(&rt->dst, jiffies); rcu_read_unlock_bh(); - *pprt = &rt->u.dst; + *pprt = &rt->dst; return 0; } } @@ -1375,29 +1375,29 @@ static int dn_route_input_slow(struct sk_buff *skb) rt->fl.iif = in_dev->ifindex; rt->fl.mark = fl.mark; - rt->u.dst.flags = DST_HOST; - rt->u.dst.neighbour = neigh; - rt->u.dst.dev = out_dev; - rt->u.dst.lastuse = jiffies; - rt->u.dst.output = dn_rt_bug; + rt->dst.flags = DST_HOST; + rt->dst.neighbour = neigh; + rt->dst.dev = out_dev; + rt->dst.lastuse = jiffies; + rt->dst.output = dn_rt_bug; switch(res.type) { case RTN_UNICAST: - rt->u.dst.input = dn_forward; + rt->dst.input = dn_forward; break; case RTN_LOCAL: - rt->u.dst.output = dn_output; - rt->u.dst.input = dn_nsp_rx; - rt->u.dst.dev = in_dev; + rt->dst.output = dn_output; + rt->dst.input = dn_nsp_rx; + rt->dst.dev = in_dev; flags |= RTCF_LOCAL; break; default: case RTN_UNREACHABLE: case RTN_BLACKHOLE: - rt->u.dst.input = dst_discard; + rt->dst.input = dst_discard; } rt->rt_flags = flags; - if (rt->u.dst.dev) - dev_hold(rt->u.dst.dev); + if (rt->dst.dev) + dev_hold(rt->dst.dev); err = dn_rt_set_next_hop(rt, &res); if (err) @@ -1405,7 +1405,7 @@ static int dn_route_input_slow(struct sk_buff *skb) hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); dn_insert_route(rt, hash, &rt); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); done: if (neigh) @@ -1427,7 +1427,7 @@ static int dn_route_input_slow(struct sk_buff *skb) goto done; e_neighbour: - dst_free(&rt->u.dst); + dst_free(&rt->dst); goto done; } @@ -1442,13 +1442,13 @@ static int dn_route_input(struct sk_buff *skb) rcu_read_lock(); for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; - rt = rcu_dereference(rt->u.dst.dn_next)) { + rt = rcu_dereference(rt->dst.dn_next)) { if ((rt->fl.fld_src == cb->src) && (rt->fl.fld_dst == cb->dst) && (rt->fl.oif == 0) && (rt->fl.mark == skb->mark) && (rt->fl.iif == cb->iif)) { - dst_use(&rt->u.dst, jiffies); + dst_use(&rt->dst, jiffies); rcu_read_unlock(); skb_dst_set(skb, (struct dst_entry *)rt); return 0; @@ -1487,8 +1487,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, r->rtm_src_len = 16; RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src); } - if (rt->u.dst.dev) - RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex); + if (rt->dst.dev) + RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex); /* * Note to self - change this if input routes reverse direction when * they deal only with inputs and not with replies like they do @@ -1497,11 +1497,11 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src); if (rt->rt_daddr != rt->rt_gateway) RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); - if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) + if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) goto rtattr_failure; - expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; - if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires, - rt->u.dst.error) < 0) + expires = rt->dst.expires ? 
rt->dst.expires - jiffies : 0; + if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, + rt->dst.error) < 0) goto rtattr_failure; if (rt->fl.iif) RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); @@ -1568,8 +1568,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void local_bh_enable(); memset(cb, 0, sizeof(struct dn_skb_cb)); rt = (struct dn_route *)skb_dst(skb); - if (!err && -rt->u.dst.error) - err = rt->u.dst.error; + if (!err && -rt->dst.error) + err = rt->dst.error; } else { int oif = 0; if (rta[RTA_OIF - 1]) @@ -1583,7 +1583,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void skb->dev = NULL; if (err) goto out_free; - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); if (rtm->rtm_flags & RTM_F_NOTIFY) rt->rt_flags |= RTCF_NOTIFY; @@ -1632,10 +1632,10 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock_bh(); for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; rt; - rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) { + rt = rcu_dereference_bh(rt->dst.dn_next), idx++) { if (idx < s_idx) continue; - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set(skb, dst_clone(&rt->dst)); if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1, NLM_F_MULTI) <= 0) { @@ -1678,7 +1678,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou { struct dn_rt_cache_iter_state *s = seq->private; - rt = rt->u.dst.dn_next; + rt = rt->dst.dn_next; while(!rt) { rcu_read_unlock_bh(); if (--s->bucket < 0) @@ -1719,12 +1719,12 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v) char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", - rt->u.dst.dev ? rt->u.dst.dev->name : "*", + rt->dst.dev ? 
rt->dst.dev->name : "*", dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), - atomic_read(&rt->u.dst.__refcnt), - rt->u.dst.__use, - (int) dst_metric(&rt->u.dst, RTAX_RTT)); + atomic_read(&rt->dst.__refcnt), + rt->dst.__use, + (int) dst_metric(&rt->dst, RTAX_RTT)); return 0; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 8fdca56bb08f33817645ae55f179fbe6a8ad9c1e..64ca2a6fa0d4449f58c9befbdcbd4d8c31b3cdb0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -164,10 +164,9 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct dsa_slave_priv *p = netdev_priv(dev); - struct mii_ioctl_data *mii_data = if_mii(ifr); if (p->phy != NULL) - return phy_mii_ioctl(p->phy, mii_data, cmd); + return phy_mii_ioctl(p->phy, ifr, cmd); return -EOPNOTSUPP; } diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 2a5a8053e0004fb1523bb7d21436eefd618a907a..dc54bd0d083ba63f9cdec105d38d05d304007357 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -48,7 +48,7 @@ static const struct proto_ops econet_ops; static struct hlist_head econet_sklist; -static DEFINE_RWLOCK(econet_lock); +static DEFINE_SPINLOCK(econet_lock); static DEFINE_MUTEX(econet_mutex); /* Since there are only 256 possible network numbers (or fewer, depends @@ -98,16 +98,16 @@ struct ec_cb static void econet_remove_socket(struct hlist_head *list, struct sock *sk) { - write_lock_bh(&econet_lock); + spin_lock_bh(&econet_lock); sk_del_node_init(sk); - write_unlock_bh(&econet_lock); + spin_unlock_bh(&econet_lock); } static void econet_insert_socket(struct hlist_head *list, struct sock *sk) { - write_lock_bh(&econet_lock); + spin_lock_bh(&econet_lock); sk_add_node(sk, list); - write_unlock_bh(&econet_lock); + spin_unlock_bh(&econet_lock); } /* @@ -782,15 +782,19 @@ static struct sock *ec_listening_socket(unsigned char port, unsigned char struct sock *sk; struct hlist_node *node; + spin_lock(&econet_lock); sk_for_each(sk, node, &econet_sklist) { struct econet_sock *opt = ec_sk(sk); if ((opt->port == port || opt->port == 0) && (opt->station == station || opt->station == 0) && - (opt->net == net || opt->net == 0)) + (opt->net == net || opt->net == 0)) { + sock_hold(sk); goto found; + } } sk = NULL; found: + spin_unlock(&econet_lock); return sk; } @@ -852,7 +856,7 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) { struct iphdr *ip = ip_hdr(skb); unsigned char stn = ntohl(ip->saddr) & 0xff; - struct sock *sk; + struct sock *sk = NULL; struct sk_buff *newskb; struct ec_device *edev = skb->dev->ec_ptr; @@ -882,10 +886,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) } aun_send_response(ip->saddr, ah->handle, 3, 0); + sock_put(sk); return; bad: aun_send_response(ip->saddr, ah->handle, 4, 0); + if (sk) + sock_put(sk); } /* @@ -1050,7 +1057,7 @@ static int __init aun_udp_initialise(void) static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct ec_framehdr *hdr; - struct sock *sk; + struct sock *sk = NULL; struct ec_device *edev = dev->ec_ptr; if (!net_eq(dev_net(dev), &init_net)) @@ -1085,10 +1092,12 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb, hdr->port)) goto drop; - + sock_put(sk); return NET_RX_SUCCESS; drop: + if (sk) + sock_put(sk); 
kfree_skb(skb); return NET_RX_DROP; } diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 61ec0329316c1353fb5f59f3e1e24d0cb397a19e..215c83986a9d4562f472a07e05bbbf607c56bbb0 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -158,7 +158,6 @@ EXPORT_SYMBOL(eth_rebuild_header); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) { struct ethhdr *eth; - unsigned char *rawp; skb->dev = dev; skb_reset_mac_header(skb); @@ -199,15 +198,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) if (ntohs(eth->h_proto) >= 1536) return eth->h_proto; - rawp = skb->data; - /* * This is a magic hack to spot IPX packets. Older Novell breaks * the protocol design and runs IPX over 802.3 without an 802.2 LLC * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This * won't work for fault tolerant netware but does for the rest. */ - if (*(unsigned short *)rawp == 0xFFFF) + if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF) return htons(ETH_P_802_3); /* diff --git a/net/ethernet/pe2.c b/net/ethernet/pe2.c index eb00796758c312a8eb6be570977c508e3dbb48ec..85d574addbc1757be0bd9fa5b4c3d7b99521d092 100644 --- a/net/ethernet/pe2.c +++ b/net/ethernet/pe2.c @@ -28,11 +28,10 @@ struct datalink_proto *make_EII_client(void) return proto; } +EXPORT_SYMBOL(make_EII_client); void destroy_EII_client(struct datalink_proto *dl) { kfree(dl); } - EXPORT_SYMBOL(destroy_EII_client); -EXPORT_SYMBOL(make_EII_client); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 551ce564b035592f54f1d6330d98454570bf3afd..6a1100c25a9f881f204934b1687b422384527f9a 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -355,6 +355,8 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; + inet->nodefrag = 0; + if (SOCK_RAW == sock->type) { inet->inet_num = protocol; if (IPPROTO_RAW == protocol) @@ -725,28 +727,31 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, sock_rps_record_flow(sk); /* We may need to bind the socket. */ - if (!inet_sk(sk)->inet_num && inet_autobind(sk)) + if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && + inet_autobind(sk)) return -EAGAIN; return sk->sk_prot->sendmsg(iocb, sk, msg, size); } EXPORT_SYMBOL(inet_sendmsg); -static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags) +ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags) { struct sock *sk = sock->sk; sock_rps_record_flow(sk); /* We may need to bind the socket. 
*/ - if (!inet_sk(sk)->inet_num && inet_autobind(sk)) + if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && + inet_autobind(sk)) return -EAGAIN; if (sk->sk_prot->sendpage) return sk->sk_prot->sendpage(sk, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } +EXPORT_SYMBOL(inet_sendpage); int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) @@ -892,10 +897,10 @@ const struct proto_ops inet_stream_ops = { .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, - .sendmsg = tcp_sendmsg, + .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, - .sendpage = tcp_sendpage, + .sendpage = inet_sendpage, .splice_read = tcp_splice_read, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, @@ -1100,7 +1105,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) if (err) return err; - sk_setup_caps(sk, &rt->u.dst); + sk_setup_caps(sk, &rt->dst); new_saddr = rt->rt_src; @@ -1166,7 +1171,7 @@ int inet_sk_rebuild_header(struct sock *sk) err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0); } if (!err) - sk_setup_caps(sk, &rt->u.dst); + sk_setup_caps(sk, &rt->dst); else { /* Routing failed... */ sk->sk_route_caps = 0; @@ -1425,13 +1430,49 @@ unsigned long snmp_fold_field(void __percpu *mib[], int offt) } EXPORT_SYMBOL_GPL(snmp_fold_field); -int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) +#if BITS_PER_LONG==32 + +u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset) +{ + u64 res = 0; + int cpu; + + for_each_possible_cpu(cpu) { + void *bhptr, *userptr; + struct u64_stats_sync *syncp; + u64 v_bh, v_user; + unsigned int start; + + /* first mib used by softirq context, we must use _bh() accessors */ + bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu); + syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); + do { + start = u64_stats_fetch_begin_bh(syncp); + v_bh = *(((u64 *) bhptr) + offt); + } while (u64_stats_fetch_retry_bh(syncp, start)); + + /* second mib used in USER context */ + userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu); + syncp = (struct u64_stats_sync *)(userptr + syncp_offset); + do { + start = u64_stats_fetch_begin(syncp); + v_user = *(((u64 *) userptr) + offt); + } while (u64_stats_fetch_retry(syncp, start)); + + res += v_bh + v_user; + } + return res; +} +EXPORT_SYMBOL_GPL(snmp_fold_field64); +#endif + +int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align) { BUG_ON(ptr == NULL); - ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long)); + ptr[0] = __alloc_percpu(mibsize, align); if (!ptr[0]) goto err0; - ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long)); + ptr[1] = __alloc_percpu(mibsize, align); if (!ptr[1]) goto err1; return 0; @@ -1488,25 +1529,32 @@ static const struct net_protocol icmp_protocol = { static __net_init int ipv4_mib_init_net(struct net *net) { if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics, - sizeof(struct tcp_mib)) < 0) + sizeof(struct tcp_mib), + __alignof__(struct tcp_mib)) < 0) goto err_tcp_mib; if (snmp_mib_init((void __percpu **)net->mib.ip_statistics, - sizeof(struct ipstats_mib)) < 0) + sizeof(struct ipstats_mib), + __alignof__(struct ipstats_mib)) < 0) goto err_ip_mib; if (snmp_mib_init((void __percpu **)net->mib.net_statistics, - sizeof(struct linux_mib)) < 0) + sizeof(struct linux_mib), + __alignof__(struct linux_mib)) < 0) goto err_net_mib; if (snmp_mib_init((void __percpu **)net->mib.udp_statistics, - 
sizeof(struct udp_mib)) < 0) + sizeof(struct udp_mib), + __alignof__(struct udp_mib)) < 0) goto err_udp_mib; if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics, - sizeof(struct udp_mib)) < 0) + sizeof(struct udp_mib), + __alignof__(struct udp_mib)) < 0) goto err_udplite_mib; if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics, - sizeof(struct icmp_mib)) < 0) + sizeof(struct icmp_mib), + __alignof__(struct icmp_mib)) < 0) goto err_icmp_mib; if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics, - sizeof(struct icmpmsg_mib)) < 0) + sizeof(struct icmpmsg_mib), + __alignof__(struct icmpmsg_mib)) < 0) goto err_icmpmsg_mib; tcp_mib_init(net); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index f094b75810db5fd44f29e14e4ff3b244441bdee1..96c1955b3e2f5b47181c6fe331b581fc161baf27 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -116,6 +116,7 @@ #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) #include struct neigh_table *clip_tbl_hook; +EXPORT_SYMBOL(clip_tbl_hook); #endif #include @@ -169,6 +170,7 @@ const struct neigh_ops arp_broken_ops = { .hh_output = dev_queue_xmit, .queue_xmit = dev_queue_xmit, }; +EXPORT_SYMBOL(arp_broken_ops); struct neigh_table arp_tbl = { .family = AF_INET, @@ -198,6 +200,7 @@ struct neigh_table arp_tbl = { .gc_thresh2 = 512, .gc_thresh3 = 1024, }; +EXPORT_SYMBOL(arp_tbl); int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) { @@ -333,11 +336,14 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) struct net_device *dev = neigh->dev; __be32 target = *(__be32*)neigh->primary_key; int probes = atomic_read(&neigh->probes); - struct in_device *in_dev = in_dev_get(dev); + struct in_device *in_dev; - if (!in_dev) + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + if (!in_dev) { + rcu_read_unlock(); return; - + } switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { default: case 0: /* By default announce any local IP */ @@ -358,9 +364,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) case 2: /* Avoid secondary IPs, get a primary/preferred one */ break; } + rcu_read_unlock(); - if (in_dev) - in_dev_put(in_dev); if (!saddr) saddr = inet_select_addr(dev, target, RT_SCOPE_LINK); @@ -427,7 +432,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) if (ip_route_output_key(net, &rt, &fl) < 0) return 1; - if (rt->u.dst.dev != dev) { + if (rt->dst.dev != dev) { NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); flag = 1; } @@ -497,6 +502,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb) kfree_skb(skb); return 1; } +EXPORT_SYMBOL(arp_find); /* END OF OBSOLETE FUNCTIONS */ @@ -532,7 +538,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct in_device *out_dev; int imi, omi = -1; - if (rt->u.dst.dev == dev) + if (rt->dst.dev == dev) return 0; if (!IN_DEV_PROXY_ARP(in_dev)) @@ -545,10 +551,10 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, /* place to check for proxy_arp for routes */ - if ((out_dev = in_dev_get(rt->u.dst.dev)) != NULL) { + out_dev = __in_dev_get_rcu(rt->dst.dev); + if (out_dev) omi = IN_DEV_MEDIUM_ID(out_dev); - in_dev_put(out_dev); - } + return (omi != imi && omi != -1); } @@ -576,7 +582,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev, __be32 sip, __be32 tip) { /* Private VLAN is only concerned about the same ethernet segment */ - if (rt->u.dst.dev != dev) + if (rt->dst.dev != dev) return 0; /* Don't reply on self probes (often done by windowz boxes)*/ @@ -698,6 +704,7 @@ struct sk_buff *arp_create(int 
type, int ptype, __be32 dest_ip, kfree_skb(skb); return NULL; } +EXPORT_SYMBOL(arp_create); /* * Send an arp packet. @@ -707,6 +714,7 @@ void arp_xmit(struct sk_buff *skb) /* Send it off, maybe filter it using firewalling first. */ NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit); } +EXPORT_SYMBOL(arp_xmit); /* * Create and send an arp packet. @@ -733,6 +741,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, arp_xmit(skb); } +EXPORT_SYMBOL(arp_send); /* * Process an arp request. @@ -741,7 +750,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, static int arp_process(struct sk_buff *skb) { struct net_device *dev = skb->dev; - struct in_device *in_dev = in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); struct arphdr *arp; unsigned char *arp_ptr; struct rtable *rt; @@ -890,7 +899,6 @@ static int arp_process(struct sk_buff *skb) arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); } else { pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); - in_dev_put(in_dev); return 0; } goto out; @@ -936,8 +944,6 @@ static int arp_process(struct sk_buff *skb) } out: - if (in_dev) - in_dev_put(in_dev); consume_skb(skb); return 0; } @@ -1045,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, struct rtable * rt; if ((err = ip_route_output_key(net, &rt, &fl)) != 0) return err; - dev = rt->u.dst.dev; + dev = rt->dst.dev; ip_rt_put(rt); if (!dev) return -EINVAL; @@ -1152,7 +1158,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, struct rtable * rt; if ((err = ip_route_output_key(net, &rt, &fl)) != 0) return err; - dev = rt->u.dst.dev; + dev = rt->dst.dev; ip_rt_put(rt); if (!dev) return -EINVAL; @@ -1453,14 +1459,3 @@ static int __init arp_proc_init(void) } #endif /* CONFIG_PROC_FS */ - -EXPORT_SYMBOL(arp_broken_ops); -EXPORT_SYMBOL(arp_find); -EXPORT_SYMBOL(arp_create); -EXPORT_SYMBOL(arp_xmit); -EXPORT_SYMBOL(arp_send); -EXPORT_SYMBOL(arp_tbl); - -#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) -EXPORT_SYMBOL(clip_tbl_hook); -#endif diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index fb2465811b48658b6be95d6cfd2498c3f8d41da7..f0550941df7b9e1ac63468384eba7ede1bf6d537 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -69,9 +69,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk->sk_state = TCP_ESTABLISHED; inet->inet_id = jiffies; - sk_dst_set(sk, &rt->u.dst); + sk_dst_set(sk, &rt->dst); return(0); } - EXPORT_SYMBOL(ip4_datagram_connect); - diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 382bc768ed565cc79bc79c8c71fa0588d2a7a78b..da14c49284f41677f17d273badbbbd0560381ddb 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1081,6 +1081,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, } ip_mc_up(in_dev); /* fall through */ + case NETDEV_NOTIFY_PEERS: case NETDEV_CHANGEADDR: /* Send gratuitous ARP to notify of link change */ if (IN_DEV_ARP_NOTIFY(in_dev)) { diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 4f0ed458c883658c37265fe6537bde2ac8b88429..a43968918350244a057e6f3364727d6a2aa7baf2 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -175,6 +175,7 @@ struct net_device * ip_dev_find(struct net *net, __be32 addr) fib_res_put(&res); return dev; } +EXPORT_SYMBOL(ip_dev_find); /* * Find address type as if only "dev" was present in the system. 
If @@ -214,12 +215,14 @@ unsigned int inet_addr_type(struct net *net, __be32 addr) { return __inet_dev_addr_type(net, NULL, addr); } +EXPORT_SYMBOL(inet_addr_type); unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr) { return __inet_dev_addr_type(net, dev, addr); } +EXPORT_SYMBOL(inet_dev_addr_type); /* Given (packet source, input interface) and optional (dst, oif, tos): - (main) check, that source is valid i.e. not broadcast or our local @@ -284,7 +287,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, if (no_addr) goto last_resort; if (rpf == 1) - goto e_inval; + goto e_rpf; fl.oif = dev->ifindex; ret = 0; @@ -299,7 +302,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, last_resort: if (rpf) - goto e_inval; + goto e_rpf; *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); *itag = 0; return 0; @@ -308,6 +311,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, fib_res_put(&res); e_inval: return -EINVAL; +e_rpf: + return -EXDEV; } static inline __be32 sk_extract_addr(struct sockaddr *addr) @@ -1075,7 +1080,3 @@ void __init ip_fib_init(void) fib_hash_init(); } - -EXPORT_SYMBOL(inet_addr_type); -EXPORT_SYMBOL(inet_dev_addr_type); -EXPORT_SYMBOL(ip_dev_find); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index d65e9215bcd77c2b15d6facdc0d75a67310c68f2..a0d847c7cba5c89aec43b340036bf456468f9e9a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -181,6 +181,7 @@ const struct icmp_err icmp_err_convert[] = { .fatal = 1, }, }; +EXPORT_SYMBOL(icmp_err_convert); /* * ICMP control array. This specifies what to do with each ICMP. @@ -267,11 +268,12 @@ int xrlim_allow(struct dst_entry *dst, int timeout) dst->rate_tokens = token; return rc; } +EXPORT_SYMBOL(xrlim_allow); static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt, int type, int code) { - struct dst_entry *dst = &rt->u.dst; + struct dst_entry *dst = &rt->dst; int rc = 1; if (type > NR_ICMP_TYPES) @@ -327,7 +329,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, struct sock *sk; struct sk_buff *skb; - sk = icmp_sk(dev_net((*rt)->u.dst.dev)); + sk = icmp_sk(dev_net((*rt)->dst.dev)); if (ip_append_data(sk, icmp_glue_bits, icmp_param, icmp_param->data_len+icmp_param->head_len, icmp_param->head_len, @@ -359,7 +361,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); - struct net *net = dev_net(rt->u.dst.dev); + struct net *net = dev_net(rt->dst.dev); struct sock *sk; struct inet_sock *inet; __be32 daddr; @@ -427,7 +429,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) if (!rt) goto out; - net = dev_net(rt->u.dst.dev); + net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. @@ -596,9 +598,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) /* Ugh! 
*/ orefdst = skb_in->_skb_refdst; /* save old refdst */ err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, - RT_TOS(tos), rt2->u.dst.dev); + RT_TOS(tos), rt2->dst.dev); - dst_release(&rt2->u.dst); + dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); skb_in->_skb_refdst = orefdst; /* restore old refdst */ } @@ -610,7 +612,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) XFRM_LOOKUP_ICMP); switch (err) { case 0: - dst_release(&rt->u.dst); + dst_release(&rt->dst); rt = rt2; break; case -EPERM: @@ -629,7 +631,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) /* RFC says return as much as we can without exceeding 576 bytes. */ - room = dst_mtu(&rt->u.dst); + room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; @@ -647,6 +649,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) icmp_xmit_unlock(sk); out:; } +EXPORT_SYMBOL(icmp_send); /* @@ -925,6 +928,7 @@ static void icmp_address(struct sk_buff *skb) /* * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain * loudly if an inconsistency is found. + * called with rcu_read_lock() */ static void icmp_address_reply(struct sk_buff *skb) @@ -935,12 +939,12 @@ static void icmp_address_reply(struct sk_buff *skb) struct in_ifaddr *ifa; if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) - goto out; + return; - in_dev = in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (!in_dev) - goto out; - rcu_read_lock(); + return; + if (in_dev->ifa_list && IN_DEV_LOG_MARTIANS(in_dev) && IN_DEV_FORWARD(in_dev)) { @@ -958,9 +962,6 @@ static void icmp_address_reply(struct sk_buff *skb) mp, dev->name, &rt->rt_src); } } - rcu_read_unlock(); - in_dev_put(in_dev); -out:; } static void icmp_discard(struct sk_buff *skb) @@ -974,7 +975,7 @@ int icmp_rcv(struct sk_buff *skb) { struct icmphdr *icmph; struct rtable *rt = skb_rtable(skb); - struct net *net = dev_net(rt->u.dst.dev); + struct net *net = dev_net(rt->dst.dev); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); @@ -1216,7 +1217,3 @@ int __init icmp_init(void) { return register_pernet_subsys(&icmp_sk_ops); } - -EXPORT_SYMBOL(icmp_err_convert); -EXPORT_SYMBOL(icmp_send); -EXPORT_SYMBOL(xrlim_allow); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 5fff865a4fa745c02cf93e65323f3316ae8299d0..a1ad0e7180d2bd7051df4f78bed75fbb7612da40 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -312,7 +312,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) return NULL; } - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); skb->dev = dev; skb_reserve(skb, LL_RESERVED_SPACE(dev)); @@ -330,7 +330,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) pip->saddr = rt->rt_src; pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ - ip_select_ident(pip, &rt->u.dst, NULL); + ip_select_ident(pip, &rt->dst, NULL); ((u8*)&pip[1])[0] = IPOPT_RA; ((u8*)&pip[1])[1] = 4; ((u8*)&pip[1])[2] = 0; @@ -660,7 +660,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, return -1; } - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); skb_reserve(skb, LL_RESERVED_SPACE(dev)); @@ -676,7 +676,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, iph->daddr = dst; iph->saddr = rt->rt_src; iph->protocol = IPPROTO_IGMP; - ip_select_ident(iph, &rt->u.dst, NULL); + ip_select_ident(iph, &rt->dst, NULL); ((u8*)&iph[1])[0] 
= IPOPT_RA; ((u8*)&iph[1])[1] = 4; ((u8*)&iph[1])[2] = 0; @@ -916,18 +916,19 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, read_unlock(&in_dev->mc_list_lock); } +/* called in rcu_read_lock() section */ int igmp_rcv(struct sk_buff *skb) { /* This basically follows the spec line by line -- see RFC1112 */ struct igmphdr *ih; - struct in_device *in_dev = in_dev_get(skb->dev); + struct in_device *in_dev = __in_dev_get_rcu(skb->dev); int len = skb->len; if (in_dev == NULL) goto drop; if (!pskb_may_pull(skb, sizeof(struct igmphdr))) - goto drop_ref; + goto drop; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: @@ -937,7 +938,7 @@ int igmp_rcv(struct sk_buff *skb) case CHECKSUM_NONE: skb->csum = 0; if (__skb_checksum_complete(skb)) - goto drop_ref; + goto drop; } ih = igmp_hdr(skb); @@ -957,7 +958,6 @@ int igmp_rcv(struct sk_buff *skb) break; case IGMP_PIM: #ifdef CONFIG_IP_PIMSM_V1 - in_dev_put(in_dev); return pim_rcv_v1(skb); #endif case IGMPV3_HOST_MEMBERSHIP_REPORT: @@ -971,8 +971,6 @@ int igmp_rcv(struct sk_buff *skb) break; } -drop_ref: - in_dev_put(in_dev); drop: kfree_skb(skb); return 0; @@ -1246,6 +1244,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) out: return; } +EXPORT_SYMBOL(ip_mc_inc_group); /* * Resend IGMP JOIN report; used for bonding. @@ -1268,6 +1267,7 @@ void ip_mc_rejoin_group(struct ip_mc_list *im) igmp_ifc_event(in_dev); #endif } +EXPORT_SYMBOL(ip_mc_rejoin_group); /* * A socket has left a multicast group on device dev @@ -1298,6 +1298,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) } } } +EXPORT_SYMBOL(ip_mc_dec_group); /* Device changing type */ @@ -1427,7 +1428,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) } if (!dev && !ip_route_output_key(net, &rt, &fl)) { - dev = rt->u.dst.dev; + dev = rt->dst.dev; ip_rt_put(rt); } if (dev) { @@ -1646,8 +1647,7 @@ static int sf_setstate(struct ip_mc_list *pmc) if (dpsf->sf_inaddr == psf->sf_inaddr) break; if (!dpsf) { - dpsf = (struct ip_sf_list *) - kmalloc(sizeof(*dpsf), GFP_ATOMIC); + dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); if (!dpsf) continue; *dpsf = *psf; @@ -1807,6 +1807,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) rtnl_unlock(); return err; } +EXPORT_SYMBOL(ip_mc_join_group); static void ip_sf_socklist_reclaim(struct rcu_head *rp) { @@ -2679,8 +2680,3 @@ int __init igmp_mc_proc_init(void) return register_pernet_subsys(&igmp_net_ops); } #endif - -EXPORT_SYMBOL(ip_mc_dec_group); -EXPORT_SYMBOL(ip_mc_inc_group); -EXPORT_SYMBOL(ip_mc_join_group); -EXPORT_SYMBOL(ip_mc_rejoin_group); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 70eb3507c40645a028d0ecd0421911f10f0a541d..7174370b1195b8f79c738c5b78f4156421bb50a1 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -84,7 +84,6 @@ int inet_csk_bind_conflict(const struct sock *sk, } return node != NULL; } - EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); /* Obtain a reference to a local port for the given sock, @@ -212,7 +211,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) local_bh_enable(); return ret; } - EXPORT_SYMBOL_GPL(inet_csk_get_port); /* @@ -305,7 +303,6 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) *err = error; goto out; } - EXPORT_SYMBOL(inet_csk_accept); /* @@ -327,7 +324,6 @@ void inet_csk_init_xmit_timers(struct sock *sk, setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk); icsk->icsk_pending = icsk->icsk_ack.pending = 0; } - 
EXPORT_SYMBOL(inet_csk_init_xmit_timers); void inet_csk_clear_xmit_timers(struct sock *sk) @@ -340,21 +336,18 @@ void inet_csk_clear_xmit_timers(struct sock *sk) sk_stop_timer(sk, &icsk->icsk_delack_timer); sk_stop_timer(sk, &sk->sk_timer); } - EXPORT_SYMBOL(inet_csk_clear_xmit_timers); void inet_csk_delete_keepalive_timer(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); } - EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) { sk_reset_timer(sk, &sk->sk_timer, jiffies + len); } - EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); struct dst_entry *inet_csk_route_req(struct sock *sk, @@ -383,7 +376,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, goto no_route; if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) goto route_err; - return &rt->u.dst; + return &rt->dst; route_err: ip_rt_put(rt); @@ -391,7 +384,6 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } - EXPORT_SYMBOL_GPL(inet_csk_route_req); static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, @@ -433,7 +425,6 @@ struct request_sock *inet_csk_search_req(const struct sock *sk, return req; } - EXPORT_SYMBOL_GPL(inet_csk_search_req); void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, @@ -447,11 +438,11 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); inet_csk_reqsk_queue_added(sk, timeout); } +EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add); /* Only thing we need from tcp.h */ extern int sysctl_tcp_synack_retries; -EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add); /* Decide when to expire the request and when to resend SYN-ACK */ static inline void syn_ack_recalc(struct request_sock *req, const int thresh, @@ -569,7 +560,6 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, if (lopt->qlen) inet_csk_reset_keepalive_timer(parent, interval); } - EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, @@ -599,7 +589,6 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, } return newsk; } - EXPORT_SYMBOL_GPL(inet_csk_clone); /* @@ -630,7 +619,6 @@ void inet_csk_destroy_sock(struct sock *sk) percpu_counter_dec(sk->sk_prot->orphan_count); sock_put(sk); } - EXPORT_SYMBOL(inet_csk_destroy_sock); int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) @@ -665,7 +653,6 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) __reqsk_queue_destroy(&icsk->icsk_accept_queue); return -EADDRINUSE; } - EXPORT_SYMBOL_GPL(inet_csk_listen_start); /* @@ -720,7 +707,6 @@ void inet_csk_listen_stop(struct sock *sk) } WARN_ON(sk->sk_ack_backlog); } - EXPORT_SYMBOL_GPL(inet_csk_listen_stop); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) @@ -732,7 +718,6 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) sin->sin_addr.s_addr = inet->inet_daddr; sin->sin_port = inet->inet_dport; } - EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); #ifdef CONFIG_COMPAT @@ -747,7 +732,6 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); } - EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, @@ -761,6 +745,5 @@ int inet_csk_compat_setsockopt(struct sock 
*sk, int level, int optname, return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); } - EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt); #endif diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index a2ca6aed763bad1e0c449d32e79786512b553ef7..5ff2a51b6d0c7d5eb1b3ba73f1f516dab86204ee 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -114,7 +114,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) fq->last_in |= INET_FRAG_COMPLETE; } } - EXPORT_SYMBOL(inet_frag_kill); static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index d3e160a88219d5d45c759bff91d2da54a03939bc..fb7ad5a21ff3e2e86b2d9018ea29ccd2e3921848 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -99,7 +99,6 @@ void inet_put_port(struct sock *sk) __inet_put_port(sk); local_bh_enable(); } - EXPORT_SYMBOL(inet_put_port); void __inet_inherit_port(struct sock *sk, struct sock *child) @@ -116,7 +115,6 @@ void __inet_inherit_port(struct sock *sk, struct sock *child) inet_csk(child)->icsk_bind_hash = tb; spin_unlock(&head->lock); } - EXPORT_SYMBOL_GPL(__inet_inherit_port); static inline int compute_score(struct sock *sk, struct net *net, @@ -546,7 +544,6 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row, return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), __inet_check_established, __inet_hash_nolisten); } - EXPORT_SYMBOL_GPL(inet_hash_connect); void inet_hashinfo_init(struct inet_hashinfo *h) @@ -560,5 +557,4 @@ void inet_hashinfo_init(struct inet_hashinfo *h) i + LISTENING_NULLS_BASE); } } - EXPORT_SYMBOL_GPL(inet_hashinfo_init); diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 6bcfe52a9c878dbc3b018c2e8562b3c4261ec669..9ffa24b9a804c143f611f37b092ccbfaa5df036f 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -51,8 +51,8 @@ * lookups performed with disabled BHs. * * Serialisation issues. - * 1. Nodes may appear in the tree only with the pool write lock held. - * 2. Nodes may disappear from the tree only with the pool write lock held + * 1. Nodes may appear in the tree only with the pool lock held. + * 2. Nodes may disappear from the tree only with the pool lock held * AND reference count being 0. * 3. Nodes appears and disappears from unused node list only under * "inet_peer_unused_lock". @@ -64,23 +64,31 @@ * usually under some other lock to prevent node disappearing * dtime: unused node list lock * v4daddr: unchangeable - * ip_id_count: idlock + * ip_id_count: atomic value (no lock needed) */ static struct kmem_cache *peer_cachep __read_mostly; #define node_height(x) x->avl_height -static struct inet_peer peer_fake_node = { - .avl_left = &peer_fake_node, - .avl_right = &peer_fake_node, + +#define peer_avl_empty ((struct inet_peer *)&peer_fake_node) +static const struct inet_peer peer_fake_node = { + .avl_left = peer_avl_empty, + .avl_right = peer_avl_empty, .avl_height = 0 }; -#define peer_avl_empty (&peer_fake_node) -static struct inet_peer *peer_root = peer_avl_empty; -static DEFINE_RWLOCK(peer_pool_lock); + +static struct { + struct inet_peer *root; + spinlock_t lock; + int total; +} peers = { + .root = peer_avl_empty, + .lock = __SPIN_LOCK_UNLOCKED(peers.lock), + .total = 0, +}; #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ -static int peer_total; /* Exported for sysctl_net_ipv4. 
*/ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more * aggressively at this stage */ @@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min int inet_peer_gc_mintime __read_mostly = 10 * HZ; int inet_peer_gc_maxtime __read_mostly = 120 * HZ; -static LIST_HEAD(unused_peers); -static DEFINE_SPINLOCK(inet_peer_unused_lock); +static struct { + struct list_head list; + spinlock_t lock; +} unused_peers = { + .list = LIST_HEAD_INIT(unused_peers.list), + .lock = __SPIN_LOCK_UNLOCKED(unused_peers.lock), +}; static void peer_check_expire(unsigned long dummy); static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0); @@ -116,7 +129,7 @@ void __init inet_initpeers(void) peer_cachep = kmem_cache_create("inet_peer_cache", sizeof(struct inet_peer), - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, + 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); /* All the timers, started at system startup tend @@ -131,38 +144,69 @@ void __init inet_initpeers(void) /* Called with or without local BH being disabled. */ static void unlink_from_unused(struct inet_peer *p) { - spin_lock_bh(&inet_peer_unused_lock); - list_del_init(&p->unused); - spin_unlock_bh(&inet_peer_unused_lock); + if (!list_empty(&p->unused)) { + spin_lock_bh(&unused_peers.lock); + list_del_init(&p->unused); + spin_unlock_bh(&unused_peers.lock); + } } /* * Called with local BH disabled and the pool lock held. - * _stack is known to be NULL or not at compile time, - * so compiler will optimize the if (_stack) tests. */ #define lookup(_daddr, _stack) \ ({ \ struct inet_peer *u, **v; \ - if (_stack != NULL) { \ - stackptr = _stack; \ - *stackptr++ = &peer_root; \ - } \ - for (u = peer_root; u != peer_avl_empty; ) { \ + \ + stackptr = _stack; \ + *stackptr++ = &peers.root; \ + for (u = peers.root; u != peer_avl_empty; ) { \ if (_daddr == u->v4daddr) \ break; \ if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ v = &u->avl_left; \ else \ v = &u->avl_right; \ - if (_stack != NULL) \ - *stackptr++ = v; \ + *stackptr++ = v; \ u = *v; \ } \ u; \ }) -/* Called with local BH disabled and the pool write lock held. */ +/* + * Called with rcu_read_lock_bh() + * Because we hold no lock against a writer, its quite possible we fall + * in an endless loop. + * But every pointer we follow is guaranteed to be valid thanks to RCU. + * We exit from this function if number of links exceeds PEER_MAXDEPTH + */ +static struct inet_peer *lookup_rcu_bh(__be32 daddr) +{ + struct inet_peer *u = rcu_dereference_bh(peers.root); + int count = 0; + + while (u != peer_avl_empty) { + if (daddr == u->v4daddr) { + /* Before taking a reference, check if this entry was + * deleted, unlink_from_pool() sets refcnt=-1 to make + * distinction between an unused entry (refcnt=0) and + * a freed one. + */ + if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1))) + u = NULL; + return u; + } + if ((__force __u32)daddr < (__force __u32)u->v4daddr) + u = rcu_dereference_bh(u->avl_left); + else + u = rcu_dereference_bh(u->avl_right); + if (unlikely(++count == PEER_MAXDEPTH)) + break; + } + return NULL; +} + +/* Called with local BH disabled and the pool lock held. */ #define lookup_rightempty(start) \ ({ \ struct inet_peer *u, **v; \ @@ -176,9 +220,10 @@ static void unlink_from_unused(struct inet_peer *p) u; \ }) -/* Called with local BH disabled and the pool write lock held. +/* Called with local BH disabled and the pool lock held. * Variable names are the proof of operation correctness. 
- * Look into mm/map_avl.c for more detail description of the ideas. */ + * Look into mm/map_avl.c for more detail description of the ideas. + */ static void peer_avl_rebalance(struct inet_peer **stack[], struct inet_peer ***stackend) { @@ -254,15 +299,21 @@ static void peer_avl_rebalance(struct inet_peer **stack[], } } -/* Called with local BH disabled and the pool write lock held. */ +/* Called with local BH disabled and the pool lock held. */ #define link_to_pool(n) \ do { \ n->avl_height = 1; \ n->avl_left = peer_avl_empty; \ n->avl_right = peer_avl_empty; \ + smp_wmb(); /* lockless readers can catch us now */ \ **--stackptr = n; \ peer_avl_rebalance(stack, stackptr); \ -} while(0) +} while (0) + +static void inetpeer_free_rcu(struct rcu_head *head) +{ + kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu)); +} /* May be called with local BH enabled. */ static void unlink_from_pool(struct inet_peer *p) @@ -271,13 +322,14 @@ static void unlink_from_pool(struct inet_peer *p) do_free = 0; - write_lock_bh(&peer_pool_lock); + spin_lock_bh(&peers.lock); /* Check the reference counter. It was artificially incremented by 1 - * in cleanup() function to prevent sudden disappearing. If the - * reference count is still 1 then the node is referenced only as `p' - * here and from the pool. So under the exclusive pool lock it's safe - * to remove the node and free it later. */ - if (atomic_read(&p->refcnt) == 1) { + * in cleanup() function to prevent sudden disappearing. If we can + * atomically (because of lockless readers) take this last reference, + * it's safe to remove the node and free it later. + * We use refcnt=-1 to alert lockless readers this entry is deleted. + */ + if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { struct inet_peer **stack[PEER_MAXDEPTH]; struct inet_peer ***stackptr, ***delp; if (lookup(p->v4daddr, stack) != p) @@ -303,20 +355,21 @@ static void unlink_from_pool(struct inet_peer *p) delp[1] = &t->avl_left; /* was &p->avl_left */ } peer_avl_rebalance(stack, stackptr); - peer_total--; + peers.total--; do_free = 1; } - write_unlock_bh(&peer_pool_lock); + spin_unlock_bh(&peers.lock); if (do_free) - kmem_cache_free(peer_cachep, p); + call_rcu_bh(&p->rcu, inetpeer_free_rcu); else /* The node is used again. Decrease the reference counter * back. The loop "cleanup -> unlink_from_unused * -> unlink_from_pool -> putpeer -> link_to_unused * -> cleanup (for the same node)" * doesn't really exist because the entry will have a - * recent deletion time and will not be cleaned again soon. */ + * recent deletion time and will not be cleaned again soon. + */ inet_putpeer(p); } @@ -326,16 +379,16 @@ static int cleanup_once(unsigned long ttl) struct inet_peer *p = NULL; /* Remove the first entry from the list of unused nodes. */ - spin_lock_bh(&inet_peer_unused_lock); - if (!list_empty(&unused_peers)) { + spin_lock_bh(&unused_peers.lock); + if (!list_empty(&unused_peers.list)) { __u32 delta; - p = list_first_entry(&unused_peers, struct inet_peer, unused); + p = list_first_entry(&unused_peers.list, struct inet_peer, unused); delta = (__u32)jiffies - p->dtime; if (delta < ttl) { /* Do not prune fresh entries. */ - spin_unlock_bh(&inet_peer_unused_lock); + spin_unlock_bh(&unused_peers.lock); return -1; } @@ -345,7 +398,7 @@ static int cleanup_once(unsigned long ttl) * before unlink_from_pool() call. 
*/ atomic_inc(&p->refcnt); } - spin_unlock_bh(&inet_peer_unused_lock); + spin_unlock_bh(&unused_peers.lock); if (p == NULL) /* It means that the total number of USED entries has @@ -360,62 +413,56 @@ static int cleanup_once(unsigned long ttl) /* Called with or without local BH being disabled. */ struct inet_peer *inet_getpeer(__be32 daddr, int create) { - struct inet_peer *p, *n; + struct inet_peer *p; struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; - /* Look up for the address quickly. */ - read_lock_bh(&peer_pool_lock); - p = lookup(daddr, NULL); - if (p != peer_avl_empty) - atomic_inc(&p->refcnt); - read_unlock_bh(&peer_pool_lock); + /* Look up for the address quickly, lockless. + * Because of a concurrent writer, we might not find an existing entry. + */ + rcu_read_lock_bh(); + p = lookup_rcu_bh(daddr); + rcu_read_unlock_bh(); + + if (p) { + /* The existing node has been found. + * Remove the entry from unused list if it was there. + */ + unlink_from_unused(p); + return p; + } + /* retry an exact lookup, taking the lock before. + * At least, nodes should be hot in our cache. + */ + spin_lock_bh(&peers.lock); + p = lookup(daddr, stack); if (p != peer_avl_empty) { - /* The existing node has been found. */ + atomic_inc(&p->refcnt); + spin_unlock_bh(&peers.lock); /* Remove the entry from unused list if it was there. */ unlink_from_unused(p); return p; } + p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; + if (p) { + p->v4daddr = daddr; + atomic_set(&p->refcnt, 1); + atomic_set(&p->rid, 0); + atomic_set(&p->ip_id_count, secure_ip_id(daddr)); + p->tcp_ts_stamp = 0; + INIT_LIST_HEAD(&p->unused); + + + /* Link the node. */ + link_to_pool(p); + peers.total++; + } + spin_unlock_bh(&peers.lock); - if (!create) - return NULL; - - /* Allocate the space outside the locked region. */ - n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC); - if (n == NULL) - return NULL; - n->v4daddr = daddr; - atomic_set(&n->refcnt, 1); - atomic_set(&n->rid, 0); - atomic_set(&n->ip_id_count, secure_ip_id(daddr)); - n->tcp_ts_stamp = 0; - - write_lock_bh(&peer_pool_lock); - /* Check if an entry has suddenly appeared. */ - p = lookup(daddr, stack); - if (p != peer_avl_empty) - goto out_free; - - /* Link the node. */ - link_to_pool(n); - INIT_LIST_HEAD(&n->unused); - peer_total++; - write_unlock_bh(&peer_pool_lock); - - if (peer_total >= inet_peer_threshold) + if (peers.total >= inet_peer_threshold) /* Remove one less-recently-used entry. */ cleanup_once(0); - return n; - -out_free: - /* The appropriate node is already in the pool. */ - atomic_inc(&p->refcnt); - write_unlock_bh(&peer_pool_lock); - /* Remove the entry from unused list if it was there. */ - unlink_from_unused(p); - /* Free preallocated the preallocated node. */ - kmem_cache_free(peer_cachep, n); return p; } @@ -425,12 +472,12 @@ static void peer_check_expire(unsigned long dummy) unsigned long now = jiffies; int ttl; - if (peer_total >= inet_peer_threshold) + if (peers.total >= inet_peer_threshold) ttl = inet_peer_minttl; else ttl = inet_peer_maxttl - (inet_peer_maxttl - inet_peer_minttl) / HZ * - peer_total / inet_peer_threshold * HZ; + peers.total / inet_peer_threshold * HZ; while (!cleanup_once(ttl)) { if (jiffies != now) break; @@ -439,22 +486,25 @@ static void peer_check_expire(unsigned long dummy) /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime * interval depending on the total number of entries (more entries, * less interval). 
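The rewritten inet_getpeer() follows a two-step shape: an optimistic lockless lookup that is allowed to miss while a concurrent writer is inserting, then an exact lookup plus insert under the pool spinlock. A compressed userspace sketch of that control flow, reduced to a single published slot guarded by a pthread mutex; this is illustrative only, the real code walks an AVL tree under RCU and has proper error handling:

/* cc -std=c11 -pthread getpeer_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
	uint32_t addr;
	atomic_int refcnt;
};

static _Atomic(struct peer *) slot;	/* the published entry, or NULL */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static struct peer *get_peer(uint32_t addr)
{
	/* 1. optimistic lockless read; a miss here is legitimate while a
	 *    concurrent writer is still inserting */
	struct peer *p = atomic_load_explicit(&slot, memory_order_acquire);

	if (p && p->addr == addr) {
		atomic_fetch_add(&p->refcnt, 1);
		return p;
	}

	/* 2. slow path: exact lookup again under the lock, create the
	 *    entry only if it is still missing */
	pthread_mutex_lock(&slot_lock);
	p = atomic_load_explicit(&slot, memory_order_relaxed);
	if (p && p->addr == addr) {
		atomic_fetch_add(&p->refcnt, 1);
	} else {
		p = calloc(1, sizeof(*p));
		p->addr = addr;
		atomic_init(&p->refcnt, 1);
		atomic_store_explicit(&slot, p, memory_order_release);
	}
	pthread_mutex_unlock(&slot_lock);
	return p;
}

int main(void)
{
	struct peer *a = get_peer(0x0a000001);
	struct peer *b = get_peer(0x0a000001);

	printf("same entry: %d, refcnt: %d\n", a == b, atomic_load(&b->refcnt));
	return 0;
}

The point of the shape is that the common hit never touches the lock, while the locked slow path re-checks before allocating so two racing callers cannot both insert the same address.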
*/ - if (peer_total >= inet_peer_threshold) + if (peers.total >= inet_peer_threshold) peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime; else peer_periodic_timer.expires = jiffies + inet_peer_gc_maxtime - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ * - peer_total / inet_peer_threshold * HZ; + peers.total / inet_peer_threshold * HZ; add_timer(&peer_periodic_timer); } void inet_putpeer(struct inet_peer *p) { - spin_lock_bh(&inet_peer_unused_lock); - if (atomic_dec_and_test(&p->refcnt)) { - list_add_tail(&p->unused, &unused_peers); + local_bh_disable(); + + if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) { + list_add_tail(&p->unused, &unused_peers.list); p->dtime = (__u32)jiffies; + spin_unlock(&unused_peers.lock); } - spin_unlock_bh(&inet_peer_unused_lock); + + local_bh_enable(); } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 56cdf68a074c3d27cf9c85c03ac86b6803147d22..99461f09320f6d7350ac2a91e5c49062617f2c15 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -87,16 +87,16 @@ int ip_forward(struct sk_buff *skb) if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) goto sr_failed; - if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) && + if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) && (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { - IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS); + IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(dst_mtu(&rt->u.dst))); + htonl(dst_mtu(&rt->dst))); goto drop; } /* We are about to mangle packet. Copy it! */ - if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len)) + if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len)) goto drop; iph = ip_hdr(skb); @@ -113,7 +113,7 @@ int ip_forward(struct sk_buff *skb) skb->priority = rt_tos2priority(iph->tos); return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, - rt->u.dst.dev, ip_forward_finish); + rt->dst.dev, ip_forward_finish); sr_failed: /* diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 75347ea70ea071d6fef5223535b6bb83973b602b..b7c41654dde543ff310ac63aa696d65a58fd4d36 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -124,11 +124,8 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a) } /* Memory Tracking Functions. */ -static __inline__ void frag_kfree_skb(struct netns_frags *nf, - struct sk_buff *skb, int *work) +static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) { - if (work) - *work -= skb->truesize; atomic_sub(skb->truesize, &nf->mem); kfree_skb(skb); } @@ -309,7 +306,7 @@ static int ip_frag_reinit(struct ipq *qp) fp = qp->q.fragments; do { struct sk_buff *xp = fp->next; - frag_kfree_skb(qp->q.net, fp, NULL); + frag_kfree_skb(qp->q.net, fp); fp = xp; } while (fp); @@ -317,6 +314,7 @@ static int ip_frag_reinit(struct ipq *qp) qp->q.len = 0; qp->q.meat = 0; qp->q.fragments = NULL; + qp->q.fragments_tail = NULL; qp->iif = 0; return 0; @@ -389,6 +387,11 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) * in the chain of fragments so far. We must know where to put * this fragment, right? 
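inet_putpeer() now relies on atomic_dec_and_lock(): the unused-list spinlock is taken only when this put is the one that drops the refcount to zero. A userspace approximation of that helper built from C11 atomics and a pthread mutex; the slow path also covers the count being re-raised while waiting for the lock, as the kernel primitive does:

/* cc -std=c11 -pthread putpeer_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t unused_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns true with the lock held iff this call dropped the count to 0 */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	while (old > 1) {				/* lock-free fast path */
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}

	pthread_mutex_lock(lock);			/* slow path */
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true;				/* caller unlocks later */
	pthread_mutex_unlock(lock);			/* someone re-took a ref */
	return false;
}

int main(void)
{
	atomic_int refcnt = 2;

	printf("first put reaches zero : %d\n", dec_and_lock(&refcnt, &unused_lock));
	if (dec_and_lock(&refcnt, &unused_lock)) {
		printf("second put reaches zero: 1 (lock held for list work)\n");
		pthread_mutex_unlock(&unused_lock);
	}
	return 0;
}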
*/ + prev = qp->q.fragments_tail; + if (!prev || FRAG_CB(prev)->offset < offset) { + next = NULL; + goto found; + } prev = NULL; for (next = qp->q.fragments; next != NULL; next = next->next) { if (FRAG_CB(next)->offset >= offset) @@ -396,6 +399,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) prev = next; } +found: /* We found where to put this one. Check for overlap with * preceding fragment, and, if needed, align things so that * any overlaps are eliminated. @@ -446,7 +450,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) qp->q.fragments = next; qp->q.meat -= free_it->len; - frag_kfree_skb(qp->q.net, free_it, NULL); + frag_kfree_skb(qp->q.net, free_it); } } @@ -454,6 +458,8 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) /* Insert this fragment in the chain of fragments. */ skb->next = next; + if (!next) + qp->q.fragments_tail = skb; if (prev) prev->next = skb; else @@ -507,6 +513,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, goto out_nomem; fp->next = head->next; + if (!fp->next) + qp->q.fragments_tail = fp; prev->next = fp; skb_morph(head, qp->q.fragments); @@ -556,7 +564,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); - atomic_sub(head->truesize, &qp->q.net->mem); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; @@ -566,8 +573,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; - atomic_sub(fp->truesize, &qp->q.net->mem); } + atomic_sub(head->truesize, &qp->q.net->mem); head->next = NULL; head->dev = dev; @@ -578,6 +585,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, iph->tot_len = htons(len); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; + qp->q.fragments_tail = NULL; return 0; out_nomem: @@ -624,6 +632,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) kfree_skb(skb); return -ENOMEM; } +EXPORT_SYMBOL(ip_defrag); #ifdef CONFIG_SYSCTL static int zero; @@ -777,5 +786,3 @@ void __init ipfrag_init(void) ip4_frags.secret_interval = 10 * 60 * HZ; inet_frags_init(&ip4_frags); } - -EXPORT_SYMBOL(ip_defrag); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 32618e11076d49776d3e017ac162fcc0837489b5..945b20a5ad5006b6a8ebf84b22756f7573436c1f 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -731,6 +731,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev tos = 0; if (skb->protocol == htons(ETH_P_IP)) tos = old_iph->tos; + else if (skb->protocol == htons(ETH_P_IPV6)) + tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph); } { @@ -745,7 +747,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev goto tx_error; } } - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; if (tdev == dev) { ip_rt_put(rt); @@ -755,7 +757,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev df = tiph->frag_off; if (df) - mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; + mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen; else mtu = skb_dst(skb) ? 
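The fragments_tail pointer added to ip_frag_queue() exists for the common case of in-order arrival: a fragment whose offset is past the current tail is appended in O(1), and only out-of-order fragments fall back to the head-to-tail walk. A standalone sketch of that insert logic; the overlap trimming that the real code still performs afterwards is left out:

/* cc -std=c99 fragq_demo.c */
#include <stdio.h>
#include <stdlib.h>

struct frag {
	int offset;
	struct frag *next;
};

struct fragq {
	struct frag *head;
	struct frag *tail;
};

static void frag_insert(struct fragq *q, struct frag *f)
{
	struct frag *prev = q->tail, *next = NULL;

	/* fast path: this fragment belongs after the current tail */
	if (!prev || prev->offset < f->offset)
		goto found;

	/* slow path: out-of-order fragment, search from the head */
	prev = NULL;
	for (next = q->head; next; next = next->next) {
		if (next->offset >= f->offset)
			break;
		prev = next;
	}
found:
	f->next = next;
	if (!next)
		q->tail = f;		/* we are the new last fragment */
	if (prev)
		prev->next = f;
	else
		q->head = f;
}

int main(void)
{
	static const int offsets[] = { 0, 1480, 4440, 2960 };	/* one is late */
	struct fragq q = { NULL, NULL };

	for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		struct frag *f = calloc(1, sizeof(*f));

		f->offset = offsets[i];
		frag_insert(&q, f);
	}
	for (struct frag *f = q.head; f; f = f->next)
		printf("%d ", f->offset);
	printf("\n");					/* 0 1480 2960 4440 */
	return 0;
}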
dst_mtu(skb_dst(skb)) : dev->mtu; @@ -803,7 +805,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev tunnel->err_count = 0; } - max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len; + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len; if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { @@ -830,7 +832,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. @@ -853,7 +855,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; #endif else - iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); + iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT); } ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags; @@ -915,7 +917,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev) .proto = IPPROTO_GRE }; struct rtable *rt; if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; ip_rt_put(rt); } @@ -1174,7 +1176,7 @@ static int ipgre_open(struct net_device *dev) struct rtable *rt; if (ip_route_output_key(dev_net(dev), &rt, &fl)) return -EADDRNOTAVAIL; - dev = rt->u.dst.dev; + dev = rt->dst.dev; ip_rt_put(rt); if (__in_dev_get_rtnl(dev) == NULL) return -EADDRNOTAVAIL; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index d930dc5e4d85a752ddb7af15b482b47108b62241..d859bcc26cb7e568b60261e8a0703add32a92798 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -146,7 +146,7 @@ #include /* - * Process Router Attention IP option + * Process Router Attention IP option (RFC 2113) */ int ip_call_ra_chain(struct sk_buff *skb) { @@ -155,8 +155,7 @@ int ip_call_ra_chain(struct sk_buff *skb) struct sock *last = NULL; struct net_device *dev = skb->dev; - read_lock(&ip_ra_lock); - for (ra = ip_ra_chain; ra; ra = ra->next) { + for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) { struct sock *sk = ra->sk; /* If socket is bound to an interface, only report @@ -167,10 +166,8 @@ int ip_call_ra_chain(struct sk_buff *skb) sk->sk_bound_dev_if == dev->ifindex) && net_eq(sock_net(sk), dev_net(dev))) { if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { - if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { - read_unlock(&ip_ra_lock); + if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) return 1; - } } if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); @@ -183,10 +180,8 @@ int ip_call_ra_chain(struct sk_buff *skb) if (last) { raw_rcv(last, skb); - read_unlock(&ip_ra_lock); return 1; } - read_unlock(&ip_ra_lock); return 0; } @@ -298,18 +293,16 @@ static inline int ip_rcv_options(struct sk_buff *skb) } if (unlikely(opt->srr)) { - struct in_device *in_dev = in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); + if (in_dev) { if (!IN_DEV_SOURCE_ROUTE(in_dev)) { if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) printk(KERN_INFO "source route option %pI4 -> %pI4\n", &iph->saddr, &iph->daddr); - in_dev_put(in_dev); goto drop; } - - in_dev_put(in_dev); } if (ip_options_rcv_srr(skb)) @@ -340,13 +333,16 @@ static int ip_rcv_finish(struct sk_buff *skb) else if (err == -ENETUNREACH) IP_INC_STATS_BH(dev_net(skb->dev), IPSTATS_MIB_INNOROUTES); + else if (err == -EXDEV) + 
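ip_call_ra_chain() now walks ip_ra_chain with rcu_dereference() instead of holding ip_ra_lock for reading; the matching writer side (rcu_assign_pointer() plus a call_rcu() deferred free) appears in the ip_sockglue.c hunk further down. A userspace analogue of the publish/walk ordering using C11 release/acquire atomics; the grace period that real RCU adds before any node may be freed is not modelled here:

/* cc -std=c11 rachain_demo.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ra_node {
	int id;
	_Atomic(struct ra_node *) next;
};

static _Atomic(struct ra_node *) chain;		/* head of the list */

static void publish(int id)
{
	struct ra_node *n = calloc(1, sizeof(*n));

	n->id = id;
	atomic_store_explicit(&n->next,
			      atomic_load_explicit(&chain, memory_order_relaxed),
			      memory_order_relaxed);
	/* make the fully initialised node visible before the head pointer,
	 * which is what rcu_assign_pointer() guarantees in the kernel */
	atomic_store_explicit(&chain, n, memory_order_release);
}

static void walk(void)
{
	/* each load pairs with the writer's release store, matching the
	 * rcu_dereference() loop in ip_call_ra_chain() */
	for (struct ra_node *n = atomic_load_explicit(&chain, memory_order_acquire);
	     n != NULL;
	     n = atomic_load_explicit(&n->next, memory_order_acquire))
		printf("node %d\n", n->id);
}

int main(void)
{
	publish(1);
	publish(2);
	walk();			/* prints node 2, then node 1 */
	return 0;
}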
NET_INC_STATS_BH(dev_net(skb->dev), + LINUX_MIB_IPRPFILTER); goto drop; } } #ifdef CONFIG_NET_CLS_ROUTE if (unlikely(skb_dst(skb)->tclassid)) { - struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); + struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct); u32 idx = skb_dst(skb)->tclassid; st[idx&0xFF].o_packets++; st[idx&0xFF].o_bytes += skb->len; @@ -360,10 +356,10 @@ static int ip_rcv_finish(struct sk_buff *skb) rt = skb_rtable(skb); if (rt->rt_type == RTN_MULTICAST) { - IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, + IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST, skb->len); } else if (rt->rt_type == RTN_BROADCAST) - IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST, + IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST, skb->len); return dst_input(skb); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 041d41df1224eec36f214cd649467e423be5de96..04b69896df5fc743021efd4d4a1705b7de146333 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -89,6 +89,7 @@ __inline__ void ip_send_check(struct iphdr *iph) iph->check = 0; iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } +EXPORT_SYMBOL(ip_send_check); int __ip_local_out(struct sk_buff *skb) { @@ -151,15 +152,15 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, iph->version = 4; iph->ihl = 5; iph->tos = inet->tos; - if (ip_dont_fragment(sk, &rt->u.dst)) + if (ip_dont_fragment(sk, &rt->dst)) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; - iph->ttl = ip_select_ttl(inet, &rt->u.dst); + iph->ttl = ip_select_ttl(inet, &rt->dst); iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; iph->protocol = sk->sk_protocol; - ip_select_ident(iph, &rt->u.dst, sk); + ip_select_ident(iph, &rt->dst, sk); if (opt && opt->optlen) { iph->ihl += opt->optlen>>2; @@ -172,7 +173,6 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, /* Send it out. */ return ip_local_out(skb); } - EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); static inline int ip_finish_output2(struct sk_buff *skb) @@ -240,7 +240,7 @@ int ip_mc_output(struct sk_buff *skb) { struct sock *sk = skb->sk; struct rtable *rt = skb_rtable(skb); - struct net_device *dev = rt->u.dst.dev; + struct net_device *dev = rt->dst.dev; /* * If the indicated interface is up and running, send the packet. 
@@ -359,9 +359,9 @@ int ip_queue_xmit(struct sk_buff *skb) if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) goto no_route; } - sk_setup_caps(sk, &rt->u.dst); + sk_setup_caps(sk, &rt->dst); } - skb_dst_set_noref(skb, &rt->u.dst); + skb_dst_set_noref(skb, &rt->dst); packet_routed: if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) @@ -372,11 +372,11 @@ int ip_queue_xmit(struct sk_buff *skb) skb_reset_network_header(skb); iph = ip_hdr(skb); *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); - if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df) + if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; - iph->ttl = ip_select_ttl(inet, &rt->u.dst); + iph->ttl = ip_select_ttl(inet, &rt->dst); iph->protocol = sk->sk_protocol; iph->saddr = rt->rt_src; iph->daddr = rt->rt_dst; @@ -387,7 +387,7 @@ int ip_queue_xmit(struct sk_buff *skb) ip_options_build(skb, opt, inet->inet_daddr, rt, 0); } - ip_select_ident_more(iph, &rt->u.dst, sk, + ip_select_ident_more(iph, &rt->dst, sk, (skb_shinfo(skb)->gso_segs ?: 1) - 1); skb->priority = sk->sk_priority; @@ -403,6 +403,7 @@ int ip_queue_xmit(struct sk_buff *skb) kfree_skb(skb); return -EHOSTUNREACH; } +EXPORT_SYMBOL(ip_queue_xmit); static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) @@ -411,7 +412,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->priority = from->priority; to->protocol = from->protocol; skb_dst_drop(to); - skb_dst_set(to, dst_clone(skb_dst(from))); + skb_dst_copy(to, from); to->dev = from->dev; to->mark = from->mark; @@ -442,17 +443,16 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct iphdr *iph; - int raw = 0; int ptr; struct net_device *dev; struct sk_buff *skb2; - unsigned int mtu, hlen, left, len, ll_rs, pad; + unsigned int mtu, hlen, left, len, ll_rs; int offset; __be16 not_last_frag; struct rtable *rt = skb_rtable(skb); int err = 0; - dev = rt->u.dst.dev; + dev = rt->dst.dev; /* * Point into the IP datagram header. @@ -473,7 +473,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) */ hlen = iph->ihl * 4; - mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */ + mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */ #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge) mtu -= nf_bridge_mtu_reduction(skb); @@ -580,14 +580,12 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) slow_path: left = skb->len - hlen; /* Space per frame */ - ptr = raw + hlen; /* Where to start from */ + ptr = hlen; /* Where to start from */ /* for bridged IP traffic encapsulated inside f.e. a vlan header, * we need to make room for the encapsulating header */ - pad = nf_bridge_pad(skb); - ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad); - mtu -= pad; + ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb)); /* * Fragment the datagram. 
@@ -697,7 +695,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); return err; } - EXPORT_SYMBOL(ip_fragment); int @@ -716,6 +713,7 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk } return 0; } +EXPORT_SYMBOL(ip_generic_getfrag); static inline __wsum csum_page(struct page *page, int offset, int copy) @@ -833,13 +831,13 @@ int ip_append_data(struct sock *sk, */ *rtp = NULL; inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ? - rt->u.dst.dev->mtu : - dst_mtu(rt->u.dst.path); - inet->cork.dst = &rt->u.dst; + rt->dst.dev->mtu : + dst_mtu(rt->dst.path); + inet->cork.dst = &rt->dst; inet->cork.length = 0; sk->sk_sndmsg_page = NULL; sk->sk_sndmsg_off = 0; - if ((exthdrlen = rt->u.dst.header_len) != 0) { + if ((exthdrlen = rt->dst.header_len) != 0) { length += exthdrlen; transhdrlen += exthdrlen; } @@ -852,7 +850,7 @@ int ip_append_data(struct sock *sk, exthdrlen = 0; mtu = inet->cork.fragsize; } - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); + hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; @@ -869,7 +867,7 @@ int ip_append_data(struct sock *sk, */ if (transhdrlen && length + fragheaderlen <= mtu && - rt->u.dst.dev->features & NETIF_F_V4_CSUM && + rt->dst.dev->features & NETIF_F_V4_CSUM && !exthdrlen) csummode = CHECKSUM_PARTIAL; @@ -878,7 +876,7 @@ int ip_append_data(struct sock *sk, inet->cork.length += length; if (((length > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->u.dst.dev->features & NETIF_F_UFO)) { + (rt->dst.dev->features & NETIF_F_UFO)) { err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, mtu, flags); @@ -926,7 +924,7 @@ int ip_append_data(struct sock *sk, fraglen = datalen + fragheaderlen; if ((flags & MSG_MORE) && - !(rt->u.dst.dev->features&NETIF_F_SG)) + !(rt->dst.dev->features&NETIF_F_SG)) alloclen = mtu; else alloclen = datalen + fragheaderlen; @@ -937,7 +935,7 @@ int ip_append_data(struct sock *sk, * the last. */ if (datalen == length + fraggap) - alloclen += rt->u.dst.trailer_len; + alloclen += rt->dst.trailer_len; if (transhdrlen) { skb = sock_alloc_send_skb(sk, @@ -1010,7 +1008,7 @@ int ip_append_data(struct sock *sk, if (copy > length) copy = length; - if (!(rt->u.dst.dev->features&NETIF_F_SG)) { + if (!(rt->dst.dev->features&NETIF_F_SG)) { unsigned int off; off = skb->len; @@ -1105,10 +1103,10 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, if (inet->cork.flags & IPCORK_OPT) opt = inet->cork.opt; - if (!(rt->u.dst.dev->features&NETIF_F_SG)) + if (!(rt->dst.dev->features&NETIF_F_SG)) return -EOPNOTSUPP; - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); + hh_len = LL_RESERVED_SPACE(rt->dst.dev); mtu = inet->cork.fragsize; fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); @@ -1125,7 +1123,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, inet->cork.length += size; if ((size + skb->len > mtu) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->u.dst.dev->features & NETIF_F_UFO)) { + (rt->dst.dev->features & NETIF_F_UFO)) { skb_shinfo(skb)->gso_size = mtu - fragheaderlen; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; } @@ -1277,8 +1275,8 @@ int ip_push_pending_frames(struct sock *sk) * If local_df is set too, we still allow to fragment this frame * locally. 
*/ if (inet->pmtudisc >= IP_PMTUDISC_DO || - (skb->len <= dst_mtu(&rt->u.dst) && - ip_dont_fragment(sk, &rt->u.dst))) + (skb->len <= dst_mtu(&rt->dst) && + ip_dont_fragment(sk, &rt->dst))) df = htons(IP_DF); if (inet->cork.flags & IPCORK_OPT) @@ -1287,7 +1285,7 @@ int ip_push_pending_frames(struct sock *sk) if (rt->rt_type == RTN_MULTICAST) ttl = inet->mc_ttl; else - ttl = ip_select_ttl(inet, &rt->u.dst); + ttl = ip_select_ttl(inet, &rt->dst); iph = (struct iphdr *)skb->data; iph->version = 4; @@ -1298,7 +1296,7 @@ int ip_push_pending_frames(struct sock *sk) } iph->tos = inet->tos; iph->frag_off = df; - ip_select_ident(iph, &rt->u.dst, sk); + ip_select_ident(iph, &rt->dst, sk); iph->ttl = ttl; iph->protocol = sk->sk_protocol; iph->saddr = rt->rt_src; @@ -1311,7 +1309,7 @@ int ip_push_pending_frames(struct sock *sk) * on dst refcount */ inet->cork.dst = NULL; - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); if (iph->protocol == IPPROTO_ICMP) icmp_out_count(net, ((struct icmphdr *) @@ -1448,7 +1446,3 @@ void __init ip_init(void) igmp_mc_proc_init(); #endif } - -EXPORT_SYMBOL(ip_generic_getfrag); -EXPORT_SYMBOL(ip_queue_xmit); -EXPORT_SYMBOL(ip_send_check); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ce231780a2b14ef4e15a33baad4b0c887e96df16..6c40a8c46e7984843275af12bfbac9a241e8e4c6 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -239,7 +239,16 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) sent to multicast group to reach destination designated router. */ struct ip_ra_chain *ip_ra_chain; -DEFINE_RWLOCK(ip_ra_lock); +static DEFINE_SPINLOCK(ip_ra_lock); + + +static void ip_ra_destroy_rcu(struct rcu_head *head) +{ + struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu); + + sock_put(ra->saved_sk); + kfree(ra); +} int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) @@ -251,35 +260,42 @@ int ip_ra_control(struct sock *sk, unsigned char on, new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; - write_lock_bh(&ip_ra_lock); + spin_lock_bh(&ip_ra_lock); for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { if (ra->sk == sk) { if (on) { - write_unlock_bh(&ip_ra_lock); + spin_unlock_bh(&ip_ra_lock); kfree(new_ra); return -EADDRINUSE; } - *rap = ra->next; - write_unlock_bh(&ip_ra_lock); + /* dont let ip_call_ra_chain() use sk again */ + ra->sk = NULL; + rcu_assign_pointer(*rap, ra->next); + spin_unlock_bh(&ip_ra_lock); if (ra->destructor) ra->destructor(sk); - sock_put(sk); - kfree(ra); + /* + * Delay sock_put(sk) and kfree(ra) after one rcu grace + * period. This guarantee ip_call_ra_chain() dont need + * to mess with socket refcounts. + */ + ra->saved_sk = sk; + call_rcu(&ra->rcu, ip_ra_destroy_rcu); return 0; } } if (new_ra == NULL) { - write_unlock_bh(&ip_ra_lock); + spin_unlock_bh(&ip_ra_lock); return -ENOBUFS; } new_ra->sk = sk; new_ra->destructor = destructor; new_ra->next = ra; - *rap = new_ra; + rcu_assign_pointer(*rap, new_ra); sock_hold(sk); - write_unlock_bh(&ip_ra_lock); + spin_unlock_bh(&ip_ra_lock); return 0; } @@ -449,7 +465,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, (1<hdrincl = val ? 1 : 0; break; + case IP_NODEFRAG: + if (sk->sk_type != SOCK_RAW) { + err = -ENOPROTOOPT; + break; + } + inet->nodefrag = val ? 
1 : 0; + break; case IP_MTU_DISCOVER: if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE) goto e_inval; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index b9d84e800cf4385b9d0bf51fe8aae78989d295b4..3a6e1ec5e9ae61f31d8ce77b14335bf5f56aae31 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -665,6 +665,13 @@ ic_dhcp_init_options(u8 *options) memcpy(e, ic_req_params, sizeof(ic_req_params)); e += sizeof(ic_req_params); + if (ic_host_name_set) { + *e++ = 12; /* host-name */ + len = strlen(utsname()->nodename); + *e++ = len; + memcpy(e, utsname()->nodename, len); + e += len; + } if (*vendor_class_identifier) { printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", vendor_class_identifier); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 7fd636711037ae9be434f4b091e040a70843ef69..ec036731a70ba0774164de8574c30d0f0e1baa64 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -435,7 +435,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_error_icmp; } } - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; if (tdev == dev) { ip_rt_put(rt); @@ -446,7 +446,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) df |= old_iph->frag_off & htons(IP_DF); if (df) { - mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); + mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); if (mtu < 68) { stats->collisions++; @@ -503,7 +503,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. @@ -552,7 +552,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev) .proto = IPPROTO_IPIP }; struct rtable *rt; if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; ip_rt_put(rt); } dev->flags |= IFF_POINTOPOINT; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7f6273506eea46522a17ff85b9a451cb18fdc7d3..179fcab866fc5f550d580f3ca06b5abd24f0f58a 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1555,9 +1555,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, goto out_free; } - dev = rt->u.dst.dev; + dev = rt->dst.dev; - if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) { + if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { /* Do not fragment multicasts. Alas, IPv4 does not allow to send ICMP, so that packets will disappear to blackhole. @@ -1568,7 +1568,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, goto out_free; } - encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; + encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; if (skb_cow(skb, encap)) { ip_rt_put(rt); @@ -1579,7 +1579,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, vif->bytes_out += skb->len; skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); ip_decrease_ttl(ip_hdr(skb)); /* FIXME: forward and output firewalls used to be called here. diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 07de855e2175ea1f40760c84388388d4676f30b0..d88a46c54fd1bbb62a6f9ed13570c518940666b5 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -43,7 +43,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) /* Drop old route. 
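The ipconfig.c hunk appends the host name to the DHCP request as option 12 in the usual tag/length/value layout from RFC 2132. A small userspace illustration of that encoding; put_dhcp_option() and the sample host name are inventions for the example, not kernel helpers:

/* cc -std=c99 dhcpopt_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* tag, length, value: the layout ic_dhcp_init_options() emits by hand */
static uint8_t *put_dhcp_option(uint8_t *e, uint8_t tag,
				const void *data, uint8_t len)
{
	*e++ = tag;			/* option code (12 = host-name) */
	*e++ = len;			/* payload length in bytes      */
	memcpy(e, data, len);		/* payload                      */
	return e + len;
}

int main(void)
{
	uint8_t options[64];
	const char *hostname = "client42";	/* utsname()->nodename in the patch */
	uint8_t *e = options;

	e = put_dhcp_option(e, 12, hostname, (uint8_t)strlen(hostname));
	*e++ = 255;				/* end-of-options marker */

	printf("%zu option bytes, tag %u, len %u\n",
	       (size_t)(e - options), options[0], options[1]);
	return 0;
}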
*/ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); } else { /* non-local src, find valid iif to satisfy * rp-filter when calling ip_route_input. */ @@ -53,11 +53,11 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) orefdst = skb->_skb_refdst; if (ip_route_input(skb, iph->daddr, iph->saddr, - RT_TOS(iph->tos), rt->u.dst.dev) != 0) { - dst_release(&rt->u.dst); + RT_TOS(iph->tos), rt->dst.dev) != 0) { + dst_release(&rt->dst); return -1; } - dst_release(&rt->u.dst); + dst_release(&rt->dst); refdst_drop(orefdst); } @@ -212,9 +212,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, skb->len - dataoff, 0); skb->ip_summed = CHECKSUM_NONE; - csum = __skb_checksum_complete_head(skb, dataoff + len); - if (!csum) - skb->ip_summed = CHECKSUM_UNNECESSARY; + return __skb_checksum_complete_head(skb, dataoff + len); } return csum; } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 1ac01b1286219475cde625bb921887fc3645d7a3..6bccba31d13208d03f042002f5808c3396c1f37f 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -283,16 +283,13 @@ unsigned int arpt_do_table(struct sk_buff *skb, arp = arp_hdr(skb); do { const struct arpt_entry_target *t; - int hdr_len; if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { e = arpt_next_entry(e); continue; } - hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) + - (2 * skb->dev->addr_len); - ADD_COUNTER(e->counters, hdr_len, 1); + ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1); t = arpt_get_target_c(e); @@ -713,7 +710,7 @@ static void get_counters(const struct xt_table_info *t, struct arpt_entry *iter; unsigned int cpu; unsigned int i; - unsigned int curcpu; + unsigned int curcpu = get_cpu(); /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters @@ -723,14 +720,16 @@ static void get_counters(const struct xt_table_info *t, * if new softirq were to run and call ipt_do_table */ local_bh_disable(); - curcpu = smp_processor_id(); - i = 0; xt_entry_foreach(iter, t->entries[curcpu], t->size) { SET_COUNTER(counters[i], iter->counters.bcnt, iter->counters.pcnt); ++i; } + local_bh_enable(); + /* Processing counters from other cpus, we can let bottom half enabled, + * (preemption is disabled) + */ for_each_possible_cpu(cpu) { if (cpu == curcpu) @@ -744,7 +743,7 @@ static void get_counters(const struct xt_table_info *t, } xt_info_wrunlock(cpu); } - local_bh_enable(); + put_cpu(); } static struct xt_counters *alloc_counters(const struct xt_table *table) @@ -758,7 +757,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) * about). 
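The reworked get_counters() (here in arp_tables.c, and identically in ip_tables.c below) initialises the result from the current CPU's counters and then adds every other CPU's values on top, so bottom halves only need to stay disabled for the local pass while the remote pass runs with just preemption disabled via get_cpu()/put_cpu(). A plain userspace sketch of the folding itself, with the kernel's locking constraints reduced to comments and made-up sample data:

/* cc -std=c99 fold_demo.c */
#include <stdio.h>

#define NCPU	4
#define NRULES	2

struct xt_counter { unsigned long bytes, packets; };

static const struct xt_counter percpu[NCPU][NRULES] = {
	{ { 100, 1 }, { 200, 2 } },
	{ {  10, 1 }, {  20, 1 } },
	{ {   0, 0 }, {   5, 1 } },
	{ {   1, 1 }, {   0, 0 } },
};

static void fold(struct xt_counter *out, int curcpu)
{
	/* local CPU: local_bh_disable() brackets this loop in the kernel */
	for (int r = 0; r < NRULES; r++)
		out[r] = percpu[curcpu][r];		/* SET_COUNTER */

	/* remote CPUs: only preemption stays off, plus the per-CPU
	 * xt_info write locks around each CPU's walk */
	for (int cpu = 0; cpu < NCPU; cpu++) {
		if (cpu == curcpu)
			continue;
		for (int r = 0; r < NRULES; r++) {
			out[r].bytes   += percpu[cpu][r].bytes;		/* ADD_COUNTER */
			out[r].packets += percpu[cpu][r].packets;
		}
	}
}

int main(void)
{
	struct xt_counter totals[NRULES];

	fold(totals, 0);
	for (int r = 0; r < NRULES; r++)
		printf("rule %d: %lu bytes, %lu packets\n",
		       r, totals[r].bytes, totals[r].packets);
	return 0;
}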
*/ countersize = sizeof(struct xt_counters) * private->number; - counters = vmalloc_node(countersize, numa_node_id()); + counters = vmalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); @@ -1005,8 +1004,7 @@ static int __do_replace(struct net *net, const char *name, struct arpt_entry *iter; ret = 0; - counters = vmalloc_node(num_counters * sizeof(struct xt_counters), - numa_node_id()); + counters = vmalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; @@ -1159,7 +1157,7 @@ static int do_add_counters(struct net *net, const void __user *user, if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; - paddc = vmalloc_node(len - size, numa_node_id()); + paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index a4e5fc5df4bfd31aef2ed606fe475e7cfc15559d..d2c1311cb28d6aee7b218351580210b4e5783a3e 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long); static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; -static DEFINE_RWLOCK(queue_lock); +static DEFINE_SPINLOCK(queue_lock); static int peer_pid __read_mostly; static unsigned int copy_range __read_mostly; static unsigned int queue_total; @@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range) break; case IPQ_COPY_PACKET: - copy_mode = mode; + if (range > 0xFFFF) + range = 0xFFFF; copy_range = range; - if (copy_range > 0xFFFF) - copy_range = 0xFFFF; + copy_mode = mode; break; default: @@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id) { struct nf_queue_entry *entry = NULL, *i; - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); list_for_each_entry(i, &queue_list, list) { if ((unsigned long)i == id) { @@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id) queue_total--; } - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return entry; } @@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data) static void ipq_flush(ipq_cmpfn cmpfn, unsigned long data) { - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); __ipq_flush(cmpfn, data); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); } static struct sk_buff * @@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) struct nlmsghdr *nlh; struct timeval tv; - read_lock_bh(&queue_lock); - - switch (copy_mode) { + switch (ACCESS_ONCE(copy_mode)) { case IPQ_COPY_META: case IPQ_COPY_NONE: size = NLMSG_SPACE(sizeof(*pmsg)); @@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) case IPQ_COPY_PACKET: if (entry->skb->ip_summed == CHECKSUM_PARTIAL && - (*errp = skb_checksum_help(entry->skb))) { - read_unlock_bh(&queue_lock); + (*errp = skb_checksum_help(entry->skb))) return NULL; - } - if (copy_range == 0 || copy_range > entry->skb->len) + + data_len = ACCESS_ONCE(copy_range); + if (data_len == 0 || data_len > entry->skb->len) data_len = entry->skb->len; - else - data_len = copy_range; size = NLMSG_SPACE(sizeof(*pmsg) + data_len); break; default: *errp = -EINVAL; - read_unlock_bh(&queue_lock); return NULL; } - read_unlock_bh(&queue_lock); - skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; @@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) if (nskb == NULL) return status; - 
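With queue_lock no longer taken for reads, ipq_build_packet_message() captures copy_mode and copy_range exactly once via ACCESS_ONCE so a concurrent ipq_set_mode() cannot change them between the test and the use; a zero or oversized range simply falls back to copying the whole packet. A userspace sketch of that single-snapshot read, with a relaxed C11 atomic standing in for ACCESS_ONCE:

/* cc -std=c11 copyrange_demo.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint copy_range;		/* updated by the control path */

static unsigned int packet_copy_len(unsigned int pkt_len)
{
	/* one snapshot, used for both the test and the result */
	unsigned int range = atomic_load_explicit(&copy_range,
						  memory_order_relaxed);

	if (range == 0 || range > pkt_len)
		return pkt_len;		/* unset or oversized: copy everything */
	return range;
}

int main(void)
{
	printf("range unset: copy %u of 1500\n", packet_copy_len(1500));
	atomic_store_explicit(&copy_range, 96, memory_order_relaxed);
	printf("range = 96 : copy %u of 1500\n", packet_copy_len(1500));
	return 0;
}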
write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); if (!peer_pid) goto err_out_free_nskb; @@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) __ipq_enqueue_entry(entry); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return status; err_out_free_nskb: kfree_skb(nskb); err_out_unlock: - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return status; } @@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range) { int status; - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); status = __ipq_set_mode(mode, range); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return status; } @@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb) if (security_netlink_recv(skb, CAP_NET_ADMIN)) RCV_SKB_FAIL(-EPERM); - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); if (peer_pid) { if (peer_pid != pid) { - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); RCV_SKB_FAIL(-EBUSY); } } else { @@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb) peer_pid = pid; } - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); status = ipq_receive_peer(NLMSG_DATA(nlh), type, nlmsglen - NLMSG_LENGTH(0)); @@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this, struct netlink_notify *n = ptr; if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) { - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) __ipq_reset(); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); } return NOTIFY_DONE; } @@ -527,7 +520,7 @@ static ctl_table ipq_table[] = { #ifdef CONFIG_PROC_FS static int ip_queue_show(struct seq_file *m, void *v) { - read_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); seq_printf(m, "Peer PID : %d\n" @@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v) queue_dropped, queue_user_dropped); - read_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return 0; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 4b6c5ca610fc0a463db4995e8b58393916e0a35c..c439721b165a6369acd1bd2ea1d64d4b0580b1bb 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -364,7 +364,7 @@ ipt_do_table(struct sk_buff *skb, goto no_match; } - ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); + ADD_COUNTER(e->counters, skb->len, 1); t = ipt_get_target(e); IP_NF_ASSERT(t->u.kernel.target); @@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t, struct ipt_entry *iter; unsigned int cpu; unsigned int i; - unsigned int curcpu; + unsigned int curcpu = get_cpu(); /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters @@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t, * if new softirq were to run and call ipt_do_table */ local_bh_disable(); - curcpu = smp_processor_id(); - i = 0; xt_entry_foreach(iter, t->entries[curcpu], t->size) { SET_COUNTER(counters[i], iter->counters.bcnt, iter->counters.pcnt); ++i; } + local_bh_enable(); + /* Processing counters from other cpus, we can let bottom half enabled, + * (preemption is disabled) + */ for_each_possible_cpu(cpu) { if (cpu == curcpu) @@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t, } xt_info_wrunlock(cpu); } - local_bh_enable(); + put_cpu(); } static struct xt_counters *alloc_counters(const struct xt_table *table) @@ -928,7 +930,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) (other 
than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; - counters = vmalloc_node(countersize, numa_node_id()); + counters = vmalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); @@ -1352,7 +1354,7 @@ do_add_counters(struct net *net, const void __user *user, if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; - paddc = vmalloc_node(len - size, numa_node_id()); + paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index f91c94b9a7900017b3589598a26c3491ef6a2407..3a43cf36db8701f9b8a99317a60da0e3ac2ad8f4 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -53,12 +53,13 @@ struct clusterip_config { #endif enum clusterip_hashmode hash_mode; /* which hashing mode */ u_int32_t hash_initval; /* hash initialization */ + struct rcu_head rcu; }; static LIST_HEAD(clusterip_configs); /* clusterip_lock protects the clusterip_configs list */ -static DEFINE_RWLOCK(clusterip_lock); +static DEFINE_SPINLOCK(clusterip_lock); #ifdef CONFIG_PROC_FS static const struct file_operations clusterip_proc_fops; @@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c) atomic_inc(&c->refcount); } + +static void clusterip_config_rcu_free(struct rcu_head *head) +{ + kfree(container_of(head, struct clusterip_config, rcu)); +} + static inline void clusterip_config_put(struct clusterip_config *c) { if (atomic_dec_and_test(&c->refcount)) - kfree(c); + call_rcu_bh(&c->rcu, clusterip_config_rcu_free); } /* decrease the count of entries using/referencing this config. If last @@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c) static inline void clusterip_config_entry_put(struct clusterip_config *c) { - write_lock_bh(&clusterip_lock); - if (atomic_dec_and_test(&c->entries)) { - list_del(&c->list); - write_unlock_bh(&clusterip_lock); + local_bh_disable(); + if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) { + list_del_rcu(&c->list); + spin_unlock(&clusterip_lock); + local_bh_enable(); dev_mc_del(c->dev, c->clustermac); dev_put(c->dev); @@ -100,7 +108,7 @@ clusterip_config_entry_put(struct clusterip_config *c) #endif return; } - write_unlock_bh(&clusterip_lock); + local_bh_enable(); } static struct clusterip_config * @@ -108,7 +116,7 @@ __clusterip_config_find(__be32 clusterip) { struct clusterip_config *c; - list_for_each_entry(c, &clusterip_configs, list) { + list_for_each_entry_rcu(c, &clusterip_configs, list) { if (c->clusterip == clusterip) return c; } @@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry) { struct clusterip_config *c; - read_lock_bh(&clusterip_lock); + rcu_read_lock_bh(); c = __clusterip_config_find(clusterip); - if (!c) { - read_unlock_bh(&clusterip_lock); - return NULL; + if (c) { + if (unlikely(!atomic_inc_not_zero(&c->refcount))) + c = NULL; + else if (entry) + atomic_inc(&c->entries); } - atomic_inc(&c->refcount); - if (entry) - atomic_inc(&c->entries); - read_unlock_bh(&clusterip_lock); + rcu_read_unlock_bh(); return c; } @@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip, } #endif - write_lock_bh(&clusterip_lock); - list_add(&c->list, &clusterip_configs); - write_unlock_bh(&clusterip_lock); + spin_lock_bh(&clusterip_lock); + list_add_rcu(&c->list, &clusterip_configs); + spin_unlock_bh(&clusterip_lock); return c; } @@ -462,7 +469,7 @@ struct arp_payload { __be32 src_ip; u_int8_t 
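clusterip_config_find_get() pairs the RCU list walk with atomic_inc_not_zero(): a reader racing with the final clusterip_config_put() may still see the config on the list, but must not revive a refcount that has already reached zero, because the object is then queued for call_rcu_bh() freeing. Unlike the inetpeer change earlier, zero is the dead marker here, since these configs are freed as soon as their refcount drops to zero. A userspace sketch of the rule, with stand-in names:

/* cc -std=c11 incnotzero_demo.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;	/* valid reference taken */
	}
	return false;			/* object already on its way out */
}

int main(void)
{
	atomic_int alive = 1;	/* config still referenced       */
	atomic_int dead = 0;	/* last put done, free is queued */

	printf("alive config: %d\n", inc_not_zero(&alive));	/* 1 */
	printf("dead config : %d\n", inc_not_zero(&dead));	/* 0 */
	return 0;
}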
dst_hw[ETH_ALEN]; __be32 dst_ip; -} __attribute__ ((packed)); +} __packed; #ifdef DEBUG static void arp_print(struct arp_payload *payload) @@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void) #endif nf_unregister_hook(&cip_arp_ops); xt_unregister_target(&clusterip_tg_reg); + + /* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */ + rcu_barrier_bh(); } module_init(clusterip_tg_init); diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index 5234f4f3499ad87e06c9d1bfab14e27d35b77fe6..915fc17d7ce214a017f1993e0ab82a0603a3f4f6 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -363,6 +364,42 @@ static void dump_packet(const struct nf_loginfo *info, /* maxlen = 230+ 91 + 230 + 252 = 803 */ } +static void dump_mac_header(const struct nf_loginfo *info, + const struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + unsigned int logflags = 0; + + if (info->type == NF_LOG_TYPE_LOG) + logflags = info->u.log.logflags; + + if (!(logflags & IPT_LOG_MACDECODE)) + goto fallback; + + switch (dev->type) { + case ARPHRD_ETHER: + printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ", + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, + ntohs(eth_hdr(skb)->h_proto)); + return; + default: + break; + } + +fallback: + printk("MAC="); + if (dev->hard_header_len && + skb->mac_header != skb->network_header) { + const unsigned char *p = skb_mac_header(skb); + unsigned int i; + + printk("%02x", *p++); + for (i = 1; i < dev->hard_header_len; i++, p++) + printk(":%02x", *p); + } + printk(" "); +} + static struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { @@ -404,20 +441,9 @@ ipt_log_packet(u_int8_t pf, } #endif - if (in && !out) { - /* MAC logging for input chain only. */ - printk("MAC="); - if (skb->dev && skb->dev->hard_header_len && - skb->mac_header != skb->network_header) { - int i; - const unsigned char *p = skb_mac_header(skb); - for (i = 0; i < skb->dev->hard_header_len; i++,p++) - printk("%02x%c", *p, - i==skb->dev->hard_header_len - 1 - ? ' ':':'); - } else - printk(" "); - } + /* MAC logging for input path only. 
*/ + if (in && !out) + dump_mac_header(loginfo, skb); dump_packet(loginfo, skb, 0); printk("\n"); diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index f43867d1697f2b6c262030672ed7411af5b2b2a4..6cdb298f103570bcf3f1b36bee23c99b32dec8a9 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c @@ -48,7 +48,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par) NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_POST_ROUTING || - par->hooknum == NF_INET_LOCAL_OUT); + par->hooknum == NF_INET_LOCAL_OUT || + par->hooknum == NF_INET_LOCAL_IN); ct = nf_ct_get(skb, &ctinfo); netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); @@ -77,7 +78,8 @@ static struct xt_target netmap_tg_reg __read_mostly = { .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING) | - (1 << NF_INET_LOCAL_OUT), + (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_LOCAL_IN), .checkentry = netmap_tg_check, .me = THIS_MODULE }; diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index f5f4a888e4ec71243f2d6589507355f28273c5e9..b254dafaf4294548b7d36d9b54ce29832f8658e3 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -95,10 +95,11 @@ static void send_reset(struct sk_buff *oldskb, int hook) } tcph->rst = 1; - tcph->check = tcp_v4_check(sizeof(struct tcphdr), - niph->saddr, niph->daddr, - csum_partial(tcph, - sizeof(struct tcphdr), 0)); + tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, + niph->daddr, 0); + nskb->ip_summed = CHECKSUM_PARTIAL; + nskb->csum_start = (unsigned char *)tcph - nskb->head; + nskb->csum_offset = offsetof(struct tcphdr, check); addr_type = RTN_UNSPEC; if (hook != NF_INET_FORWARD @@ -109,13 +110,12 @@ static void send_reset(struct sk_buff *oldskb, int hook) addr_type = RTN_LOCAL; /* ip_route_me_harder expects skb->dst to be set */ - skb_dst_set(nskb, dst_clone(skb_dst(oldskb))); + skb_dst_set_noref(nskb, skb_dst(oldskb)); if (ip_route_me_harder(nskb, addr_type)) goto free_nskb; niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT); - nskb->ip_summed = CHECKSUM_NONE; /* "Never happens" */ if (nskb->len > dst_mtu(skb_dst(nskb))) diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index cb763ae9ed9035f5be305eb33dda2e8118d671b3..eab8de32f200af74828bbf0fb3187ff578451e9b 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -66,6 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, const struct net_device *out, int (*okfn)(struct sk_buff *)) { + struct inet_sock *inet = inet_sk(skb->sk); + + if (inet && inet->nodefrag) + return NF_ACCEPT; + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) /* Previously seen (loopback)? Ignore. 
Do this before diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 4f8bddb760c9cb47d6931647d8b03ece5fef5fc0..8c8632d9b93cead0cd115945a9566d1e57829667 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -261,14 +261,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, rcu_read_lock(); proto = __nf_nat_proto_find(orig_tuple->dst.protonum); - /* Change protocol info to have some randomization */ - if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) { - proto->unique_tuple(tuple, range, maniptype, ct); - goto out; - } - /* Only bother mapping if it's not already in range and unique */ - if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || + if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM) && + (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || proto->in_range(tuple, maniptype, &range->min, &range->max)) && !nf_nat_used_tuple(tuple, ct)) goto out; @@ -440,7 +435,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) return 0; - inside = (void *)skb->data + ip_hdrlen(skb); + inside = (void *)skb->data + hdrlen; /* We're actually going to mangle it beyond trivial checksum adjustment, so make sure the current checksum is correct. */ @@ -470,12 +465,10 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, /* rcu_read_lock()ed by nf_hook_slow */ l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); - if (!nf_ct_get_tuple(skb, - ip_hdrlen(skb) + sizeof(struct icmphdr), - (ip_hdrlen(skb) + + if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr), + (hdrlen + sizeof(struct icmphdr) + inside->ip.ihl * 4), - (u_int16_t)AF_INET, - inside->ip.protocol, + (u_int16_t)AF_INET, inside->ip.protocol, &inner, l3proto, l4proto)) return 0; @@ -484,15 +477,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, pass all hooks (locally-generated ICMP). Consider incoming packet: PREROUTING (DST manip), routing produces ICMP, goes through POSTROUTING (which must correct the DST manip). */ - if (!manip_pkt(inside->ip.protocol, skb, - ip_hdrlen(skb) + sizeof(inside->icmp), - &ct->tuplehash[!dir].tuple, - !manip)) + if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp), + &ct->tuplehash[!dir].tuple, !manip)) return 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { /* Reloading "inside" here since manip_pkt inner. 
*/ - inside = (void *)skb->data + ip_hdrlen(skb); + inside = (void *)skb->data + hdrlen; inside->icmp.checksum = 0; inside->icmp.checksum = csum_fold(skb_checksum(skb, hdrlen, @@ -742,7 +733,7 @@ static int __init nf_nat_init(void) spin_unlock_bh(&nf_nat_lock); /* Initialize fake conntrack so that NAT will skip it */ - nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; + nf_ct_untracked_status_or(IPS_NAT_DONE_MASK); l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c index 6c4f11f514461a5a244ba1d70180f42ad82940b4..3e61faf23a9a0c8636433dd9a1805274dfce4691 100644 --- a/net/ipv4/netfilter/nf_nat_proto_common.c +++ b/net/ipv4/netfilter/nf_nat_proto_common.c @@ -34,7 +34,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, } EXPORT_SYMBOL_GPL(nf_nat_proto_in_range); -bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, +void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct, @@ -53,7 +53,7 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { /* If it's dst rewrite, can't change port */ if (maniptype == IP_NAT_MANIP_DST) - return false; + return; if (ntohs(*portptr) < 1024) { /* Loose convention: >> 512 is credential passing */ @@ -81,15 +81,15 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, else off = *rover; - for (i = 0; i < range_size; i++, off++) { + for (i = 0; ; ++off) { *portptr = htons(min + off % range_size); - if (nf_nat_used_tuple(tuple, ct)) + if (++i != range_size && nf_nat_used_tuple(tuple, ct)) continue; if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) *rover = off; - return true; + return; } - return false; + return; } EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple); diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c index 22485ce306d41f2d175786bdc9fc7f20160306e2..570faf2667b26e70f3f7ddc6e2f9c89cdbfdcfd9 100644 --- a/net/ipv4/netfilter/nf_nat_proto_dccp.c +++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c @@ -22,14 +22,14 @@ static u_int16_t dccp_port_rover; -static bool +static void dccp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, - &dccp_port_rover); + nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, + &dccp_port_rover); } static bool diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c index d7e89201351e90d01bbbac1666e913f90e036dbf..bc8d83a31c73ae4abe371e74c7b0df3e6068c03a 100644 --- a/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c @@ -37,7 +37,7 @@ MODULE_AUTHOR("Harald Welte "); MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); /* generate unique tuple ... 
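Since the unique_tuple() implementations now return void, they must always leave some tuple behind; the reworked loop in nf_nat_proto_unique_tuple() (and the similar ones in the GRE and ICMP helpers) folds the iteration-count check into the probe so that the last candidate is kept even if nf_nat_used_tuple() reports it as busy. A userspace sketch of that loop shape, with is_used() standing in for nf_nat_used_tuple() and the port numbers chosen only for illustration:

/* cc -std=c99 natport_demo.c */
#include <stdbool.h>
#include <stdio.h>

static bool is_used(unsigned short port)
{
	return port != 1027;		/* pretend only 1027 is free */
}

/* range_size must be non-zero, as in the kernel helper */
static unsigned short pick_port(unsigned short min, unsigned int range_size,
				unsigned int off)
{
	for (unsigned int i = 0; ; ++off) {
		unsigned short port = (unsigned short)(min + off % range_size);

		/* keep probing unless this is the last try or the port is free */
		if (++i != range_size && is_used(port))
			continue;
		return port;
	}
}

int main(void)
{
	/* 1024..1031: the free port 1027 is found */
	printf("free port found : %u\n", (unsigned int)pick_port(1024, 8, 0));
	/* 1024..1025, both busy: the last candidate is used anyway */
	printf("all busy, forced: %u\n", (unsigned int)pick_port(1024, 2, 0));
	return 0;
}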
*/ -static bool +static void gre_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, @@ -50,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, /* If there is no master conntrack we are not PPTP, do not change tuples */ if (!ct->master) - return false; + return; if (maniptype == IP_NAT_MANIP_SRC) keyptr = &tuple->src.u.gre.key; @@ -68,14 +68,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, pr_debug("min = %u, range_size = %u\n", min, range_size); - for (i = 0; i < range_size; i++, key++) { + for (i = 0; ; ++key) { *keyptr = htons(min + key % range_size); - if (!nf_nat_used_tuple(tuple, ct)) - return true; + if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) + return; } pr_debug("%p: no NAT mapping\n", ct); - return false; + return; } /* manipulate a GRE packet according to maniptype */ diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c index 19a8b0b07d8e7d80620446939e80aa3966c84ace..5744c3ec847cc26440204b7584fdd60d857f7964 100644 --- a/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c @@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple, ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); } -static bool +static void icmp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, @@ -42,13 +42,13 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple, if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) range_size = 0xFFFF; - for (i = 0; i < range_size; i++, id++) { + for (i = 0; ; ++id) { tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + (id % range_size)); - if (!nf_nat_used_tuple(tuple, ct)) - return true; + if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) + return; } - return false; + return; } static bool diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c index 3fc598eeeb1a57cc8fc76f1c6ed7c1220ce21326..756331d42661cf12b2964256a8204b7a588f92f8 100644 --- a/net/ipv4/netfilter/nf_nat_proto_sctp.c +++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c @@ -16,14 +16,14 @@ static u_int16_t nf_sctp_port_rover; -static bool +static void sctp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, - &nf_sctp_port_rover); + nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, + &nf_sctp_port_rover); } static bool diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c index 399e2cfa263b72eaa8a2c49b078983cd67399914..aa460a595d5d930e019f9bd36eb010251a5dd33e 100644 --- a/net/ipv4/netfilter/nf_nat_proto_tcp.c +++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c @@ -20,14 +20,13 @@ static u_int16_t tcp_port_rover; -static bool +static void tcp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, - &tcp_port_rover); + nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover); } static bool diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c index 9e61c79492e4beb0d02a56c0f1bb4b42226630b7..dfe65c7e292586521ad9d1f804a3f82eade1a755 100644 --- a/net/ipv4/netfilter/nf_nat_proto_udp.c +++ b/net/ipv4/netfilter/nf_nat_proto_udp.c @@ -19,14 +19,13 @@ static 
u_int16_t udp_port_rover; -static bool +static void udp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, - &udp_port_rover); + nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover); } static bool diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c index 440a229bbd87df929816d53d02764d7879478ba1..3cc8c8af39ef2fe82905fdad69b0b64af0f4e770 100644 --- a/net/ipv4/netfilter/nf_nat_proto_udplite.c +++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c @@ -18,14 +18,14 @@ static u_int16_t udplite_port_rover; -static bool +static void udplite_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, - &udplite_port_rover); + nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, + &udplite_port_rover); } static bool diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c index 14381c62acea676e8553530b32d1f4220d90e6a1..a50f2bc1c7328805e755a204ca66253844dba702 100644 --- a/net/ipv4/netfilter/nf_nat_proto_unknown.c +++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c @@ -26,14 +26,14 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, return true; } -static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple, +static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { /* Sorry: we can't help you; if it's not unique, we can't frob anything. 
*/ - return false; + return; } static bool diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 98ed78281aee15eae5da8b99105c3f2da36836bd..ebbd319f62f56741ea2d22da8529cda0586089a0 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c @@ -28,7 +28,8 @@ #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ (1 << NF_INET_POST_ROUTING) | \ - (1 << NF_INET_LOCAL_OUT)) + (1 << NF_INET_LOCAL_OUT) | \ + (1 << NF_INET_LOCAL_IN)) static const struct xt_table nat_table = { .name = "nat", @@ -45,7 +46,8 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par) enum ip_conntrack_info ctinfo; const struct nf_nat_multi_range_compat *mr = par->targinfo; - NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING); + NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING || + par->hooknum == NF_INET_LOCAL_IN); ct = nf_ct_get(skb, &ctinfo); @@ -99,7 +101,7 @@ static int ipt_dnat_checkentry(const struct xt_tgchk_param *par) return 0; } -unsigned int +static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) { /* Force range to this IP; let proto decide mapping for @@ -141,7 +143,7 @@ static struct xt_target ipt_snat_reg __read_mostly = { .target = ipt_snat_target, .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", - .hooks = 1 << NF_INET_POST_ROUTING, + .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), .checkentry = ipt_snat_checkentry, .family = AF_INET, }; diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index beb25819c9c9f66c0fa5a1a40c398e860f3e10a8..95481fee8bdbfafec767a2ba2c1d19bef938fb85 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c @@ -98,7 +98,7 @@ nf_nat_fn(unsigned int hooknum, return NF_ACCEPT; /* Don't try to NAT if this packet is not conntracked */ - if (ct == &nf_conntrack_untracked) + if (nf_ct_is_untracked(ct)) return NF_ACCEPT; nat = nfct_nat(ct); @@ -131,13 +131,7 @@ nf_nat_fn(unsigned int hooknum, if (!nf_nat_initialized(ct, maniptype)) { unsigned int ret; - if (hooknum == NF_INET_LOCAL_IN) - /* LOCAL_IN hook doesn't have a chain! */ - ret = alloc_null_binding(ct, hooknum); - else - ret = nf_nat_rule_find(skb, hooknum, in, out, - ct); - + ret = nf_nat_rule_find(skb, hooknum, in, out, ct); if (ret != NF_ACCEPT) return ret; } else diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 3dc9914c1dcee4f77faa26b97e5eb8ec3b4243b6..4ae1f203f7cbb5daf7c07bea4a062c352c83700d 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -252,6 +252,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), + SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), SNMP_MIB_SENTINEL }; @@ -342,10 +343,12 @@ static int snmp_seq_show(struct seq_file *seq, void *v) IPV4_DEVCONF_ALL(net, FORWARDING) ? 
1 : 2, sysctl_ip_default_ttl); + BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0); for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) - seq_printf(seq, " %lu", - snmp_fold_field((void __percpu **)net->mib.ip_statistics, - snmp4_ipstats_list[i].entry)); + seq_printf(seq, " %llu", + snmp_fold_field64((void __percpu **)net->mib.ip_statistics, + snmp4_ipstats_list[i].entry, + offsetof(struct ipstats_mib, syncp))); icmp_put(seq); /* RFC 2011 compatibility */ icmpmsg_put(seq); @@ -431,9 +434,10 @@ static int netstat_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "\nIpExt:"); for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) - seq_printf(seq, " %lu", - snmp_fold_field((void __percpu **)net->mib.ip_statistics, - snmp4_ipextstats_list[i].entry)); + seq_printf(seq, " %llu", + snmp_fold_field64((void __percpu **)net->mib.ip_statistics, + snmp4_ipextstats_list[i].entry, + offsetof(struct ipstats_mib, syncp))); seq_putc(seq, '\n'); return 0; diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 542f22fc98b3948f80ba1f30027106ddeb882047..f2d297351405d8fab1262bb308142202d72e7f5b 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c @@ -52,6 +52,7 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) return ret; } +EXPORT_SYMBOL(inet_add_protocol); /* * Remove a protocol from the hash tables. @@ -76,6 +77,4 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) return ret; } - -EXPORT_SYMBOL(inet_add_protocol); EXPORT_SYMBOL(inet_del_protocol); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2c7a1639388aea5e18223cc1ef0cc72312276792..009a7b2aa1ef0493f542b4745b334e2964c0b826 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -314,7 +314,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) } static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, - struct rtable *rt, + struct rtable **rtp, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); @@ -323,25 +323,27 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, struct sk_buff *skb; unsigned int iphlen; int err; + struct rtable *rt = *rtp; - if (length > rt->u.dst.dev->mtu) { + if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, - rt->u.dst.dev->mtu); + rt->dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, - length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, + length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; - skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); + skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set(skb, &rt->dst); + *rtp = NULL; skb_reset_network_header(skb); iph = ip_hdr(skb); @@ -373,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, iph->check = 0; iph->tot_len = htons(length); if (!iph->id) - ip_select_ident(iph, &rt->u.dst, NULL); + ip_select_ident(iph, &rt->dst, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } @@ -382,7 +384,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, skb_transport_header(skb))->type); err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, - rt->u.dst.dev, dst_output); + rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) @@ -576,7 +578,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if 
(inet->hdrincl) err = raw_send_hdrinc(sk, msg->msg_iov, len, - rt, msg->msg_flags); + &rt, msg->msg_flags); else { if (!ipc.addr) @@ -604,7 +606,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, return len; do_confirm: - dst_confirm(&rt->u.dst); + dst_confirm(&rt->dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 560acc677ce485cce87a0161fa709f5e50b8e56f..3f56b6e6c6aab583d65902e7190bbf1a6eaa60b6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -253,8 +253,7 @@ static unsigned rt_hash_mask __read_mostly; static unsigned int rt_hash_log __read_mostly; static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); -#define RT_CACHE_STAT_INC(field) \ - (__raw_get_cpu_var(rt_cache_stat).field++) +#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, int genid) @@ -287,10 +286,10 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) rcu_read_lock_bh(); r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); while (r) { - if (dev_net(r->u.dst.dev) == seq_file_net(seq) && + if (dev_net(r->dst.dev) == seq_file_net(seq) && r->rt_genid == st->genid) return r; - r = rcu_dereference_bh(r->u.dst.rt_next); + r = rcu_dereference_bh(r->dst.rt_next); } rcu_read_unlock_bh(); } @@ -302,7 +301,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, { struct rt_cache_iter_state *st = seq->private; - r = r->u.dst.rt_next; + r = r->dst.rt_next; while (!r) { rcu_read_unlock_bh(); do { @@ -320,7 +319,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, { struct rt_cache_iter_state *st = seq->private; while ((r = __rt_cache_get_next(seq, r)) != NULL) { - if (dev_net(r->u.dst.dev) != seq_file_net(seq)) + if (dev_net(r->dst.dev) != seq_file_net(seq)) continue; if (r->rt_genid == st->genid) break; @@ -378,19 +377,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", - r->u.dst.dev ? r->u.dst.dev->name : "*", + r->dst.dev ? r->dst.dev->name : "*", (__force u32)r->rt_dst, (__force u32)r->rt_gateway, - r->rt_flags, atomic_read(&r->u.dst.__refcnt), - r->u.dst.__use, 0, (__force u32)r->rt_src, - (dst_metric(&r->u.dst, RTAX_ADVMSS) ? - (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), - dst_metric(&r->u.dst, RTAX_WINDOW), - (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) + - dst_metric(&r->u.dst, RTAX_RTTVAR)), + r->rt_flags, atomic_read(&r->dst.__refcnt), + r->dst.__use, 0, (__force u32)r->rt_src, + (dst_metric(&r->dst, RTAX_ADVMSS) ? + (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0), + dst_metric(&r->dst, RTAX_WINDOW), + (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + + dst_metric(&r->dst, RTAX_RTTVAR)), r->fl.fl4_tos, - r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1, - r->u.dst.hh ? (r->u.dst.hh->hh_output == + r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, + r->dst.hh ? 
(r->dst.hh->hh_output == dev_queue_xmit) : 0, r->rt_spec_dst, &len); @@ -609,13 +608,13 @@ static inline int ip_rt_proc_init(void) static inline void rt_free(struct rtable *rt) { - call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); + call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } static inline void rt_drop(struct rtable *rt) { ip_rt_put(rt); - call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); + call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } static inline int rt_fast_clean(struct rtable *rth) @@ -623,13 +622,13 @@ static inline int rt_fast_clean(struct rtable *rth) /* Kill broadcast/multicast entries very aggresively, if they collide in hash table with more useful entries */ return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && - rth->fl.iif && rth->u.dst.rt_next; + rth->fl.iif && rth->dst.rt_next; } static inline int rt_valuable(struct rtable *rth) { return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) || - rth->u.dst.expires; + rth->dst.expires; } static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2) @@ -637,15 +636,15 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t unsigned long age; int ret = 0; - if (atomic_read(&rth->u.dst.__refcnt)) + if (atomic_read(&rth->dst.__refcnt)) goto out; ret = 1; - if (rth->u.dst.expires && - time_after_eq(jiffies, rth->u.dst.expires)) + if (rth->dst.expires && + time_after_eq(jiffies, rth->dst.expires)) goto out; - age = jiffies - rth->u.dst.lastuse; + age = jiffies - rth->dst.lastuse; ret = 0; if ((age <= tmo1 && !rt_fast_clean(rth)) || (age <= tmo2 && rt_valuable(rth))) @@ -661,7 +660,7 @@ out: return ret; */ static inline u32 rt_score(struct rtable *rt) { - u32 score = jiffies - rt->u.dst.lastuse; + u32 score = jiffies - rt->dst.lastuse; score = ~score & ~(3<<30); @@ -701,12 +700,12 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) { - return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev)); + return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev)); } static inline int rt_is_expired(struct rtable *rth) { - return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev)); + return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); } /* @@ -735,7 +734,7 @@ static void rt_do_flush(int process_context) rth = rt_hash_table[i].chain; /* defer releasing the head of the list after spin_unlock */ - for (tail = rth; tail; tail = tail->u.dst.rt_next) + for (tail = rth; tail; tail = tail->dst.rt_next) if (!rt_is_expired(tail)) break; if (rth != tail) @@ -744,9 +743,9 @@ static void rt_do_flush(int process_context) /* call rt_free on entries after the tail requiring flush */ prev = &rt_hash_table[i].chain; for (p = *prev; p; p = next) { - next = p->u.dst.rt_next; + next = p->dst.rt_next; if (!rt_is_expired(p)) { - prev = &p->u.dst.rt_next; + prev = &p->dst.rt_next; } else { *prev = next; rt_free(p); @@ -761,7 +760,7 @@ static void rt_do_flush(int process_context) spin_unlock_bh(rt_hash_lock_addr(i)); for (; rth != tail; rth = next) { - next = rth->u.dst.rt_next; + next = rth->dst.rt_next; rt_free(rth); } } @@ -792,7 +791,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth) while (aux != rth) { if (compare_hash_inputs(&aux->fl, &rth->fl)) return 0; - aux = aux->u.dst.rt_next; + aux = aux->dst.rt_next; } return ONE; } @@ -832,18 +831,18 @@ static void rt_check_expire(void) length = 0; spin_lock_bh(rt_hash_lock_addr(i)); while ((rth = *rthp) != NULL) { - 
prefetch(rth->u.dst.rt_next); + prefetch(rth->dst.rt_next); if (rt_is_expired(rth)) { - *rthp = rth->u.dst.rt_next; + *rthp = rth->dst.rt_next; rt_free(rth); continue; } - if (rth->u.dst.expires) { + if (rth->dst.expires) { /* Entry is expired even if it is in use */ - if (time_before_eq(jiffies, rth->u.dst.expires)) { + if (time_before_eq(jiffies, rth->dst.expires)) { nofree: tmo >>= 1; - rthp = &rth->u.dst.rt_next; + rthp = &rth->dst.rt_next; /* * We only count entries on * a chain with equal hash inputs once @@ -859,7 +858,7 @@ static void rt_check_expire(void) goto nofree; /* Cleanup aged off entries. */ - *rthp = rth->u.dst.rt_next; + *rthp = rth->dst.rt_next; rt_free(rth); } spin_unlock_bh(rt_hash_lock_addr(i)); @@ -1000,10 +999,10 @@ static int rt_garbage_collect(struct dst_ops *ops) if (!rt_is_expired(rth) && !rt_may_expire(rth, tmo, expire)) { tmo >>= 1; - rthp = &rth->u.dst.rt_next; + rthp = &rth->dst.rt_next; continue; } - *rthp = rth->u.dst.rt_next; + *rthp = rth->dst.rt_next; rt_free(rth); goal--; } @@ -1069,7 +1068,7 @@ static int slow_chain_length(const struct rtable *head) while (rth) { length += has_noalias(head, rth); - rth = rth->u.dst.rt_next; + rth = rth->dst.rt_next; } return length >> FRACT_BITS; } @@ -1091,7 +1090,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, candp = NULL; now = jiffies; - if (!rt_caching(dev_net(rt->u.dst.dev))) { + if (!rt_caching(dev_net(rt->dst.dev))) { /* * If we're not caching, just tell the caller we * were successful and don't touch the route. The @@ -1109,7 +1108,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, */ if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { - int err = arp_bind_neighbour(&rt->u.dst); + int err = arp_bind_neighbour(&rt->dst); if (err) { if (net_ratelimit()) printk(KERN_WARNING @@ -1128,19 +1127,19 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, spin_lock_bh(rt_hash_lock_addr(hash)); while ((rth = *rthp) != NULL) { if (rt_is_expired(rth)) { - *rthp = rth->u.dst.rt_next; + *rthp = rth->dst.rt_next; rt_free(rth); continue; } if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) { /* Put it first */ - *rthp = rth->u.dst.rt_next; + *rthp = rth->dst.rt_next; /* * Since lookup is lockfree, the deletion * must be visible to another weakly ordered CPU before * the insertion at the start of the hash chain. */ - rcu_assign_pointer(rth->u.dst.rt_next, + rcu_assign_pointer(rth->dst.rt_next, rt_hash_table[hash].chain); /* * Since lookup is lockfree, the update writes @@ -1148,18 +1147,18 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, */ rcu_assign_pointer(rt_hash_table[hash].chain, rth); - dst_use(&rth->u.dst, now); + dst_use(&rth->dst, now); spin_unlock_bh(rt_hash_lock_addr(hash)); rt_drop(rt); if (rp) *rp = rth; else - skb_dst_set(skb, &rth->u.dst); + skb_dst_set(skb, &rth->dst); return 0; } - if (!atomic_read(&rth->u.dst.__refcnt)) { + if (!atomic_read(&rth->dst.__refcnt)) { u32 score = rt_score(rth); if (score <= min_score) { @@ -1171,7 +1170,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, chain_length++; - rthp = &rth->u.dst.rt_next; + rthp = &rth->dst.rt_next; } if (cand) { @@ -1182,17 +1181,17 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, * only 2 entries per bucket. We will see. 
*/ if (chain_length > ip_rt_gc_elasticity) { - *candp = cand->u.dst.rt_next; + *candp = cand->dst.rt_next; rt_free(cand); } } else { if (chain_length > rt_chain_length_max && slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { - struct net *net = dev_net(rt->u.dst.dev); + struct net *net = dev_net(rt->dst.dev); int num = ++net->ipv4.current_rt_cache_rebuild_count; if (!rt_caching(net)) { printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", - rt->u.dst.dev->name, num); + rt->dst.dev->name, num); } rt_emergency_hash_rebuild(net); spin_unlock_bh(rt_hash_lock_addr(hash)); @@ -1207,7 +1206,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, route or unicast forwarding path. */ if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { - int err = arp_bind_neighbour(&rt->u.dst); + int err = arp_bind_neighbour(&rt->dst); if (err) { spin_unlock_bh(rt_hash_lock_addr(hash)); @@ -1238,14 +1237,14 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, } } - rt->u.dst.rt_next = rt_hash_table[hash].chain; + rt->dst.rt_next = rt_hash_table[hash].chain; #if RT_CACHE_DEBUG >= 2 - if (rt->u.dst.rt_next) { + if (rt->dst.rt_next) { struct rtable *trt; printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst); - for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next) + for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next) printk(" . %pI4", &trt->rt_dst); printk("\n"); } @@ -1263,7 +1262,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, if (rp) *rp = rt; else - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); return 0; } @@ -1325,6 +1324,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) ip_select_fb_ident(iph); } +EXPORT_SYMBOL(__ip_select_ident); static void rt_del(unsigned hash, struct rtable *rt) { @@ -1335,20 +1335,21 @@ static void rt_del(unsigned hash, struct rtable *rt) ip_rt_put(rt); while ((aux = *rthp) != NULL) { if (aux == rt || rt_is_expired(aux)) { - *rthp = aux->u.dst.rt_next; + *rthp = aux->dst.rt_next; rt_free(aux); continue; } - rthp = &aux->u.dst.rt_next; + rthp = &aux->dst.rt_next; } spin_unlock_bh(rt_hash_lock_addr(hash)); } +/* called in rcu_read_lock() section */ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, __be32 saddr, struct net_device *dev) { int i, k; - struct in_device *in_dev = in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); struct rtable *rth, **rthp; __be32 skeys[2] = { saddr, 0 }; int ikeys[2] = { dev->ifindex, 0 }; @@ -1384,7 +1385,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rthp=&rt_hash_table[hash].chain; - rcu_read_lock(); while ((rth = rcu_dereference(*rthp)) != NULL) { struct rtable *rt; @@ -1393,44 +1393,42 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rth->fl.oif != ikeys[k] || rth->fl.iif != 0 || rt_is_expired(rth) || - !net_eq(dev_net(rth->u.dst.dev), net)) { - rthp = &rth->u.dst.rt_next; + !net_eq(dev_net(rth->dst.dev), net)) { + rthp = &rth->dst.rt_next; continue; } if (rth->rt_dst != daddr || rth->rt_src != saddr || - rth->u.dst.error || + rth->dst.error || rth->rt_gateway != old_gw || - rth->u.dst.dev != dev) + rth->dst.dev != dev) break; - dst_hold(&rth->u.dst); - rcu_read_unlock(); + dst_hold(&rth->dst); rt = dst_alloc(&ipv4_dst_ops); if (rt == NULL) { ip_rt_put(rth); - in_dev_put(in_dev); return; } /* Copy all the information. 
*/ *rt = *rth; - rt->u.dst.__use = 1; - atomic_set(&rt->u.dst.__refcnt, 1); - rt->u.dst.child = NULL; - if (rt->u.dst.dev) - dev_hold(rt->u.dst.dev); + rt->dst.__use = 1; + atomic_set(&rt->dst.__refcnt, 1); + rt->dst.child = NULL; + if (rt->dst.dev) + dev_hold(rt->dst.dev); if (rt->idev) in_dev_hold(rt->idev); - rt->u.dst.obsolete = -1; - rt->u.dst.lastuse = jiffies; - rt->u.dst.path = &rt->u.dst; - rt->u.dst.neighbour = NULL; - rt->u.dst.hh = NULL; + rt->dst.obsolete = -1; + rt->dst.lastuse = jiffies; + rt->dst.path = &rt->dst; + rt->dst.neighbour = NULL; + rt->dst.hh = NULL; #ifdef CONFIG_XFRM - rt->u.dst.xfrm = NULL; + rt->dst.xfrm = NULL; #endif rt->rt_genid = rt_genid(net); rt->rt_flags |= RTCF_REDIRECTED; @@ -1439,23 +1437,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rt->rt_gateway = new_gw; /* Redirect received -> path was valid */ - dst_confirm(&rth->u.dst); + dst_confirm(&rth->dst); if (rt->peer) atomic_inc(&rt->peer->refcnt); - if (arp_bind_neighbour(&rt->u.dst) || - !(rt->u.dst.neighbour->nud_state & + if (arp_bind_neighbour(&rt->dst) || + !(rt->dst.neighbour->nud_state & NUD_VALID)) { - if (rt->u.dst.neighbour) - neigh_event_send(rt->u.dst.neighbour, NULL); + if (rt->dst.neighbour) + neigh_event_send(rt->dst.neighbour, NULL); ip_rt_put(rth); rt_drop(rt); goto do_next; } - netevent.old = &rth->u.dst; - netevent.new = &rt->u.dst; + netevent.old = &rth->dst; + netevent.new = &rt->dst; call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); @@ -1464,12 +1462,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, ip_rt_put(rt); goto do_next; } - rcu_read_unlock(); do_next: ; } } - in_dev_put(in_dev); return; reject_redirect: @@ -1480,7 +1476,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, &old_gw, dev->name, &new_gw, &saddr, &daddr); #endif - in_dev_put(in_dev); + ; } static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) @@ -1493,8 +1489,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) ip_rt_put(rt); ret = NULL; } else if ((rt->rt_flags & RTCF_REDIRECTED) || - (rt->u.dst.expires && - time_after_eq(jiffies, rt->u.dst.expires))) { + (rt->dst.expires && + time_after_eq(jiffies, rt->dst.expires))) { unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, rt->fl.oif, rt_genid(dev_net(dst->dev))); @@ -1532,7 +1528,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) int log_martians; rcu_read_lock(); - in_dev = __in_dev_get_rcu(rt->u.dst.dev); + in_dev = __in_dev_get_rcu(rt->dst.dev); if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { rcu_read_unlock(); return; @@ -1543,30 +1539,30 @@ void ip_rt_send_redirect(struct sk_buff *skb) /* No redirected packets during ip_rt_redirect_silence; * reset the algorithm. */ - if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence)) - rt->u.dst.rate_tokens = 0; + if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence)) + rt->dst.rate_tokens = 0; /* Too many ignored redirects; do not send anything - * set u.dst.rate_last to the last seen redirected packet. + * set dst.rate_last to the last seen redirected packet. */ - if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) { - rt->u.dst.rate_last = jiffies; + if (rt->dst.rate_tokens >= ip_rt_redirect_number) { + rt->dst.rate_last = jiffies; return; } /* Check for load limit; set rate_last to the latest sent * redirect. 
*/ - if (rt->u.dst.rate_tokens == 0 || + if (rt->dst.rate_tokens == 0 || time_after(jiffies, - (rt->u.dst.rate_last + - (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) { + (rt->dst.rate_last + + (ip_rt_redirect_load << rt->dst.rate_tokens)))) { icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); - rt->u.dst.rate_last = jiffies; - ++rt->u.dst.rate_tokens; + rt->dst.rate_last = jiffies; + ++rt->dst.rate_tokens; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && - rt->u.dst.rate_tokens == ip_rt_redirect_number && + rt->dst.rate_tokens == ip_rt_redirect_number && net_ratelimit()) printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", &rt->rt_src, rt->rt_iif, @@ -1581,7 +1577,7 @@ static int ip_error(struct sk_buff *skb) unsigned long now; int code; - switch (rt->u.dst.error) { + switch (rt->dst.error) { case EINVAL: default: goto out; @@ -1590,7 +1586,7 @@ static int ip_error(struct sk_buff *skb) break; case ENETUNREACH: code = ICMP_NET_UNREACH; - IP_INC_STATS_BH(dev_net(rt->u.dst.dev), + IP_INC_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INNOROUTES); break; case EACCES: @@ -1599,12 +1595,12 @@ static int ip_error(struct sk_buff *skb) } now = jiffies; - rt->u.dst.rate_tokens += now - rt->u.dst.rate_last; - if (rt->u.dst.rate_tokens > ip_rt_error_burst) - rt->u.dst.rate_tokens = ip_rt_error_burst; - rt->u.dst.rate_last = now; - if (rt->u.dst.rate_tokens >= ip_rt_error_cost) { - rt->u.dst.rate_tokens -= ip_rt_error_cost; + rt->dst.rate_tokens += now - rt->dst.rate_last; + if (rt->dst.rate_tokens > ip_rt_error_burst) + rt->dst.rate_tokens = ip_rt_error_burst; + rt->dst.rate_last = now; + if (rt->dst.rate_tokens >= ip_rt_error_cost) { + rt->dst.rate_tokens -= ip_rt_error_cost; icmp_send(skb, ICMP_DEST_UNREACH, code, 0); } @@ -1649,7 +1645,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, rcu_read_lock(); for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; - rth = rcu_dereference(rth->u.dst.rt_next)) { + rth = rcu_dereference(rth->dst.rt_next)) { unsigned short mtu = new_mtu; if (rth->fl.fl4_dst != daddr || @@ -1658,8 +1654,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, rth->rt_src != iph->saddr || rth->fl.oif != ikeys[k] || rth->fl.iif != 0 || - dst_metric_locked(&rth->u.dst, RTAX_MTU) || - !net_eq(dev_net(rth->u.dst.dev), net) || + dst_metric_locked(&rth->dst, RTAX_MTU) || + !net_eq(dev_net(rth->dst.dev), net) || rt_is_expired(rth)) continue; @@ -1667,22 +1663,22 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, /* BSD 4.2 compatibility hack :-( */ if (mtu == 0 && - old_mtu >= dst_mtu(&rth->u.dst) && + old_mtu >= dst_mtu(&rth->dst) && old_mtu >= 68 + (iph->ihl << 2)) old_mtu -= iph->ihl << 2; mtu = guess_mtu(old_mtu); } - if (mtu <= dst_mtu(&rth->u.dst)) { - if (mtu < dst_mtu(&rth->u.dst)) { - dst_confirm(&rth->u.dst); + if (mtu <= dst_mtu(&rth->dst)) { + if (mtu < dst_mtu(&rth->dst)) { + dst_confirm(&rth->dst); if (mtu < ip_rt_min_pmtu) { mtu = ip_rt_min_pmtu; - rth->u.dst.metrics[RTAX_LOCK-1] |= + rth->dst.metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU); } - rth->u.dst.metrics[RTAX_MTU-1] = mtu; - dst_set_expires(&rth->u.dst, + rth->dst.metrics[RTAX_MTU-1] = mtu; + dst_set_expires(&rth->dst, ip_rt_mtu_expires); } est_mtu = mtu; @@ -1755,7 +1751,7 @@ static void ipv4_link_failure(struct sk_buff *skb) rt = skb_rtable(skb); if (rt) - dst_set_expires(&rt->u.dst, 0); + dst_set_expires(&rt->dst, 0); } static int ip_rt_bug(struct sk_buff *skb) @@ -1783,11 +1779,11 @@ void ip_rt_get_source(u8 
*addr, struct rtable *rt) if (rt->fl.iif == 0) src = rt->rt_src; - else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) { + else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) { src = FIB_RES_PREFSRC(res); fib_res_put(&res); } else - src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway, + src = inet_select_addr(rt->dst.dev, rt->rt_gateway, RT_SCOPE_UNIVERSE); memcpy(addr, &src, 4); } @@ -1795,10 +1791,10 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt) #ifdef CONFIG_NET_CLS_ROUTE static void set_class_tag(struct rtable *rt, u32 tag) { - if (!(rt->u.dst.tclassid & 0xFFFF)) - rt->u.dst.tclassid |= tag & 0xFFFF; - if (!(rt->u.dst.tclassid & 0xFFFF0000)) - rt->u.dst.tclassid |= tag & 0xFFFF0000; + if (!(rt->dst.tclassid & 0xFFFF)) + rt->dst.tclassid |= tag & 0xFFFF; + if (!(rt->dst.tclassid & 0xFFFF0000)) + rt->dst.tclassid |= tag & 0xFFFF0000; } #endif @@ -1810,30 +1806,30 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) rt->rt_gateway = FIB_RES_GW(*res); - memcpy(rt->u.dst.metrics, fi->fib_metrics, - sizeof(rt->u.dst.metrics)); + memcpy(rt->dst.metrics, fi->fib_metrics, + sizeof(rt->dst.metrics)); if (fi->fib_mtu == 0) { - rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; - if (dst_metric_locked(&rt->u.dst, RTAX_MTU) && + rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu; + if (dst_metric_locked(&rt->dst, RTAX_MTU) && rt->rt_gateway != rt->rt_dst && - rt->u.dst.dev->mtu > 576) - rt->u.dst.metrics[RTAX_MTU-1] = 576; + rt->dst.dev->mtu > 576) + rt->dst.metrics[RTAX_MTU-1] = 576; } #ifdef CONFIG_NET_CLS_ROUTE - rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid; + rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid; #endif } else - rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu; - - if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) - rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; - if (dst_mtu(&rt->u.dst) > IP_MAX_MTU) - rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; - if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0) - rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40, + rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu; + + if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0) + rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; + if (dst_mtu(&rt->dst) > IP_MAX_MTU) + rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; + if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0) + rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40, ip_rt_min_advmss); - if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40) - rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40; + if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40) + rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40; #ifdef CONFIG_NET_CLS_ROUTE #ifdef CONFIG_IP_MULTIPLE_TABLES @@ -1844,14 +1840,16 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) rt->rt_type = res->type; } +/* called in rcu_read_lock() section */ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, int our) { - unsigned hash; + unsigned int hash; struct rtable *rth; __be32 spec_dst; - struct in_device *in_dev = in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); u32 itag = 0; + int err; /* Primary sanity checks. 
*/ @@ -1866,21 +1864,23 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (!ipv4_is_local_multicast(daddr)) goto e_inval; spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); - } else if (fib_validate_source(saddr, 0, tos, 0, - dev, &spec_dst, &itag, 0) < 0) - goto e_inval; - + } else { + err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, + &itag, 0); + if (err < 0) + goto e_err; + } rth = dst_alloc(&ipv4_dst_ops); if (!rth) goto e_nobufs; - rth->u.dst.output = ip_rt_bug; - rth->u.dst.obsolete = -1; + rth->dst.output = ip_rt_bug; + rth->dst.obsolete = -1; - atomic_set(&rth->u.dst.__refcnt, 1); - rth->u.dst.flags= DST_HOST; + atomic_set(&rth->dst.__refcnt, 1); + rth->dst.flags= DST_HOST; if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) - rth->u.dst.flags |= DST_NOPOLICY; + rth->dst.flags |= DST_NOPOLICY; rth->fl.fl4_dst = daddr; rth->rt_dst = daddr; rth->fl.fl4_tos = tos; @@ -1888,13 +1888,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->fl.fl4_src = saddr; rth->rt_src = saddr; #ifdef CONFIG_NET_CLS_ROUTE - rth->u.dst.tclassid = itag; + rth->dst.tclassid = itag; #endif rth->rt_iif = rth->fl.iif = dev->ifindex; - rth->u.dst.dev = init_net.loopback_dev; - dev_hold(rth->u.dst.dev); - rth->idev = in_dev_get(rth->u.dst.dev); + rth->dst.dev = init_net.loopback_dev; + dev_hold(rth->dst.dev); + rth->idev = in_dev_get(rth->dst.dev); rth->fl.oif = 0; rth->rt_gateway = daddr; rth->rt_spec_dst= spec_dst; @@ -1902,27 +1902,25 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->rt_flags = RTCF_MULTICAST; rth->rt_type = RTN_MULTICAST; if (our) { - rth->u.dst.input= ip_local_deliver; + rth->dst.input= ip_local_deliver; rth->rt_flags |= RTCF_LOCAL; } #ifdef CONFIG_IP_MROUTE if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) - rth->u.dst.input = ip_mr_input; + rth->dst.input = ip_mr_input; #endif RT_CACHE_STAT_INC(in_slow_mc); - in_dev_put(in_dev); hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); e_nobufs: - in_dev_put(in_dev); return -ENOBUFS; - e_inval: - in_dev_put(in_dev); return -EINVAL; +e_err: + return err; } @@ -1956,22 +1954,22 @@ static void ip_handle_martian_source(struct net_device *dev, #endif } +/* called in rcu_read_lock() section */ static int __mkroute_input(struct sk_buff *skb, struct fib_result *res, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos, struct rtable **result) { - struct rtable *rth; int err; struct in_device *out_dev; - unsigned flags = 0; + unsigned int flags = 0; __be32 spec_dst; u32 itag; /* get a working reference to the output device */ - out_dev = in_dev_get(FIB_RES_DEV(*res)); + out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); if (out_dev == NULL) { if (net_ratelimit()) printk(KERN_CRIT "Bug in ip_route_input" \ @@ -1986,7 +1984,6 @@ static int __mkroute_input(struct sk_buff *skb, ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr); - err = -EINVAL; goto cleanup; } @@ -2020,12 +2017,12 @@ static int __mkroute_input(struct sk_buff *skb, goto cleanup; } - atomic_set(&rth->u.dst.__refcnt, 1); - rth->u.dst.flags= DST_HOST; + atomic_set(&rth->dst.__refcnt, 1); + rth->dst.flags= DST_HOST; if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) - rth->u.dst.flags |= DST_NOPOLICY; + rth->dst.flags |= DST_NOPOLICY; if (IN_DEV_CONF_GET(out_dev, NOXFRM)) - rth->u.dst.flags |= DST_NOXFRM; + rth->dst.flags |= DST_NOXFRM; rth->fl.fl4_dst = daddr; rth->rt_dst = daddr; rth->fl.fl4_tos 
= tos; @@ -2035,16 +2032,16 @@ static int __mkroute_input(struct sk_buff *skb, rth->rt_gateway = daddr; rth->rt_iif = rth->fl.iif = in_dev->dev->ifindex; - rth->u.dst.dev = (out_dev)->dev; - dev_hold(rth->u.dst.dev); - rth->idev = in_dev_get(rth->u.dst.dev); + rth->dst.dev = (out_dev)->dev; + dev_hold(rth->dst.dev); + rth->idev = in_dev_get(rth->dst.dev); rth->fl.oif = 0; rth->rt_spec_dst= spec_dst; - rth->u.dst.obsolete = -1; - rth->u.dst.input = ip_forward; - rth->u.dst.output = ip_output; - rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); + rth->dst.obsolete = -1; + rth->dst.input = ip_forward; + rth->dst.output = ip_output; + rth->rt_genid = rt_genid(dev_net(rth->dst.dev)); rt_set_nexthop(rth, res, itag); @@ -2053,8 +2050,6 @@ static int __mkroute_input(struct sk_buff *skb, *result = rth; err = 0; cleanup: - /* release the working reference to the output device */ - in_dev_put(out_dev); return err; } @@ -2080,7 +2075,7 @@ static int ip_mkroute_input(struct sk_buff *skb, /* put it into the cache */ hash = rt_hash(daddr, saddr, fl->iif, - rt_genid(dev_net(rth->u.dst.dev))); + rt_genid(dev_net(rth->dst.dev))); return rt_intern_hash(hash, rth, NULL, skb, fl->iif); } @@ -2098,7 +2093,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev) { struct fib_result res; - struct in_device *in_dev = in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr, .saddr = saddr, @@ -2158,13 +2153,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, goto brd_input; if (res.type == RTN_LOCAL) { - int result; - result = fib_validate_source(saddr, daddr, tos, + err = fib_validate_source(saddr, daddr, tos, net->loopback_dev->ifindex, dev, &spec_dst, &itag, skb->mark); - if (result < 0) - goto martian_source; - if (result) + if (err < 0) + goto martian_source_keep_err; + if (err) flags |= RTCF_DIRECTSRC; spec_dst = daddr; goto local_input; @@ -2177,7 +2171,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos); done: - in_dev_put(in_dev); if (free_res) fib_res_put(&res); out: return err; @@ -2192,7 +2185,7 @@ out: return err; err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, &itag, skb->mark); if (err < 0) - goto martian_source; + goto martian_source_keep_err; if (err) flags |= RTCF_DIRECTSRC; } @@ -2205,14 +2198,14 @@ out: return err; if (!rth) goto e_nobufs; - rth->u.dst.output= ip_rt_bug; - rth->u.dst.obsolete = -1; + rth->dst.output= ip_rt_bug; + rth->dst.obsolete = -1; rth->rt_genid = rt_genid(net); - atomic_set(&rth->u.dst.__refcnt, 1); - rth->u.dst.flags= DST_HOST; + atomic_set(&rth->dst.__refcnt, 1); + rth->dst.flags= DST_HOST; if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) - rth->u.dst.flags |= DST_NOPOLICY; + rth->dst.flags |= DST_NOPOLICY; rth->fl.fl4_dst = daddr; rth->rt_dst = daddr; rth->fl.fl4_tos = tos; @@ -2220,20 +2213,20 @@ out: return err; rth->fl.fl4_src = saddr; rth->rt_src = saddr; #ifdef CONFIG_NET_CLS_ROUTE - rth->u.dst.tclassid = itag; + rth->dst.tclassid = itag; #endif rth->rt_iif = rth->fl.iif = dev->ifindex; - rth->u.dst.dev = net->loopback_dev; - dev_hold(rth->u.dst.dev); - rth->idev = in_dev_get(rth->u.dst.dev); + rth->dst.dev = net->loopback_dev; + dev_hold(rth->dst.dev); + rth->idev = in_dev_get(rth->dst.dev); rth->rt_gateway = daddr; rth->rt_spec_dst= spec_dst; - rth->u.dst.input= ip_local_deliver; + rth->dst.input= 
ip_local_deliver; rth->rt_flags = flags|RTCF_LOCAL; if (res.type == RTN_UNREACHABLE) { - rth->u.dst.input= ip_error; - rth->u.dst.error= -err; + rth->dst.input= ip_error; + rth->dst.error= -err; rth->rt_flags &= ~RTCF_LOCAL; } rth->rt_type = res.type; @@ -2273,8 +2266,10 @@ out: return err; goto done; martian_source: + err = -EINVAL; +martian_source_keep_err: ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); - goto e_inval; + goto done; } int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, @@ -2284,32 +2279,34 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, unsigned hash; int iif = dev->ifindex; struct net *net; + int res; net = dev_net(dev); + rcu_read_lock(); + if (!rt_caching(net)) goto skip_cache; tos &= IPTOS_RT_MASK; hash = rt_hash(daddr, saddr, iif, rt_genid(net)); - rcu_read_lock(); for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; - rth = rcu_dereference(rth->u.dst.rt_next)) { + rth = rcu_dereference(rth->dst.rt_next)) { if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | (rth->fl.iif ^ iif) | rth->fl.oif | (rth->fl.fl4_tos ^ tos)) == 0 && rth->fl.mark == skb->mark && - net_eq(dev_net(rth->u.dst.dev), net) && + net_eq(dev_net(rth->dst.dev), net) && !rt_is_expired(rth)) { if (noref) { - dst_use_noref(&rth->u.dst, jiffies); - skb_dst_set_noref(skb, &rth->u.dst); + dst_use_noref(&rth->dst, jiffies); + skb_dst_set_noref(skb, &rth->dst); } else { - dst_use(&rth->u.dst, jiffies); - skb_dst_set(skb, &rth->u.dst); + dst_use(&rth->dst, jiffies); + skb_dst_set(skb, &rth->dst); } RT_CACHE_STAT_INC(in_hit); rcu_read_unlock(); @@ -2317,7 +2314,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, } RT_CACHE_STAT_INC(in_hlist_search); } - rcu_read_unlock(); skip_cache: /* Multicast recognition logic is moved from route cache to here. @@ -2332,12 +2328,11 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, route cache entry is created eventually. */ if (ipv4_is_multicast(daddr)) { - struct in_device *in_dev; + struct in_device *in_dev = __in_dev_get_rcu(dev); - rcu_read_lock(); - if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { + if (in_dev) { int our = ip_check_mc(in_dev, daddr, saddr, - ip_hdr(skb)->protocol); + ip_hdr(skb)->protocol); if (our #ifdef CONFIG_IP_MROUTE || @@ -2345,15 +2340,18 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, IN_DEV_MFORWARD(in_dev)) #endif ) { + int res = ip_route_input_mc(skb, daddr, saddr, + tos, dev, our); rcu_read_unlock(); - return ip_route_input_mc(skb, daddr, saddr, - tos, dev, our); + return res; } } rcu_read_unlock(); return -EINVAL; } - return ip_route_input_slow(skb, daddr, saddr, tos, dev); + res = ip_route_input_slow(skb, daddr, saddr, tos, dev); + rcu_read_unlock(); + return res; } EXPORT_SYMBOL(ip_route_input_common); @@ -2415,12 +2413,12 @@ static int __mkroute_output(struct rtable **result, goto cleanup; } - atomic_set(&rth->u.dst.__refcnt, 1); - rth->u.dst.flags= DST_HOST; + atomic_set(&rth->dst.__refcnt, 1); + rth->dst.flags= DST_HOST; if (IN_DEV_CONF_GET(in_dev, NOXFRM)) - rth->u.dst.flags |= DST_NOXFRM; + rth->dst.flags |= DST_NOXFRM; if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) - rth->u.dst.flags |= DST_NOPOLICY; + rth->dst.flags |= DST_NOPOLICY; rth->fl.fl4_dst = oldflp->fl4_dst; rth->fl.fl4_tos = tos; @@ -2432,35 +2430,35 @@ static int __mkroute_output(struct rtable **result, rth->rt_iif = oldflp->oif ? 
: dev_out->ifindex; /* get references to the devices that are to be hold by the routing cache entry */ - rth->u.dst.dev = dev_out; + rth->dst.dev = dev_out; dev_hold(dev_out); rth->idev = in_dev_get(dev_out); rth->rt_gateway = fl->fl4_dst; rth->rt_spec_dst= fl->fl4_src; - rth->u.dst.output=ip_output; - rth->u.dst.obsolete = -1; + rth->dst.output=ip_output; + rth->dst.obsolete = -1; rth->rt_genid = rt_genid(dev_net(dev_out)); RT_CACHE_STAT_INC(out_slow_tot); if (flags & RTCF_LOCAL) { - rth->u.dst.input = ip_local_deliver; + rth->dst.input = ip_local_deliver; rth->rt_spec_dst = fl->fl4_dst; } if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { rth->rt_spec_dst = fl->fl4_src; if (flags & RTCF_LOCAL && !(dev_out->flags & IFF_LOOPBACK)) { - rth->u.dst.output = ip_mc_output; + rth->dst.output = ip_mc_output; RT_CACHE_STAT_INC(out_slow_mc); } #ifdef CONFIG_IP_MROUTE if (res->type == RTN_MULTICAST) { if (IN_DEV_MFORWARD(in_dev) && !ipv4_is_local_multicast(oldflp->fl4_dst)) { - rth->u.dst.input = ip_mr_input; - rth->u.dst.output = ip_mc_output; + rth->dst.input = ip_mr_input; + rth->dst.output = ip_mc_output; } } #endif @@ -2715,7 +2713,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, rcu_read_lock_bh(); for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth; - rth = rcu_dereference_bh(rth->u.dst.rt_next)) { + rth = rcu_dereference_bh(rth->dst.rt_next)) { if (rth->fl.fl4_dst == flp->fl4_dst && rth->fl.fl4_src == flp->fl4_src && rth->fl.iif == 0 && @@ -2723,9 +2721,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, rth->fl.mark == flp->mark && !((rth->fl.fl4_tos ^ flp->fl4_tos) & (IPTOS_RT_MASK | RTO_ONLINK)) && - net_eq(dev_net(rth->u.dst.dev), net) && + net_eq(dev_net(rth->dst.dev), net) && !rt_is_expired(rth)) { - dst_use(&rth->u.dst, jiffies); + dst_use(&rth->dst, jiffies); RT_CACHE_STAT_INC(out_hit); rcu_read_unlock_bh(); *rp = rth; @@ -2738,7 +2736,6 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, slow_output: return ip_route_output_slow(net, rp, flp); } - EXPORT_SYMBOL_GPL(__ip_route_output_key); static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) @@ -2762,15 +2759,15 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi dst_alloc(&ipv4_dst_blackhole_ops); if (rt) { - struct dst_entry *new = &rt->u.dst; + struct dst_entry *new = &rt->dst; atomic_set(&new->__refcnt, 1); new->__use = 1; new->input = dst_discard; new->output = dst_discard; - memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); + memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); - new->dev = ort->u.dst.dev; + new->dev = ort->dst.dev; if (new->dev) dev_hold(new->dev); @@ -2794,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi dst_free(new); } - dst_release(&(*rp)->u.dst); + dst_release(&(*rp)->dst); *rp = rt; return (rt ? 
0 : -ENOMEM); } @@ -2822,13 +2819,13 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp, return 0; } - EXPORT_SYMBOL_GPL(ip_route_output_flow); int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp) { return ip_route_output_flow(net, rp, flp, NULL, 0); } +EXPORT_SYMBOL(ip_route_output_key); static int rt_fill_info(struct net *net, struct sk_buff *skb, u32 pid, u32 seq, int event, @@ -2864,11 +2861,11 @@ static int rt_fill_info(struct net *net, r->rtm_src_len = 32; NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src); } - if (rt->u.dst.dev) - NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex); + if (rt->dst.dev) + NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); #ifdef CONFIG_NET_CLS_ROUTE - if (rt->u.dst.tclassid) - NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); + if (rt->dst.tclassid) + NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); #endif if (rt->fl.iif) NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); @@ -2878,12 +2875,16 @@ static int rt_fill_info(struct net *net, if (rt->rt_dst != rt->rt_gateway) NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); - if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) + if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) goto nla_put_failure; - error = rt->u.dst.error; - expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; + if (rt->fl.mark) + NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark); + + error = rt->dst.error; + expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; if (rt->peer) { + inet_peer_refcheck(rt->peer); id = atomic_read(&rt->peer->ip_id_count) & 0xffff; if (rt->peer->tcp_ts_stamp) { ts = rt->peer->tcp_ts; @@ -2914,7 +2915,7 @@ static int rt_fill_info(struct net *net, NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); } - if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage, + if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, expires, error) < 0) goto nla_put_failure; @@ -2935,6 +2936,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void __be32 src = 0; u32 iif; int err; + int mark; struct sk_buff *skb; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); @@ -2962,6 +2964,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0; dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0; iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; + mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; if (iif) { struct net_device *dev; @@ -2974,13 +2977,14 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void skb->protocol = htons(ETH_P_IP); skb->dev = dev; + skb->mark = mark; local_bh_disable(); err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); local_bh_enable(); rt = skb_rtable(skb); - if (err == 0 && rt->u.dst.error) - err = -rt->u.dst.error; + if (err == 0 && rt->dst.error) + err = -rt->dst.error; } else { struct flowi fl = { .nl_u = { @@ -2991,6 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void }, }, .oif = tb[RTA_OIF] ? 
nla_get_u32(tb[RTA_OIF]) : 0, + .mark = mark, }; err = ip_route_output_key(net, &rt, &fl); } @@ -2998,7 +3003,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void if (err) goto errout_free; - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); if (rtm->rtm_flags & RTM_F_NOTIFY) rt->rt_flags |= RTCF_NOTIFY; @@ -3034,12 +3039,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) continue; rcu_read_lock_bh(); for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt; - rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) { - if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) + rt = rcu_dereference_bh(rt->dst.rt_next), idx++) { + if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx) continue; if (rt_is_expired(rt)) continue; - skb_dst_set_noref(skb, &rt->u.dst); + skb_dst_set_noref(skb, &rt->dst); if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1, NLM_F_MULTI) <= 0) { @@ -3365,6 +3370,3 @@ void __init ip_static_sysctl_init(void) register_sysctl_paths(ipv4_path, ipv4_skeleton); } #endif - -EXPORT_SYMBOL(__ip_select_ident); -EXPORT_SYMBOL(ip_route_output_key); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 9f6b22206c527263fea9c56727967afd92b773bb..650cace2180d3598d2c4c10bf16c300506f3f833 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -18,8 +18,8 @@ #include #include -/* Timestamps: lowest 9 bits store TCP options */ -#define TSBITS 9 +/* Timestamps: lowest bits store TCP options */ +#define TSBITS 6 #define TSMASK (((__u32)1 << TSBITS) - 1) extern int sysctl_tcp_syncookies; @@ -58,7 +58,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, /* * when syncookies are in effect and tcp timestamps are enabled we encode - * tcp options in the lowest 9 bits of the timestamp value that will be + * tcp options in the lower bits of the timestamp value that will be * sent in the syn-ack. * Since subsequent timestamps use the normal tcp_time_stamp value, we * must make sure that the resulting initial timestamp is <= tcp_time_stamp. @@ -70,11 +70,10 @@ __u32 cookie_init_timestamp(struct request_sock *req) u32 options = 0; ireq = inet_rsk(req); - if (ireq->wscale_ok) { - options = ireq->snd_wscale; - options |= ireq->rcv_wscale << 4; - } - options |= ireq->sack_ok << 8; + + options = ireq->wscale_ok ? ireq->snd_wscale : 0xf; + options |= ireq->sack_ok << 4; + options |= ireq->ecn_ok << 5; ts = ts_now & ~TSMASK; ts |= options; @@ -138,23 +137,23 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, } /* - * This table has to be sorted and terminated with (__u16)-1. - * XXX generate a better table. - * Unresolved Issues: HIPPI with a 64k MSS is not well supported. + * MSS Values are taken from the 2009 paper + * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson: + * - values 1440 to 1460 accounted for 80% of observed mss values + * - values outside the 536-1460 range are rare (<0.2%). + * + * Table must be sorted. */ static __u16 const msstab[] = { - 64 - 1, - 256 - 1, - 512 - 1, - 536 - 1, - 1024 - 1, - 1440 - 1, - 1460 - 1, - 4312 - 1, - (__u16)-1 + 64, + 512, + 536, + 1024, + 1440, + 1460, + 4312, + 8960, }; -/* The number doesn't include the -1 terminator */ -#define NUM_MSS (ARRAY_SIZE(msstab) - 1) /* * Generate a syncookie. 
mssp points to the mss, which is returned @@ -169,10 +168,10 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) tcp_synq_overflow(sk); - /* XXX sort msstab[] by probability? Binary search? */ - for (mssind = 0; mss > msstab[mssind + 1]; mssind++) - ; - *mssp = msstab[mssind] + 1; + for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) + if (mss >= msstab[mssind]) + break; + *mssp = msstab[mssind]; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); @@ -202,7 +201,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) jiffies / (HZ * 60), COUNTER_TRIES); - return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; + return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; } static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, @@ -227,26 +226,38 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, * additional tcp options in the timestamp. * This extracts these options from the timestamp echo. * - * The lowest 4 bits are for snd_wscale - * The next 4 lsb are for rcv_wscale - * The next lsb is for sack_ok + * The lowest 4 bits store snd_wscale. + * next 2 bits indicate SACK and ECN support. + * + * return false if we decode an option that should not be. */ -void cookie_check_timestamp(struct tcp_options_received *tcp_opt) +bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok) { - /* echoed timestamp, 9 lowest bits contain options */ + /* echoed timestamp, lowest bits contain options */ u32 options = tcp_opt->rcv_tsecr & TSMASK; - tcp_opt->snd_wscale = options & 0xf; - options >>= 4; - tcp_opt->rcv_wscale = options & 0xf; + if (!tcp_opt->saw_tstamp) { + tcp_clear_options(tcp_opt); + return true; + } + + if (!sysctl_tcp_timestamps) + return false; tcp_opt->sack_ok = (options >> 4) & 0x1; + *ecn_ok = (options >> 5) & 1; + if (*ecn_ok && !sysctl_tcp_ecn) + return false; + + if (tcp_opt->sack_ok && !sysctl_tcp_sack) + return false; - if (tcp_opt->sack_ok) - tcp_sack_reset(tcp_opt); + if ((options & 0xf) == 0xf) + return true; /* no window scaling */ - if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale) - tcp_opt->wscale_ok = 1; + tcp_opt->wscale_ok = 1; + tcp_opt->snd_wscale = options & 0xf; + return sysctl_tcp_window_scaling != 0; } EXPORT_SYMBOL(cookie_check_timestamp); @@ -265,8 +276,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, int mss; struct rtable *rt; __u8 rcv_wscale; + bool ecn_ok; - if (!sysctl_tcp_syncookies || !th->ack) + if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; if (tcp_synq_no_recent_overflow(sk) || @@ -281,8 +293,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, &hash_location, 0); - if (tcp_opt.saw_tstamp) - cookie_check_timestamp(&tcp_opt); + if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) + goto out; ret = NULL; req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ @@ -298,9 +310,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, ireq->rmt_port = th->source; ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; - ireq->ecn_ok = 0; + ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; - ireq->rcv_wscale = tcp_opt.rcv_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; @@ -354,15 +365,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, } /* Try to redo what tcp_v4_send_synack 
did. */ - req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); + req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, &req->rcv_wnd, &req->window_clamp, ireq->wscale_ok, &rcv_wscale, - dst_metric(&rt->u.dst, RTAX_INITRWND)); + dst_metric(&rt->dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; - ret = get_cookie_sock(sk, skb, req, &rt->u.dst); + ret = get_cookie_sock(sk, skb, req, &rt->dst); out: return ret; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 65afeaec15b7ebb85610650353e881aacb3e90aa..176e11aaea771795b21c0be6b1453b46c6349f0c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -315,7 +315,6 @@ struct tcp_splice_state { * is strict, actions are advisory and have some latency. */ int tcp_memory_pressure __read_mostly; - EXPORT_SYMBOL(tcp_memory_pressure); void tcp_enter_memory_pressure(struct sock *sk) @@ -325,7 +324,6 @@ void tcp_enter_memory_pressure(struct sock *sk) tcp_memory_pressure = 1; } } - EXPORT_SYMBOL(tcp_enter_memory_pressure); /* Convert seconds to retransmits based on initial and max timeout */ @@ -460,6 +458,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) } return mask; } +EXPORT_SYMBOL(tcp_poll); int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) { @@ -508,10 +507,11 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) return put_user(answ, (int __user *)arg); } +EXPORT_SYMBOL(tcp_ioctl); static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; + TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; tp->pushed_seq = tp->write_seq; } @@ -527,7 +527,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb) skb->csum = 0; tcb->seq = tcb->end_seq = tp->write_seq; - tcb->flags = TCPCB_FLAG_ACK; + tcb->flags = TCPHDR_ACK; tcb->sacked = 0; skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); @@ -676,6 +676,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, return ret; } +EXPORT_SYMBOL(tcp_splice_read); struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) { @@ -816,7 +817,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse skb_shinfo(skb)->gso_segs = 0; if (!copied) - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; + TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH; copied += copy; poffset += copy; @@ -857,15 +858,15 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse return sk_stream_error(sk, flags, err); } -ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags) +int tcp_sendpage(struct sock *sk, struct page *page, int offset, + size_t size, int flags) { ssize_t res; - struct sock *sk = sock->sk; if (!(sk->sk_route_caps & NETIF_F_SG) || !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) - return sock_no_sendpage(sock, page, offset, size, flags); + return sock_no_sendpage(sk->sk_socket, page, offset, size, + flags); lock_sock(sk); TCP_CHECK_TIMER(sk); @@ -874,6 +875,7 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, release_sock(sk); return res; } +EXPORT_SYMBOL(tcp_sendpage); #define TCP_PAGE(sk) (sk->sk_sndmsg_page) #define TCP_OFF(sk) (sk->sk_sndmsg_off) @@ -898,10 +900,9 @@ static inline int select_size(struct sock *sk, int sg) return tmp; } -int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, +int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t 
size) { - struct sock *sk = sock->sk; struct iovec *iov; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -1062,7 +1063,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, } if (!copied) - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; + TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH; tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; @@ -1122,6 +1123,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, release_sock(sk); return err; } +EXPORT_SYMBOL(tcp_sendmsg); /* * Handle reading urgent data. BSD has very simple semantics for @@ -1381,6 +1383,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_cleanup_rbuf(sk, copied); return copied; } +EXPORT_SYMBOL(tcp_read_sock); /* * This routine copies from a sock struct into the user buffer. @@ -1775,6 +1778,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, err = tcp_recv_urg(sk, msg, len, flags); goto out; } +EXPORT_SYMBOL(tcp_recvmsg); void tcp_set_state(struct sock *sk, int state) { @@ -1867,6 +1871,7 @@ void tcp_shutdown(struct sock *sk, int how) tcp_send_fin(sk); } } +EXPORT_SYMBOL(tcp_shutdown); void tcp_close(struct sock *sk, long timeout) { @@ -1899,6 +1904,10 @@ void tcp_close(struct sock *sk, long timeout) sk_mem_reclaim(sk); + /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ + if (sk->sk_state == TCP_CLOSE) + goto adjudge_to_death; + /* As outlined in RFC 2525, section 2.17, we send a RST here because * data was lost. To witness the awful effects of the old behavior of * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk @@ -2026,6 +2035,7 @@ void tcp_close(struct sock *sk, long timeout) local_bh_enable(); sock_put(sk); } +EXPORT_SYMBOL(tcp_close); /* These states need RST on ABORT according to RFC793 */ @@ -2099,6 +2109,7 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_error_report(sk); return err; } +EXPORT_SYMBOL(tcp_disconnect); /* * Socket option code for TCP. 
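
Note on the flag rename seen in the tcp.c hunks above: TCPCB_FLAG_* becomes TCPHDR_*, making explicit that the value kept in TCP_SKB_CB(skb)->flags uses the flag bits of the on-the-wire TCP header, which tcp_transmit_skb() ORs directly into the header word. The short user-space sketch below only illustrates that encoding; the bit values mirror the standard TCP flag byte and are not quoted from the kernel headers.

/* Sketch only: TCP header flag bits as denoted by the TCPHDR_* names. */
#include <stdio.h>
#include <stdint.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

int main(void)
{
	uint8_t flags = TCPHDR_ACK;	/* ordinary data segment */

	flags |= TCPHDR_PSH;		/* what tcp_mark_push() sets */
	if (!(flags & TCPHDR_SYN))	/* the kind of test used in the hunks above */
		printf("data segment, flag byte 0x%02x\n", flags);

	/* Because the values match the wire format, this byte can be
	 * placed into a TCP header's flag field without translation. */
	return 0;
}
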
@@ -2176,6 +2187,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, GFP_KERNEL); if (cvp == NULL) return -ENOMEM; + + kref_init(&cvp->kref); } lock_sock(sk); tp->rx_opt.cookie_in_always = @@ -2190,12 +2203,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, */ kref_put(&tp->cookie_values->kref, tcp_cookie_values_release); - kref_init(&cvp->kref); - tp->cookie_values = cvp; } else { cvp = tp->cookie_values; } } + if (cvp != NULL) { cvp->cookie_desired = ctd.tcpct_cookie_desired; @@ -2209,6 +2221,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, cvp->s_data_desired = ctd.tcpct_s_data_desired; cvp->s_data_constant = 0; /* false */ } + + tp->cookie_values = cvp; } release_sock(sk); return err; @@ -2397,6 +2411,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, optval, optlen); return do_tcp_setsockopt(sk, level, optname, optval, optlen); } +EXPORT_SYMBOL(tcp_setsockopt); #ifdef CONFIG_COMPAT int compat_tcp_setsockopt(struct sock *sk, int level, int optname, @@ -2407,7 +2422,6 @@ int compat_tcp_setsockopt(struct sock *sk, int level, int optname, optval, optlen); return do_tcp_setsockopt(sk, level, optname, optval, optlen); } - EXPORT_SYMBOL(compat_tcp_setsockopt); #endif @@ -2473,7 +2487,6 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_total_retrans = tp->total_retrans; } - EXPORT_SYMBOL_GPL(tcp_get_info); static int do_tcp_getsockopt(struct sock *sk, int level, @@ -2591,6 +2604,12 @@ static int do_tcp_getsockopt(struct sock *sk, int level, return -EFAULT; return 0; } + case TCP_THIN_LINEAR_TIMEOUTS: + val = tp->thin_lto; + break; + case TCP_THIN_DUPACK: + val = tp->thin_dupack; + break; default: return -ENOPROTOOPT; } @@ -2612,6 +2631,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, optval, optlen); return do_tcp_getsockopt(sk, level, optname, optval, optlen); } +EXPORT_SYMBOL(tcp_getsockopt); #ifdef CONFIG_COMPAT int compat_tcp_getsockopt(struct sock *sk, int level, int optname, @@ -2622,7 +2642,6 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, optval, optlen); return do_tcp_getsockopt(sk, level, optname, optval, optlen); } - EXPORT_SYMBOL(compat_tcp_getsockopt); #endif @@ -2859,7 +2878,6 @@ void tcp_free_md5sig_pool(void) if (pool) __tcp_free_md5sig_pool(pool); } - EXPORT_SYMBOL(tcp_free_md5sig_pool); static struct tcp_md5sig_pool * __percpu * @@ -2935,7 +2953,6 @@ struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk) } return pool; } - EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -2959,7 +2976,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) spin_unlock(&tcp_md5sig_pool_lock); if (p) - return *per_cpu_ptr(p, smp_processor_id()); + return *this_cpu_ptr(p); local_bh_enable(); return NULL; @@ -2987,7 +3004,6 @@ int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, th->check = old_checksum; return err; } - EXPORT_SYMBOL(tcp_md5_hash_header); int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, @@ -3000,6 +3016,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, const unsigned head_data_len = skb_headlen(skb) > header_len ? 
skb_headlen(skb) - header_len : 0; const struct skb_shared_info *shi = skb_shinfo(skb); + struct sk_buff *frag_iter; sg_init_table(&sg, 1); @@ -3014,9 +3031,12 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, return 1; } + skb_walk_frags(skb, frag_iter) + if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) + return 1; + return 0; } - EXPORT_SYMBOL(tcp_md5_hash_skb_data); int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) @@ -3026,7 +3046,6 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) sg_init_one(&sg, key->key, key->keylen); return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); } - EXPORT_SYMBOL(tcp_md5_hash_key); #endif @@ -3298,16 +3317,3 @@ void __init tcp_init(void) tcp_secret_retiring = &tcp_secret_two; tcp_secret_secondary = &tcp_secret_two; } - -EXPORT_SYMBOL(tcp_close); -EXPORT_SYMBOL(tcp_disconnect); -EXPORT_SYMBOL(tcp_getsockopt); -EXPORT_SYMBOL(tcp_ioctl); -EXPORT_SYMBOL(tcp_poll); -EXPORT_SYMBOL(tcp_read_sock); -EXPORT_SYMBOL(tcp_recvmsg); -EXPORT_SYMBOL(tcp_sendmsg); -EXPORT_SYMBOL(tcp_splice_read); -EXPORT_SYMBOL(tcp_sendpage); -EXPORT_SYMBOL(tcp_setsockopt); -EXPORT_SYMBOL(tcp_shutdown); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 548d575e6cc684673ef9c7a7a613e2f444ec509c..3c426cb318e7061795c7abce24189fbb7ea443f0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -78,10 +78,13 @@ int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; +EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_ecn __read_mostly = 2; +EXPORT_SYMBOL(sysctl_tcp_ecn); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 2; +EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; @@ -419,6 +422,7 @@ void tcp_initialize_rcv_mss(struct sock *sk) inet_csk(sk)->icsk_ack.rcv_mss = hint; } +EXPORT_SYMBOL(tcp_initialize_rcv_mss); /* Receiver "autotuning" code. * @@ -2938,6 +2942,7 @@ void tcp_simple_retransmit(struct sock *sk) } tcp_xmit_retransmit_queue(sk); } +EXPORT_SYMBOL(tcp_simple_retransmit); /* Process an event, which can update packets-in-flight not trivially. * Main goal of this function is to calculate new estimate for left_out, @@ -3286,7 +3291,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, * connection startup slow start one packet too * quickly. This is severely frowned upon behavior. 
*/ - if (!(scb->flags & TCPCB_FLAG_SYN)) { + if (!(scb->flags & TCPHDR_SYN)) { flag |= FLAG_DATA_ACKED; } else { flag |= FLAG_SYN_ACKED; @@ -3858,6 +3863,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, } } } +EXPORT_SYMBOL(tcp_parse_options); static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th) { @@ -3931,6 +3937,7 @@ u8 *tcp_parse_md5sig_option(struct tcphdr *th) } return NULL; } +EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif static inline void tcp_store_ts_recent(struct tcp_sock *tp) @@ -5432,6 +5439,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, __kfree_skb(skb); return 0; } +EXPORT_SYMBOL(tcp_rcv_established); static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len) @@ -5931,14 +5939,4 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } return 0; } - -EXPORT_SYMBOL(sysctl_tcp_ecn); -EXPORT_SYMBOL(sysctl_tcp_reordering); -EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); -EXPORT_SYMBOL(tcp_parse_options); -#ifdef CONFIG_TCP_MD5SIG -EXPORT_SYMBOL(tcp_parse_md5sig_option); -#endif -EXPORT_SYMBOL(tcp_rcv_established); EXPORT_SYMBOL(tcp_rcv_state_process); -EXPORT_SYMBOL(tcp_initialize_rcv_mss); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fe193e53af447f49fe05a8cba7b495c38f421be2..020766292bb01009201ec11e60bb0e214854414c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -84,6 +84,7 @@ int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; +EXPORT_SYMBOL(sysctl_tcp_low_latency); #ifdef CONFIG_TCP_MD5SIG @@ -100,6 +101,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) #endif struct inet_hashinfo tcp_hashinfo; +EXPORT_SYMBOL(tcp_hashinfo); static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) { @@ -139,7 +141,6 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) return 0; } - EXPORT_SYMBOL_GPL(tcp_twsk_unique); /* This will initiate an outgoing connection. */ @@ -204,10 +205,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * TIME-WAIT * and initialize rx_opt.ts_recent from it, * when trying new connection. */ - if (peer != NULL && - (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { - tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; - tp->rx_opt.ts_recent = peer->tcp_ts; + if (peer) { + inet_peer_refcheck(peer); + if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { + tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; + tp->rx_opt.ts_recent = peer->tcp_ts; + } } } @@ -237,7 +240,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; - sk_setup_caps(sk, &rt->u.dst); + sk_setup_caps(sk, &rt->dst); if (!tp->write_seq) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, @@ -265,6 +268,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_dport = 0; return err; } +EXPORT_SYMBOL(tcp_v4_connect); /* * This routine does path mtu discovery as defined in RFC1191. 
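
Alongside the flag rename, the hunks above convert every rt->u.dst reference to rt->dst: the protocol-independent dst_entry is now reached as a plainly named embedded member of struct rtable instead of through a union called u, so route code takes &rt->dst and the enclosing rtable can still be recovered from a dst_entry pointer by simple pointer arithmetic. The stand-in types below sketch that layout and are not the kernel definitions.

/* Sketch only: why "&rt->dst" replaces "&rt->u.dst". */
#include <stdio.h>
#include <stddef.h>

struct dst_entry {			/* protocol-independent part of a route */
	int refcnt;
};

struct rtable {				/* IPv4 route; dst_entry embedded directly */
	struct dst_entry dst;
	unsigned int rt_flags;
};

/* container_of-style recovery of the rtable from its embedded dst */
#define dst_to_rtable(d) \
	((struct rtable *)((char *)(d) - offsetof(struct rtable, dst)))

int main(void)
{
	struct rtable rt = { .dst = { .refcnt = 1 }, .rt_flags = 0 };
	struct dst_entry *dst = &rt.dst;	/* previously &rt.u.dst */

	printf("same object: %d\n", dst_to_rtable(dst) == &rt);
	return 0;
}
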
@@ -543,6 +547,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); } +EXPORT_SYMBOL(tcp_v4_send_check); int tcp_v4_gso_send_check(struct sk_buff *skb) { @@ -793,19 +798,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) kfree(inet_rsk(req)->opt); } -#ifdef CONFIG_SYN_COOKIES -static void syn_flood_warning(struct sk_buff *skb) +static void syn_flood_warning(const struct sk_buff *skb) { - static unsigned long warntime; + const char *msg; - if (time_after(jiffies, (warntime + HZ * 60))) { - warntime = jiffies; - printk(KERN_INFO - "possible SYN flooding on port %d. Sending cookies.\n", - ntohs(tcp_hdr(skb)->dest)); - } -} +#ifdef CONFIG_SYN_COOKIES + if (sysctl_tcp_syncookies) + msg = "Sending cookies"; + else #endif + msg = "Dropping request"; + + pr_info("TCP: Possible SYN flooding on port %d. %s.\n", + ntohs(tcp_hdr(skb)->dest), msg); +} /* * Save and compile IPv4 options into the request_sock if needed. @@ -857,7 +863,6 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, { return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); } - EXPORT_SYMBOL(tcp_v4_md5_lookup); static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, @@ -924,7 +929,6 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, } return 0; } - EXPORT_SYMBOL(tcp_v4_md5_do_add); static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, @@ -962,7 +966,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) } return -ENOENT; } - EXPORT_SYMBOL(tcp_v4_md5_do_del); static void tcp_v4_clear_md5_list(struct sock *sk) @@ -1135,7 +1138,6 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, memset(md5_hash, 0, 16); return 1; } - EXPORT_SYMBOL(tcp_v4_md5_hash_skb); static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) @@ -1243,6 +1245,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * evidently real one. 
*/ if (inet_csk_reqsk_queue_is_full(sk) && !isn) { + if (net_ratelimit()) + syn_flood_warning(skb); #ifdef CONFIG_SYN_COOKIES if (sysctl_tcp_syncookies) { want_cookie = 1; @@ -1323,15 +1327,12 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; - if (!want_cookie) + if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); if (want_cookie) { -#ifdef CONFIG_SYN_COOKIES - syn_flood_warning(skb); - req->cookie_ts = tmp_opt.tstamp_ok; -#endif isn = cookie_v4_init_sequence(sk, skb, &req->mss); + req->cookie_ts = tmp_opt.tstamp_ok; } else if (!isn) { struct inet_peer *peer = NULL; @@ -1349,6 +1350,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) (dst = inet_csk_route_req(sk, req)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL && peer->v4daddr == saddr) { + inet_peer_refcheck(peer); if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) { @@ -1393,6 +1395,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) drop: return 0; } +EXPORT_SYMBOL(tcp_v4_conn_request); /* @@ -1478,6 +1481,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, dst_release(dst); return NULL; } +EXPORT_SYMBOL(tcp_v4_syn_recv_sock); static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { @@ -1504,7 +1508,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) } #ifdef CONFIG_SYN_COOKIES - if (!th->rst && !th->syn && th->ack) + if (!th->syn) sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); #endif return sk; @@ -1607,6 +1611,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; } +EXPORT_SYMBOL(tcp_v4_do_rcv); /* * From tcp_input.c @@ -1793,6 +1798,7 @@ int tcp_v4_remember_stamp(struct sock *sk) return 0; } +EXPORT_SYMBOL(tcp_v4_remember_stamp); int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) { @@ -1832,6 +1838,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = { .compat_getsockopt = compat_ip_getsockopt, #endif }; +EXPORT_SYMBOL(ipv4_specific); #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { @@ -1960,7 +1967,6 @@ void tcp_v4_destroy_sock(struct sock *sk) percpu_counter_dec(&tcp_sockets_allocated); } - EXPORT_SYMBOL(tcp_v4_destroy_sock); #ifdef CONFIG_PROC_FS @@ -1978,6 +1984,11 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw) hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; } +/* + * Get next listener socket follow cur. If cur is NULL, get first socket + * starting from bucket given in st->bucket; when st->bucket is zero the + * very first socket in the hash table is returned. 
+ */ static void *listening_get_next(struct seq_file *seq, void *cur) { struct inet_connection_sock *icsk; @@ -1988,14 +1999,15 @@ static void *listening_get_next(struct seq_file *seq, void *cur) struct net *net = seq_file_net(seq); if (!sk) { - st->bucket = 0; - ilb = &tcp_hashinfo.listening_hash[0]; + ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock_bh(&ilb->lock); sk = sk_nulls_head(&ilb->head); + st->offset = 0; goto get_sk; } ilb = &tcp_hashinfo.listening_hash[st->bucket]; ++st->num; + ++st->offset; if (st->state == TCP_SEQ_STATE_OPENREQ) { struct request_sock *req = cur; @@ -2010,6 +2022,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) } req = req->dl_next; } + st->offset = 0; if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) break; get_req: @@ -2045,6 +2058,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } spin_unlock_bh(&ilb->lock); + st->offset = 0; if (++st->bucket < INET_LHTABLE_SIZE) { ilb = &tcp_hashinfo.listening_hash[st->bucket]; spin_lock_bh(&ilb->lock); @@ -2058,7 +2072,12 @@ static void *listening_get_next(struct seq_file *seq, void *cur) static void *listening_get_idx(struct seq_file *seq, loff_t *pos) { - void *rc = listening_get_next(seq, NULL); + struct tcp_iter_state *st = seq->private; + void *rc; + + st->bucket = 0; + st->offset = 0; + rc = listening_get_next(seq, NULL); while (rc && *pos) { rc = listening_get_next(seq, rc); @@ -2073,13 +2092,18 @@ static inline int empty_bucket(struct tcp_iter_state *st) hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); } +/* + * Get first established socket starting from bucket given in st->bucket. + * If st->bucket is zero, the very first socket in the hash is returned. 
+ */ static void *established_get_first(struct seq_file *seq) { struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; - for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { + st->offset = 0; + for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; struct hlist_nulls_node *node; struct inet_timewait_sock *tw; @@ -2124,6 +2148,7 @@ static void *established_get_next(struct seq_file *seq, void *cur) struct net *net = seq_file_net(seq); ++st->num; + ++st->offset; if (st->state == TCP_SEQ_STATE_TIME_WAIT) { tw = cur; @@ -2140,6 +2165,7 @@ static void *established_get_next(struct seq_file *seq, void *cur) st->state = TCP_SEQ_STATE_ESTABLISHED; /* Look for next non empty bucket */ + st->offset = 0; while (++st->bucket <= tcp_hashinfo.ehash_mask && empty_bucket(st)) ; @@ -2167,7 +2193,11 @@ static void *established_get_next(struct seq_file *seq, void *cur) static void *established_get_idx(struct seq_file *seq, loff_t pos) { - void *rc = established_get_first(seq); + struct tcp_iter_state *st = seq->private; + void *rc; + + st->bucket = 0; + rc = established_get_first(seq); while (rc && pos) { rc = established_get_next(seq, rc); @@ -2192,24 +2222,72 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) return rc; } +static void *tcp_seek_last_pos(struct seq_file *seq) +{ + struct tcp_iter_state *st = seq->private; + int offset = st->offset; + int orig_num = st->num; + void *rc = NULL; + + switch (st->state) { + case TCP_SEQ_STATE_OPENREQ: + case TCP_SEQ_STATE_LISTENING: + if (st->bucket >= INET_LHTABLE_SIZE) + break; + st->state = TCP_SEQ_STATE_LISTENING; + rc = listening_get_next(seq, NULL); + while (offset-- && rc) + rc = listening_get_next(seq, rc); + if (rc) + break; + st->bucket = 0; + /* Fallthrough */ + case TCP_SEQ_STATE_ESTABLISHED: + case TCP_SEQ_STATE_TIME_WAIT: + st->state = TCP_SEQ_STATE_ESTABLISHED; + if (st->bucket > tcp_hashinfo.ehash_mask) + break; + rc = established_get_first(seq); + while (offset-- && rc) + rc = established_get_next(seq, rc); + } + + st->num = orig_num; + + return rc; +} + static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) { struct tcp_iter_state *st = seq->private; + void *rc; + + if (*pos && *pos == st->last_pos) { + rc = tcp_seek_last_pos(seq); + if (rc) + goto out; + } + st->state = TCP_SEQ_STATE_LISTENING; st->num = 0; - return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; + st->bucket = 0; + st->offset = 0; + rc = *pos ? 
tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; + +out: + st->last_pos = *pos; + return rc; } static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct tcp_iter_state *st = seq->private; void *rc = NULL; - struct tcp_iter_state *st; if (v == SEQ_START_TOKEN) { rc = tcp_get_idx(seq, 0); goto out; } - st = seq->private; switch (st->state) { case TCP_SEQ_STATE_OPENREQ: @@ -2217,6 +2295,8 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) rc = listening_get_next(seq, v); if (!rc) { st->state = TCP_SEQ_STATE_ESTABLISHED; + st->bucket = 0; + st->offset = 0; rc = established_get_first(seq); } break; @@ -2227,6 +2307,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) } out: ++*pos; + st->last_pos = *pos; return rc; } @@ -2265,6 +2346,7 @@ static int tcp_seq_open(struct inode *inode, struct file *file) s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; + s->last_pos = 0; return 0; } @@ -2288,11 +2370,13 @@ int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo) rc = -ENOMEM; return rc; } +EXPORT_SYMBOL(tcp_proc_register); void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) { proc_net_remove(net, afinfo->name); } +EXPORT_SYMBOL(tcp_proc_unregister); static void get_openreq4(struct sock *sk, struct request_sock *req, struct seq_file *f, int i, int uid, int *len) @@ -2516,6 +2600,8 @@ struct proto tcp_prot = { .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, + .sendmsg = tcp_sendmsg, + .sendpage = tcp_sendpage, .backlog_rcv = tcp_v4_do_rcv, .hash = inet_hash, .unhash = inet_unhash, @@ -2534,11 +2620,13 @@ struct proto tcp_prot = { .twsk_prot = &tcp_timewait_sock_ops, .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = &tcp_hashinfo, + .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif }; +EXPORT_SYMBOL(tcp_prot); static int __net_init tcp_sk_init(struct net *net) @@ -2569,20 +2657,3 @@ void __init tcp_v4_init(void) if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); } - -EXPORT_SYMBOL(ipv4_specific); -EXPORT_SYMBOL(tcp_hashinfo); -EXPORT_SYMBOL(tcp_prot); -EXPORT_SYMBOL(tcp_v4_conn_request); -EXPORT_SYMBOL(tcp_v4_connect); -EXPORT_SYMBOL(tcp_v4_do_rcv); -EXPORT_SYMBOL(tcp_v4_remember_stamp); -EXPORT_SYMBOL(tcp_v4_send_check); -EXPORT_SYMBOL(tcp_v4_syn_recv_sock); - -#ifdef CONFIG_PROC_FS -EXPORT_SYMBOL(tcp_proc_register); -EXPORT_SYMBOL(tcp_proc_unregister); -#endif -EXPORT_SYMBOL(sysctl_tcp_low_latency); - diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 794c2e122a41f2fafc08e60282f32a5485c8e87b..f25b56cb85cbd1b7a743bd7348158afff332811d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -47,7 +47,6 @@ struct inet_timewait_death_row tcp_death_row = { .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0, (unsigned long)&tcp_death_row), }; - EXPORT_SYMBOL_GPL(tcp_death_row); static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) @@ -262,6 +261,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, inet_twsk_put(tw); return TCP_TW_SUCCESS; } +EXPORT_SYMBOL(tcp_timewait_state_process); /* * Move a socket to time-wait or dead fin-wait-2 state. 
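
The /proc/net/tcp iterator hunks above add bucket, offset and last_pos bookkeeping plus tcp_seek_last_pos(): when a read of the seq_file starts at the same position where the previous read left off, the iterator resumes in the saved hash bucket and replays only the saved in-bucket offset instead of rescanning every bucket from zero. The simplified model below shows the shape of that fast path; table sizes and state are invented for illustration and it is not the kernel code.

/* Sketch only: resumable iteration over a bucketed table. */
#include <stdio.h>

#define NBUCKETS 4
static const int bucket_len[NBUCKETS] = { 3, 0, 2, 4 };	/* items per bucket */

struct iter_state {
	int bucket;	/* bucket the previous read stopped in */
	int offset;	/* items of that bucket already consumed */
	long last_pos;	/* seq_file position after the previous read */
};

/* Fast resume: start at the saved bucket and skip only "offset" items,
 * rolling into later buckets if the saved bucket is exhausted. */
static int seek_last_pos(const struct iter_state *st, int *bucket, int *idx)
{
	int skip = st->offset;

	for (*bucket = st->bucket; *bucket < NBUCKETS; (*bucket)++) {
		for (*idx = 0; *idx < bucket_len[*bucket]; (*idx)++)
			if (skip-- == 0)
				return 1;	/* next item to show */
	}
	return 0;				/* table exhausted */
}

int main(void)
{
	/* pretend the previous read ended after two items of bucket 2 */
	struct iter_state st = { .bucket = 2, .offset = 2, .last_pos = 5 };
	long pos = 5;		/* the next read starts at the same position */
	int bucket, idx;

	if (pos == st.last_pos && seek_last_pos(&st, &bucket, &idx))
		printf("resumed at bucket %d, item %d\n", bucket, idx);
	else
		printf("fell back to a full rescan from bucket 0\n");
	return 0;
}
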
@@ -362,7 +362,6 @@ void tcp_twsk_destructor(struct sock *sk) tcp_free_md5sig_pool(); #endif } - EXPORT_SYMBOL_GPL(tcp_twsk_destructor); static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, @@ -510,6 +509,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, } return newsk; } +EXPORT_SYMBOL(tcp_create_openreq_child); /* * Process an incoming packet for SYN_RECV sockets represented @@ -706,6 +706,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, inet_csk_reqsk_queue_drop(sk, req, prev); return NULL; } +EXPORT_SYMBOL(tcp_check_req); /* * Queue segment on the new socket if the new socket is active, @@ -737,8 +738,4 @@ int tcp_child_process(struct sock *parent, struct sock *child, sock_put(child); return ret; } - -EXPORT_SYMBOL(tcp_check_req); EXPORT_SYMBOL(tcp_child_process); -EXPORT_SYMBOL(tcp_create_openreq_child); -EXPORT_SYMBOL(tcp_timewait_state_process); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7ed9dc1042d1930a7eb2107def30642cb367fa92..de3bd84585881f99f8a23eb28fa9f380516d087c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -247,6 +247,7 @@ void tcp_select_initial_window(int __space, __u32 mss, /* Set the clamp no higher than max representable value */ (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); } +EXPORT_SYMBOL(tcp_select_initial_window); /* Chose a new window to advertise, update state in tcp_sock for the * socket, and return result with RFC1323 scaling applied. The return @@ -294,9 +295,9 @@ static u16 tcp_select_window(struct sock *sk) /* Packet ECN state for a SYN-ACK */ static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) { - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; + TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR; if (!(tp->ecn_flags & TCP_ECN_OK)) - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; + TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE; } /* Packet ECN state for a SYN. */ @@ -306,7 +307,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) tp->ecn_flags = 0; if (sysctl_tcp_ecn == 1) { - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; + TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR; tp->ecn_flags = TCP_ECN_OK; } } @@ -361,7 +362,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) skb_shinfo(skb)->gso_type = 0; TCP_SKB_CB(skb)->seq = seq; - if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN)) + if (flags & (TCPHDR_SYN | TCPHDR_FIN)) seq++; TCP_SKB_CB(skb)->end_seq = seq; } @@ -820,7 +821,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcb = TCP_SKB_CB(skb); memset(&opts, 0, sizeof(opts)); - if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) + if (unlikely(tcb->flags & TCPHDR_SYN)) tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); else tcp_options_size = tcp_established_options(sk, skb, &opts, @@ -843,7 +844,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags); - if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { + if (unlikely(tcb->flags & TCPHDR_SYN)) { /* RFC1323: The window in SYN & SYN/ACK segments * is never scaled. 
*/ @@ -866,7 +867,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, } tcp_options_write((__be32 *)(th + 1), tp, &opts); - if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0)) + if (likely((tcb->flags & TCPHDR_SYN) == 0)) TCP_ECN_send(sk, skb, tcp_header_size); #ifdef CONFIG_TCP_MD5SIG @@ -880,7 +881,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, icsk->icsk_af_ops->send_check(sk, skb); - if (likely(tcb->flags & TCPCB_FLAG_ACK)) + if (likely(tcb->flags & TCPHDR_ACK)) tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); if (skb->len != tcp_header_size) @@ -1023,7 +1024,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, /* PSH and FIN should only be set in the second packet. */ flags = TCP_SKB_CB(skb)->flags; - TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); + TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->flags = flags; TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; @@ -1189,6 +1190,7 @@ void tcp_mtup_init(struct sock *sk) icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); icsk->icsk_mtup.probe_size = 0; } +EXPORT_SYMBOL(tcp_mtup_init); /* This function synchronize snd mss to current pmtu/exthdr set. @@ -1232,6 +1234,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) return mss_now; } +EXPORT_SYMBOL(tcp_sync_mss); /* Compute the current effective MSS, taking SACKs and IP options, * and even PMTU discovery events into account. @@ -1328,8 +1331,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, u32 in_flight, cwnd; /* Don't be strict about the congestion window for the final FIN. */ - if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && - tcp_skb_pcount(skb) == 1) + if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1) return 1; in_flight = tcp_packets_in_flight(tp); @@ -1398,7 +1400,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, * Nagle can be ignored during F-RTO too (see RFC4138). */ if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || - (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) + (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)) return 1; if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) @@ -1461,7 +1463,7 @@ int tcp_may_send_now(struct sock *sk) * packet has never been sent out before (and thus is not cloned). */ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, - unsigned int mss_now) + unsigned int mss_now, gfp_t gfp) { struct sk_buff *buff; int nlen = skb->len - len; @@ -1471,7 +1473,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, if (skb->len != skb->data_len) return tcp_fragment(sk, skb, len, mss_now); - buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC); + buff = sk_stream_alloc_skb(sk, 0, gfp); if (unlikely(buff == NULL)) return -ENOMEM; @@ -1487,7 +1489,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, /* PSH and FIN should only be set in the second packet. */ flags = TCP_SKB_CB(skb)->flags; - TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); + TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->flags = flags; /* This packet was never sent out yet, so no SACK bits. 
*/ @@ -1518,7 +1520,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; - if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) + if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) goto send_now; if (icsk->icsk_ca_state != TCP_CA_Open) @@ -1644,7 +1646,7 @@ static int tcp_mtu_probe(struct sock *sk) TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; - TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; + TCP_SKB_CB(nskb)->flags = TCPHDR_ACK; TCP_SKB_CB(nskb)->sacked = 0; nskb->csum = 0; nskb->ip_summed = skb->ip_summed; @@ -1669,7 +1671,7 @@ static int tcp_mtu_probe(struct sock *sk) sk_wmem_free_skb(sk, skb); } else { TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & - ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); + ~(TCPHDR_FIN|TCPHDR_PSH); if (!skb_shinfo(skb)->nr_frags) { skb_pull(skb, copy); if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1769,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, cwnd_quota); if (skb->len > limit && - unlikely(tso_fragment(sk, skb, limit, mss_now))) + unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) break; TCP_SKB_CB(skb)->when = tcp_time_stamp; @@ -2020,7 +2022,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, if (!sysctl_tcp_retrans_collapse) return; - if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) + if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN) return; tcp_for_write_queue_from_safe(skb, tmp, sk) { @@ -2112,7 +2114,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) * since it is cheap to do so and saves bytes on the network. */ if (skb->len > 0 && - (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && + (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { if (!pskb_trim(skb, 0)) { /* Reuse, even though it does some unnecessary work */ @@ -2304,7 +2306,7 @@ void tcp_send_fin(struct sock *sk) mss_now = tcp_current_mss(sk); if (tcp_send_head(sk) != NULL) { - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; + TCP_SKB_CB(skb)->flags |= TCPHDR_FIN; TCP_SKB_CB(skb)->end_seq++; tp->write_seq++; } else { @@ -2321,7 +2323,7 @@ void tcp_send_fin(struct sock *sk) skb_reserve(skb, MAX_TCP_HEADER); /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ tcp_init_nondata_skb(skb, tp->write_seq, - TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); + TCPHDR_ACK | TCPHDR_FIN); tcp_queue_skb(sk, skb); } __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); @@ -2346,7 +2348,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, MAX_TCP_HEADER); tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), - TCPCB_FLAG_ACK | TCPCB_FLAG_RST); + TCPHDR_ACK | TCPHDR_RST); /* Send it off. 
*/ TCP_SKB_CB(skb)->when = tcp_time_stamp; if (tcp_transmit_skb(sk, skb, 0, priority)) @@ -2366,11 +2368,11 @@ int tcp_send_synack(struct sock *sk) struct sk_buff *skb; skb = tcp_write_queue_head(sk); - if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) { + if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) { printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); return -EFAULT; } - if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) { + if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) { if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); if (nskb == NULL) @@ -2384,7 +2386,7 @@ int tcp_send_synack(struct sock *sk) skb = nskb; } - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; + TCP_SKB_CB(skb)->flags |= TCPHDR_ACK; TCP_ECN_send_synack(tcp_sk(sk), skb); } TCP_SKB_CB(skb)->when = tcp_time_stamp; @@ -2463,7 +2465,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, * not even correctly set) */ tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, - TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); + TCPHDR_SYN | TCPHDR_ACK); if (OPTION_COOKIE_EXTENSION & opts.options) { if (s_data_desired) { @@ -2518,6 +2520,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, return skb; } +EXPORT_SYMBOL(tcp_make_synack); /* Do all connect socket setups that can be done AF independent. */ static void tcp_connect_init(struct sock *sk) @@ -2595,7 +2598,7 @@ int tcp_connect(struct sock *sk) skb_reserve(buff, MAX_TCP_HEADER); tp->snd_nxt = tp->write_seq; - tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN); + tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); TCP_ECN_send_syn(sk, buff); /* Send it off. */ @@ -2620,6 +2623,7 @@ int tcp_connect(struct sock *sk) inet_csk(sk)->icsk_rto, TCP_RTO_MAX); return 0; } +EXPORT_SYMBOL(tcp_connect); /* Send out a delayed ack, the caller does the policy checking * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() @@ -2701,7 +2705,7 @@ void tcp_send_ack(struct sock *sk) /* Reserve space for headers and prepare control bits. */ skb_reserve(buff, MAX_TCP_HEADER); - tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK); + tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); /* Send it off, this clears delayed acks for us. */ TCP_SKB_CB(buff)->when = tcp_time_stamp; @@ -2735,7 +2739,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) * end to send an ack. Don't queue or clone SKB, just * send it. 
*/ - tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK); + tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); TCP_SKB_CB(skb)->when = tcp_time_stamp; return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); } @@ -2765,13 +2769,13 @@ int tcp_write_wakeup(struct sock *sk) if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || skb->len > mss) { seg_size = min(seg_size, mss); - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; + TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; if (tcp_fragment(sk, skb, seg_size, mss)) return -1; } else if (!tcp_skb_pcount(skb)) tcp_set_skb_tso_segs(sk, skb, mss); - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; + TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; TCP_SKB_CB(skb)->when = tcp_time_stamp; err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (!err) @@ -2824,10 +2828,3 @@ void tcp_send_probe0(struct sock *sk) TCP_RTO_MAX); } } - -EXPORT_SYMBOL(tcp_select_initial_window); -EXPORT_SYMBOL(tcp_connect); -EXPORT_SYMBOL(tcp_make_synack); -EXPORT_SYMBOL(tcp_simple_retransmit); -EXPORT_SYMBOL(tcp_sync_mss); -EXPORT_SYMBOL(tcp_mtup_init); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 440a5c6004f6ac8d1b2d8404d641d0a174b241b4..808bb920c9f5e67249f6747c90cdeb7dddd9ddea 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -41,7 +41,6 @@ void tcp_init_xmit_timers(struct sock *sk) inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, &tcp_keepalive_timer); } - EXPORT_SYMBOL(tcp_init_xmit_timers); static void tcp_write_err(struct sock *sk) diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c index 3b3813cc80b9cb7e1c5afcf2f7acfb9dce32f50b..59186ca7808a20fcf6eda607491bcaba60cf2263 100644 --- a/net/ipv4/tunnel4.c +++ b/net/ipv4/tunnel4.c @@ -48,7 +48,6 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family) return ret; } - EXPORT_SYMBOL(xfrm4_tunnel_register); int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family) @@ -72,7 +71,6 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family) return ret; } - EXPORT_SYMBOL(xfrm4_tunnel_deregister); static int tunnel4_rcv(struct sk_buff *skb) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index eec4ff456e332e0b086d5451478fa5874213e0d1..32e0bef60d0afdbedd3d4cbdd3c57a9f9beafcdc 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -914,7 +914,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, !sock_flag(sk, SOCK_BROADCAST)) goto out; if (connected) - sk_dst_set(sk, dst_clone(&rt->u.dst)); + sk_dst_set(sk, dst_clone(&rt->dst)); } if (msg->msg_flags&MSG_CONFIRM) @@ -978,7 +978,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, return err; do_confirm: - dst_confirm(&rt->u.dst); + dst_confirm(&rt->dst); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 6610bf76369fadc5c22dafd76fd7552bf50c3277..ab76aa928fa98d6f1b8a8f6371d2b898ebdccb24 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -58,6 +58,7 @@ struct proto udplite_prot = { .compat_getsockopt = compat_udp_getsockopt, #endif }; +EXPORT_SYMBOL(udplite_prot); static struct inet_protosw udplite4_protosw = { .type = SOCK_DGRAM, @@ -127,5 +128,3 @@ void __init udplite4_register(void) out_register_err: printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); } - -EXPORT_SYMBOL(udplite_prot); diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 
ad8fbb871aa068cc9d299aa15580b06f45fc7187..06814b6216dc1bc71fedc9924b23d74b817a38d8 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -163,5 +163,4 @@ int xfrm4_rcv(struct sk_buff *skb) { return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0); } - EXPORT_SYMBOL(xfrm4_rcv); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 23883a48ebfb37e0fa5b210d9207e8ba5c12b733..869078d4eeb957a4982ab62e5d408e8fbf42117c 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -37,7 +37,7 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, fl.fl4_src = saddr->a4; err = __ip_route_output_key(net, &rt, &fl); - dst = &rt->u.dst; + dst = &rt->dst; if (err) dst = ERR_PTR(err); return dst; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 784f34d11fdd2f5672949a8f31b302377e6e6fc7..ab70a3fbcafafee9a080ea0f8c6eab2ce04fc37e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -121,8 +121,6 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev) static int __ipv6_regen_rndid(struct inet6_dev *idev); static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); static void ipv6_regen_rndid(unsigned long data); - -static int desync_factor = MAX_DESYNC_FACTOR * HZ; #endif static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); @@ -284,13 +282,16 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp, static int snmp6_alloc_dev(struct inet6_dev *idev) { if (snmp_mib_init((void __percpu **)idev->stats.ipv6, - sizeof(struct ipstats_mib)) < 0) + sizeof(struct ipstats_mib), + __alignof__(struct ipstats_mib)) < 0) goto err_ip; if (snmp_mib_init((void __percpu **)idev->stats.icmpv6, - sizeof(struct icmpv6_mib)) < 0) + sizeof(struct icmpv6_mib), + __alignof__(struct icmpv6_mib)) < 0) goto err_icmp; if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg, - sizeof(struct icmpv6msg_mib)) < 0) + sizeof(struct icmpv6msg_mib), + __alignof__(struct icmpv6msg_mib)) < 0) goto err_icmpmsg; return 0; @@ -557,7 +558,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) pr_warning("Freeing alive inet6 address %p\n", ifp); return; } - dst_release(&ifp->rt->u.dst); + dst_release(&ifp->rt->dst); call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); } @@ -823,7 +824,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) rt->rt6i_flags |= RTF_EXPIRES; } } - dst_release(&rt->u.dst); + dst_release(&rt->dst); } out: @@ -890,7 +891,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i idev->cnf.temp_valid_lft); tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, - idev->cnf.temp_prefered_lft - desync_factor / HZ); + idev->cnf.temp_prefered_lft - + idev->cnf.max_desync_factor); tmp_plen = ifp->prefix_len; max_addresses = idev->cnf.max_addresses; tmp_cstamp = ifp->cstamp; @@ -1650,7 +1652,8 @@ static void ipv6_regen_rndid(unsigned long data) expires = jiffies + idev->cnf.temp_prefered_lft * HZ - - idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - desync_factor; + idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - + idev->cnf.max_desync_factor * HZ; if (time_before(expires, jiffies)) { printk(KERN_WARNING "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n", @@ -1866,7 +1869,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) dev, expires, flags); } if (rt) - dst_release(&rt->u.dst); + dst_release(&rt->dst); } /* Try to figure out our local address for this prefix 
*/ @@ -3496,8 +3499,12 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, preferred -= tval; else preferred = 0; - if (valid != INFINITY_LIFE_TIME) - valid -= tval; + if (valid != INFINITY_LIFE_TIME) { + if (valid > tval) + valid -= tval; + else + valid = 0; + } } } else { preferred = INFINITY_LIFE_TIME; @@ -3859,12 +3866,28 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, memset(&stats[items], 0, pad); } +static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib, + int items, int bytes, size_t syncpoff) +{ + int i; + int pad = bytes - sizeof(u64) * items; + BUG_ON(pad < 0); + + /* Use put_unaligned() because stats may not be aligned for u64. */ + put_unaligned(items, &stats[0]); + for (i = 1; i < items; i++) + put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]); + + memset(&stats[items], 0, pad); +} + static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) { switch (attrtype) { case IFLA_INET6_STATS: - __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); + __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6, + IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp)); break; case IFLA_INET6_ICMP6STATS: __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); @@ -4097,11 +4120,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) if (ifp->idev->cnf.forwarding) addrconf_leave_anycast(ifp); addrconf_leave_solict(ifp->idev, &ifp->addr); - dst_hold(&ifp->rt->u.dst); + dst_hold(&ifp->rt->dst); if (ifp->state == INET6_IFADDR_STATE_DEAD && ip6_del_rt(ifp->rt)) - dst_free(&ifp->rt->u.dst); + dst_free(&ifp->rt->dst); break; } } diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 8c4348cb19505446e13471d09369610e41851146..f0e774cea386696a72560d9306f441a46817a96d 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -53,11 +53,7 @@ static struct ip6addrlbl_table static inline struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) { -#ifdef CONFIG_NET_NS - return lbl->lbl_net; -#else - return &init_net; -#endif + return read_pnet(&lbl->lbl_net); } /* diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e733942dafe124c00a67380d1bbd5f6d6acaf824..56b9bf2516f4d45b5b866e1e3086f54a05a7b530 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -522,10 +522,10 @@ const struct proto_ops inet6_stream_ops = { .shutdown = inet_shutdown, /* ok */ .setsockopt = sock_common_setsockopt, /* ok */ .getsockopt = sock_common_getsockopt, /* ok */ - .sendmsg = tcp_sendmsg, /* ok */ - .recvmsg = sock_common_recvmsg, /* ok */ + .sendmsg = inet_sendmsg, /* ok */ + .recvmsg = inet_recvmsg, /* ok */ .mmap = sock_no_mmap, - .sendpage = tcp_sendpage, + .sendpage = inet_sendpage, .splice_read = tcp_splice_read, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, @@ -549,7 +549,7 @@ const struct proto_ops inet6_dgram_ops = { .setsockopt = sock_common_setsockopt, /* ok */ .getsockopt = sock_common_getsockopt, /* ok */ .sendmsg = inet_sendmsg, /* ok */ - .recvmsg = sock_common_recvmsg, /* ok */ + .recvmsg = inet_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT @@ -651,7 +651,7 @@ int inet6_sk_rebuild_header(struct sock *sk) if (dst == NULL) { struct inet_sock *inet = inet_sk(sk); - struct in6_addr *final_p = NULL, final; + struct in6_addr *final_p, final; struct flowi fl; memset(&fl, 0, sizeof(fl)); @@ -665,12 +665,7 @@ int 
inet6_sk_rebuild_header(struct sock *sk) fl.fl_ip_sport = inet->inet_sport; security_sk_classify_flow(sk, &fl); - if (np->opt && np->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, np->opt, &final); err = ip6_dst_lookup(sk, &dst, &fl); if (err) { @@ -976,19 +971,24 @@ static void ipv6_packet_cleanup(void) static int __net_init ipv6_init_mibs(struct net *net) { if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, - sizeof (struct udp_mib)) < 0) + sizeof(struct udp_mib), + __alignof__(struct udp_mib)) < 0) return -ENOMEM; if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6, - sizeof (struct udp_mib)) < 0) + sizeof(struct udp_mib), + __alignof__(struct udp_mib)) < 0) goto err_udplite_mib; if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics, - sizeof(struct ipstats_mib)) < 0) + sizeof(struct ipstats_mib), + __alignof__(struct ipstats_mib)) < 0) goto err_ip_mib; if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, - sizeof(struct icmpv6_mib)) < 0) + sizeof(struct icmpv6_mib), + __alignof__(struct icmpv6_mib)) < 0) goto err_icmp_mib; if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics, - sizeof(struct icmpv6msg_mib)) < 0) + sizeof(struct icmpv6msg_mib), + __alignof__(struct icmpv6msg_mib)) < 0) goto err_icmpmsg_mib; return 0; diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index b5b07054508a5154315c99f0da67b903e9ce46ee..0e5e943446f08c60690170c4e8af6fdc962a645e 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -77,41 +77,40 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) pac->acl_next = NULL; ipv6_addr_copy(&pac->acl_addr, addr); + rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { dev = rt->rt6i_dev; - dev_hold(dev); - dst_release(&rt->u.dst); + dst_release(&rt->dst); } else if (ishost) { err = -EADDRNOTAVAIL; - goto out_free_pac; + goto error; } else { /* router, no matching interface: just pick one */ - - dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK); + dev = dev_get_by_flags_rcu(net, IFF_UP, + IFF_UP | IFF_LOOPBACK); } } else - dev = dev_get_by_index(net, ifindex); + dev = dev_get_by_index_rcu(net, ifindex); if (dev == NULL) { err = -ENODEV; - goto out_free_pac; + goto error; } - idev = in6_dev_get(dev); + idev = __in6_dev_get(dev); if (!idev) { if (ifindex) err = -ENODEV; else err = -EADDRNOTAVAIL; - goto out_dev_put; + goto error; } /* reset ishost, now that we have a specific device */ ishost = !idev->cnf.forwarding; - in6_dev_put(idev); pac->acl_ifindex = dev->ifindex; @@ -124,26 +123,22 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) if (ishost) err = -EADDRNOTAVAIL; if (err) - goto out_dev_put; + goto error; } err = ipv6_dev_ac_inc(dev, addr); - if (err) - goto out_dev_put; - - write_lock_bh(&ipv6_sk_ac_lock); - pac->acl_next = np->ipv6_ac_list; - np->ipv6_ac_list = pac; - write_unlock_bh(&ipv6_sk_ac_lock); - - dev_put(dev); - - return 0; + if (!err) { + write_lock_bh(&ipv6_sk_ac_lock); + pac->acl_next = np->ipv6_ac_list; + np->ipv6_ac_list = pac; + write_unlock_bh(&ipv6_sk_ac_lock); + pac = NULL; + } -out_dev_put: - dev_put(dev); -out_free_pac: - sock_kfree_s(sk, pac, sizeof(*pac)); +error: + rcu_read_unlock(); + if (pac) + sock_kfree_s(sk, pac, sizeof(*pac)); return err; } @@ -176,11 +171,12 @@ int ipv6_sock_ac_drop(struct sock *sk, 
int ifindex, struct in6_addr *addr) write_unlock_bh(&ipv6_sk_ac_lock); - dev = dev_get_by_index(net, pac->acl_ifindex); - if (dev) { + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + if (dev) ipv6_dev_ac_dec(dev, &pac->acl_addr); - dev_put(dev); - } + rcu_read_unlock(); + sock_kfree_s(sk, pac, sizeof(*pac)); return 0; } @@ -199,13 +195,12 @@ void ipv6_sock_ac_close(struct sock *sk) write_unlock_bh(&ipv6_sk_ac_lock); prev_index = 0; + rcu_read_lock(); while (pac) { struct ipv6_ac_socklist *next = pac->acl_next; if (pac->acl_ifindex != prev_index) { - if (dev) - dev_put(dev); - dev = dev_get_by_index(net, pac->acl_ifindex); + dev = dev_get_by_index_rcu(net, pac->acl_ifindex); prev_index = pac->acl_ifindex; } if (dev) @@ -213,8 +208,7 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } - if (dev) - dev_put(dev); + rcu_read_unlock(); } #if 0 @@ -250,7 +244,7 @@ static void aca_put(struct ifacaddr6 *ac) { if (atomic_dec_and_test(&ac->aca_refcnt)) { in6_dev_put(ac->aca_idev); - dst_release(&ac->aca_rt->u.dst); + dst_release(&ac->aca_rt->dst); kfree(ac); } } @@ -356,40 +350,39 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr) write_unlock_bh(&idev->lock); addrconf_leave_solict(idev, &aca->aca_addr); - dst_hold(&aca->aca_rt->u.dst); + dst_hold(&aca->aca_rt->dst); ip6_del_rt(aca->aca_rt); aca_put(aca); return 0; } +/* called with rcu_read_lock() */ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) { - int ret; - struct inet6_dev *idev = in6_dev_get(dev); + struct inet6_dev *idev = __in6_dev_get(dev); + if (idev == NULL) return -ENODEV; - ret = __ipv6_dev_ac_dec(idev, addr); - in6_dev_put(idev); - return ret; + return __ipv6_dev_ac_dec(idev, addr); } /* * check if the interface has this anycast address + * called with rcu_read_lock() */ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) { struct inet6_dev *idev; struct ifacaddr6 *aca; - idev = in6_dev_get(dev); + idev = __in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); for (aca = idev->ac_list; aca; aca = aca->aca_next) if (ipv6_addr_equal(&aca->aca_addr, addr)) break; read_unlock_bh(&idev->lock); - in6_dev_put(idev); return aca != NULL; } return 0; @@ -403,14 +396,15 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, { int found = 0; - if (dev) - return ipv6_chk_acast_dev(dev, addr); rcu_read_lock(); - for_each_netdev_rcu(net, dev) - if (ipv6_chk_acast_dev(dev, addr)) { - found = 1; - break; - } + if (dev) + found = ipv6_chk_acast_dev(dev, addr); + else + for_each_netdev_rcu(net, dev) + if (ipv6_chk_acast_dev(dev, addr)) { + found = 1; + break; + } rcu_read_unlock(); return found; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 712684687c9a9d8f25baca4ff07a86f0386820bd..7d929a22cbc2f505aca94ba5a6fd9cae412824dc 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -38,10 +38,11 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); - struct in6_addr *daddr, *final_p = NULL, final; + struct in6_addr *daddr, *final_p, final; struct dst_entry *dst; struct flowi fl; struct ip6_flowlabel *flowlabel = NULL; + struct ipv6_txoptions *opt; int addr_type; int err; @@ -155,19 +156,8 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) security_sk_classify_flow(sk, &fl); - if (flowlabel) 
{ - if (flowlabel->opt && flowlabel->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } - } else if (np->opt && np->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + opt = flowlabel ? flowlabel->opt : np->opt; + final_p = fl6_update_dst(&fl, opt, &final); err = ip6_dst_lookup(sk, &dst, &fl); if (err) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 8a659f92d17af2a271f1c4a0ba6e72af6f6bbea1..262f105d23b9399e3f6f043e307df3bf69535f6c 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -312,6 +312,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) Routing header. ********************************/ +/* called with rcu_read_lock() */ static int ipv6_rthdr_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); @@ -324,12 +325,9 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) struct net *net = dev_net(skb->dev); int accept_source_route = net->ipv6.devconf_all->accept_source_route; - idev = in6_dev_get(skb->dev); - if (idev) { - if (accept_source_route > idev->cnf.accept_source_route) - accept_source_route = idev->cnf.accept_source_route; - in6_dev_put(idev); - } + idev = __in6_dev_get(skb->dev); + if (idev && accept_source_route > idev->cnf.accept_source_route) + accept_source_route = idev->cnf.accept_source_route; if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + @@ -874,3 +872,27 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, return opt; } +/** + * fl6_update_dst - update flowi destination address with info given + * by srcrt option, if any. 
+ * + * @fl: flowi for which fl6_dst is to be updated + * @opt: struct ipv6_txoptions in which to look for srcrt opt + * @orig: copy of original fl6_dst address if modified + * + * Returns NULL if no txoptions or no srcrt, otherwise returns orig + * and initial value of fl->fl6_dst set in orig + */ +struct in6_addr *fl6_update_dst(struct flowi *fl, + const struct ipv6_txoptions *opt, + struct in6_addr *orig) +{ + if (!opt || !opt->srcrt) + return NULL; + + ipv6_addr_copy(orig, &fl->fl6_dst); + ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr); + return orig; +} + +EXPORT_SYMBOL_GPL(fl6_update_dst); diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 8e44f8f9c18846c8929e44b14e99c8825783916a..b1108ede18e1c4cec4eea90f9741441add7047fb 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -43,8 +43,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl, if (arg.result) return arg.result; - dst_hold(&net->ipv6.ip6_null_entry->u.dst); - return &net->ipv6.ip6_null_entry->u.dst; + dst_hold(&net->ipv6.ip6_null_entry->dst); + return &net->ipv6.ip6_null_entry->dst; } static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, @@ -86,7 +86,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, struct in6_addr saddr; if (ipv6_dev_get_saddr(net, - ip6_dst_idev(&rt->u.dst)->dev, + ip6_dst_idev(&rt->dst)->dev, &flp->fl6_dst, rt6_flags2srcprefs(flags), &saddr)) @@ -99,12 +99,12 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, goto out; } again: - dst_release(&rt->u.dst); + dst_release(&rt->dst); rt = NULL; goto out; discard_pkt: - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); out: arg->result = rt; return rt == NULL ? -EAGAIN : 0; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 0c5e3c3b7fd56d87222ec0c5a691f1e961a9f58b..8a1628023bd1826809fded79d7dc7c6c4aa6f41e 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb) struct ipv6_pinfo *np = inet6_sk(sk); struct flowi fl; struct dst_entry *dst; - struct in6_addr *final_p = NULL, final; + struct in6_addr *final_p, final; memset(&fl, 0, sizeof(fl)); fl.proto = sk->sk_protocol; @@ -199,12 +199,7 @@ int inet6_csk_xmit(struct sk_buff *skb) fl.fl_ip_dport = inet->inet_dport; security_sk_classify_flow(sk, &fl); - if (np->opt && np->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, np->opt, &final); dst = __inet6_csk_dst_check(sk, np->dst_cookie); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 92a122b7795d4c9d67049bc747f3875414f016ad..b6a585909d3560d0f9c0396fe8ed4b136c0ef5f6 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -165,7 +165,7 @@ static __inline__ void node_free(struct fib6_node * fn) static __inline__ void rt6_release(struct rt6_info *rt) { if (atomic_dec_and_test(&rt->rt6i_ref)) - dst_free(&rt->u.dst); + dst_free(&rt->dst); } static void fib6_link_table(struct net *net, struct fib6_table *tb) @@ -278,7 +278,7 @@ static int fib6_dump_node(struct fib6_walker_t *w) int res; struct rt6_info *rt; - for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { + for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { res = rt6_dump_route(rt, w->args); if (res < 0) { /* Frame is full, suspend walking */ @@ -619,7 +619,7 @@ static int fib6_add_rt2node(struct fib6_node 
*fn, struct rt6_info *rt, ins = &fn->leaf; - for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { + for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) { /* * Search for duplicates */ @@ -647,7 +647,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, if (iter->rt6i_metric > rt->rt6i_metric) break; - ins = &iter->u.dst.rt6_next; + ins = &iter->dst.rt6_next; } /* Reset round-robin state, if necessary */ @@ -658,7 +658,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, * insert node */ - rt->u.dst.rt6_next = iter; + rt->dst.rt6_next = iter; *ins = rt; rt->rt6i_node = fn; atomic_inc(&rt->rt6i_ref); @@ -799,7 +799,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) atomic_inc(&pn->leaf->rt6i_ref); } #endif - dst_free(&rt->u.dst); + dst_free(&rt->dst); } return err; @@ -810,7 +810,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) st_failure: if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) fib6_repair_tree(info->nl_net, fn); - dst_free(&rt->u.dst); + dst_free(&rt->dst); return err; #endif } @@ -1108,7 +1108,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, RT6_TRACE("fib6_del_route\n"); /* Unlink it */ - *rtp = rt->u.dst.rt6_next; + *rtp = rt->dst.rt6_next; rt->rt6i_node = NULL; net->ipv6.rt6_stats->fib_rt_entries--; net->ipv6.rt6_stats->fib_discarded_routes++; @@ -1122,14 +1122,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, FOR_WALKERS(w) { if (w->state == FWS_C && w->leaf == rt) { RT6_TRACE("walker %p adjusted by delroute\n", w); - w->leaf = rt->u.dst.rt6_next; + w->leaf = rt->dst.rt6_next; if (w->leaf == NULL) w->state = FWS_U; } } read_unlock(&fib6_walker_lock); - rt->u.dst.rt6_next = NULL; + rt->dst.rt6_next = NULL; /* If it was last route, expunge its radix tree node */ if (fn->leaf == NULL) { @@ -1168,7 +1168,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) struct rt6_info **rtp; #if RT6_DEBUG >= 2 - if (rt->u.dst.obsolete>0) { + if (rt->dst.obsolete>0) { WARN_ON(fn != NULL); return -ENOENT; } @@ -1195,7 +1195,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) * Walk the leaf entries looking for ourself */ - for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) { + for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) { if (*rtp == rt) { fib6_del_route(fn, rtp, info); return 0; @@ -1334,7 +1334,7 @@ static int fib6_clean_node(struct fib6_walker_t *w) .nl_net = c->net, }; - for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { + for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { res = c->func(rt, c->arg); if (res < 0) { w->leaf = rt; @@ -1448,8 +1448,8 @@ static int fib6_age(struct rt6_info *rt, void *arg) } gc_args.more++; } else if (rt->rt6i_flags & RTF_CACHE) { - if (atomic_read(&rt->u.dst.__refcnt) == 0 && - time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) { + if (atomic_read(&rt->dst.__refcnt) == 0 && + time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) { RT6_TRACE("aging clone %p\n", rt); return -1; } else if ((rt->rt6i_flags & RTF_GATEWAY) && diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 89425af0684cb160d1078fed8f3a56fa852ec58e..d40b330c0ee698af62f51f90caf86b2e6cf04c9f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -698,7 +698,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); for (;;) 
{ /* Prepare header of the next frame, @@ -726,7 +726,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) err = output(skb); if(!err) - IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), + IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); if (err || !frag) @@ -740,9 +740,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) kfree(tmp_hdr); if (err == 0) { - IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), + IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); - dst_release(&rt->u.dst); + dst_release(&rt->dst); return 0; } @@ -752,9 +752,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) frag = skb; } - IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), + IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); - dst_release(&rt->u.dst); + dst_release(&rt->dst); return err; } @@ -785,7 +785,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) * Allocate buffer. */ - if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { + if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); @@ -798,7 +798,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) */ ip6_copy_metadata(frag, skb); - skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); + skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); @@ -1156,24 +1156,24 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, /* need source address above miyazawa*/ } - dst_hold(&rt->u.dst); - inet->cork.dst = &rt->u.dst; + dst_hold(&rt->dst); + inet->cork.dst = &rt->dst; inet->cork.fl = *fl; np->cork.hop_limit = hlimit; np->cork.tclass = tclass; mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? - rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path); + rt->dst.dev->mtu : dst_mtu(rt->dst.path); if (np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } inet->cork.fragsize = mtu; - if (dst_allfrag(rt->u.dst.path)) + if (dst_allfrag(rt->dst.path)) inet->cork.flags |= IPCORK_ALLFRAG; inet->cork.length = 0; sk->sk_sndmsg_page = NULL; sk->sk_sndmsg_off = 0; - exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) - + exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; length += exthdrlen; transhdrlen += exthdrlen; @@ -1186,7 +1186,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, mtu = inet->cork.fragsize; } - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); + hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? 
opt->opt_nflen : 0); @@ -1224,7 +1224,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, } if (proto == IPPROTO_UDP && - (rt->u.dst.dev->features & NETIF_F_UFO)) { + (rt->dst.dev->features & NETIF_F_UFO)) { err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, fragheaderlen, @@ -1270,7 +1270,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, fraglen = datalen + fragheaderlen; if ((flags & MSG_MORE) && - !(rt->u.dst.dev->features&NETIF_F_SG)) + !(rt->dst.dev->features&NETIF_F_SG)) alloclen = mtu; else alloclen = datalen + fragheaderlen; @@ -1281,7 +1281,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, * because we have no idea if we're the last one. */ if (datalen == length + fraggap) - alloclen += rt->u.dst.trailer_len; + alloclen += rt->dst.trailer_len; /* * We just reserve space for fragment header. @@ -1358,7 +1358,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, if (copy > length) copy = length; - if (!(rt->u.dst.dev->features&NETIF_F_SG)) { + if (!(rt->dst.dev->features&NETIF_F_SG)) { unsigned int off; off = skb->len; @@ -1503,7 +1503,7 @@ int ip6_push_pending_frames(struct sock *sk) skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set(skb, dst_clone(&rt->dst)); IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); if (proto == IPPROTO_ICMPV6) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8f39893d808153993dcecf2326dd453dd2b630b9..0fd027f3f47e170608d75ea735a74f535838b80a 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -552,7 +552,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) goto out; - skb2->dev = rt->u.dst.dev; + skb2->dev = rt->dst.dev; /* route "incoming" packet */ if (rt->rt_flags & RTCF_LOCAL) { @@ -562,7 +562,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, fl.fl4_src = eiph->saddr; fl.fl4_tos = eiph->tos; if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || - rt->u.dst.dev->type != ARPHRD_TUNNEL) { + rt->dst.dev->type != ARPHRD_TUNNEL) { ip_rt_put(rt); goto out; } @@ -626,7 +626,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, icmpv6_send(skb2, rel_type, rel_code, rel_info); if (rt) - dst_release(&rt->u.dst); + dst_release(&rt->dst); kfree_skb(skb2); } @@ -1135,7 +1135,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } - dst_release(&rt->u.dst); + dst_release(&rt->dst); } } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index bd43f0152c21d5e7f5dca9b609a735d1a0573bac..a7f66bc8f0b0ef2a8b0eee454c1e103344eda417 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -55,8 +55,6 @@ #include -DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; - struct ip6_ra_chain *ip6_ra_chain; DEFINE_RWLOCK(ip6_ra_lock); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ab1622d7d409f0a853a956ee06db49aad930050a..d1444b95ad7e527c8c0f3e00b906567d5043ba21 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -152,18 +152,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) mc_lst->next = NULL; ipv6_addr_copy(&mc_lst->addr, addr); + rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { dev = rt->rt6i_dev; - dev_hold(dev); 
- dst_release(&rt->u.dst); + dst_release(&rt->dst); } } else - dev = dev_get_by_index(net, ifindex); + dev = dev_get_by_index_rcu(net, ifindex); if (dev == NULL) { + rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return -ENODEV; } @@ -180,8 +181,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) err = ipv6_dev_mc_inc(dev, addr); if (err) { + rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); - dev_put(dev); return err; } @@ -190,7 +191,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) np->ipv6_mc_list = mc_lst; write_unlock_bh(&ipv6_sk_mc_lock); - dev_put(dev); + rcu_read_unlock(); return 0; } @@ -213,18 +214,17 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) *lnk = mc_lst->next; write_unlock_bh(&ipv6_sk_mc_lock); - dev = dev_get_by_index(net, mc_lst->ifindex); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, mc_lst->ifindex); if (dev != NULL) { - struct inet6_dev *idev = in6_dev_get(dev); + struct inet6_dev *idev = __in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); - if (idev) { + if (idev) __ipv6_dev_mc_dec(idev, &mc_lst->addr); - in6_dev_put(idev); - } - dev_put(dev); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); + rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return 0; } @@ -234,43 +234,36 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return -EADDRNOTAVAIL; } -static struct inet6_dev *ip6_mc_find_dev(struct net *net, - struct in6_addr *group, - int ifindex) +/* called with rcu_read_lock() */ +static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, + struct in6_addr *group, + int ifindex) { struct net_device *dev = NULL; struct inet6_dev *idev = NULL; if (ifindex == 0) { - struct rt6_info *rt; + struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0); - rt = rt6_lookup(net, group, NULL, 0, 0); if (rt) { dev = rt->rt6i_dev; dev_hold(dev); - dst_release(&rt->u.dst); + dst_release(&rt->dst); } } else - dev = dev_get_by_index(net, ifindex); + dev = dev_get_by_index_rcu(net, ifindex); if (!dev) - goto nodev; - idev = in6_dev_get(dev); + return NULL; + idev = __in6_dev_get(dev); if (!idev) - goto release; + return NULL;; read_lock_bh(&idev->lock); - if (idev->dead) - goto unlock_release; - + if (idev->dead) { + read_unlock_bh(&idev->lock); + return NULL; + } return idev; - -unlock_release: - read_unlock_bh(&idev->lock); - in6_dev_put(idev); -release: - dev_put(dev); -nodev: - return NULL; } void ipv6_sock_mc_close(struct sock *sk) @@ -286,19 +279,17 @@ void ipv6_sock_mc_close(struct sock *sk) np->ipv6_mc_list = mc_lst->next; write_unlock_bh(&ipv6_sk_mc_lock); - dev = dev_get_by_index(net, mc_lst->ifindex); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, mc_lst->ifindex); if (dev) { - struct inet6_dev *idev = in6_dev_get(dev); + struct inet6_dev *idev = __in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); - if (idev) { + if (idev) __ipv6_dev_mc_dec(idev, &mc_lst->addr); - in6_dev_put(idev); - } - dev_put(dev); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); - + rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); write_lock_bh(&ipv6_sk_mc_lock); @@ -327,14 +318,17 @@ int ip6_mc_source(int add, int omode, struct sock *sk, if (!ipv6_addr_is_multicast(group)) return -EINVAL; - idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface); - if (!idev) + rcu_read_lock(); + idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface); + if (!idev) { + rcu_read_unlock(); 
return -ENODEV; + } dev = idev->dev; err = -EADDRNOTAVAIL; - read_lock_bh(&ipv6_sk_mc_lock); + read_lock(&ipv6_sk_mc_lock); for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) continue; @@ -358,7 +352,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, pmc->sfmode = omode; } - write_lock_bh(&pmc->sflock); + write_lock(&pmc->sflock); pmclocked = 1; psl = pmc->sflist; @@ -433,11 +427,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk, ip6_mc_add_src(idev, group, omode, 1, source, 1); done: if (pmclocked) - write_unlock_bh(&pmc->sflock); - read_unlock_bh(&ipv6_sk_mc_lock); + write_unlock(&pmc->sflock); + read_unlock(&ipv6_sk_mc_lock); read_unlock_bh(&idev->lock); - in6_dev_put(idev); - dev_put(dev); + rcu_read_unlock(); if (leavegroup) return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); return err; @@ -463,14 +456,17 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) gsf->gf_fmode != MCAST_EXCLUDE) return -EINVAL; - idev = ip6_mc_find_dev(net, group, gsf->gf_interface); + rcu_read_lock(); + idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); - if (!idev) + if (!idev) { + rcu_read_unlock(); return -ENODEV; + } dev = idev->dev; err = 0; - read_lock_bh(&ipv6_sk_mc_lock); + read_lock(&ipv6_sk_mc_lock); if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { leavegroup = 1; @@ -512,7 +508,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); } - write_lock_bh(&pmc->sflock); + write_lock(&pmc->sflock); psl = pmc->sflist; if (psl) { (void) ip6_mc_del_src(idev, group, pmc->sfmode, @@ -522,13 +518,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); pmc->sflist = newpsl; pmc->sfmode = gsf->gf_fmode; - write_unlock_bh(&pmc->sflock); + write_unlock(&pmc->sflock); err = 0; done: - read_unlock_bh(&ipv6_sk_mc_lock); + read_unlock(&ipv6_sk_mc_lock); read_unlock_bh(&idev->lock); - in6_dev_put(idev); - dev_put(dev); + rcu_read_unlock(); if (leavegroup) err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); return err; @@ -551,11 +546,13 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, if (!ipv6_addr_is_multicast(group)) return -EINVAL; - idev = ip6_mc_find_dev(net, group, gsf->gf_interface); + rcu_read_lock(); + idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); - if (!idev) + if (!idev) { + rcu_read_unlock(); return -ENODEV; - + } dev = idev->dev; err = -EADDRNOTAVAIL; @@ -577,8 +574,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, psl = pmc->sflist; count = psl ? psl->sl_count : 0; read_unlock_bh(&idev->lock); - in6_dev_put(idev); - dev_put(dev); + rcu_read_unlock(); copycount = count < gsf->gf_numsrc ? 
count : gsf->gf_numsrc; gsf->gf_numsrc = count; @@ -604,8 +600,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, return 0; done: read_unlock_bh(&idev->lock); - in6_dev_put(idev); - dev_put(dev); + rcu_read_unlock(); return err; } @@ -822,6 +817,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) struct ifmcaddr6 *mc; struct inet6_dev *idev; + /* we need to take a reference on idev */ idev = in6_dev_get(dev); if (idev == NULL) @@ -860,7 +856,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); ipv6_addr_copy(&mc->mca_addr, addr); - mc->idev = idev; + mc->idev = idev; /* (reference taken) */ mc->mca_users = 1; /* mca_stamp should be updated upon changes */ mc->mca_cstamp = mc->mca_tstamp = jiffies; @@ -915,16 +911,18 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) { - struct inet6_dev *idev = in6_dev_get(dev); + struct inet6_dev *idev; int err; - if (!idev) - return -ENODEV; - - err = __ipv6_dev_mc_dec(idev, addr); + rcu_read_lock(); - in6_dev_put(idev); + idev = __in6_dev_get(dev); + if (!idev) + err = -ENODEV; + else + err = __ipv6_dev_mc_dec(idev, addr); + rcu_read_unlock(); return err; } @@ -965,7 +963,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, struct ifmcaddr6 *mc; int rv = 0; - idev = in6_dev_get(dev); + rcu_read_lock(); + idev = __in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); for (mc = idev->mc_list; mc; mc=mc->next) { @@ -992,8 +991,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, rv = 1; /* don't filter unspecified source */ } read_unlock_bh(&idev->lock); - in6_dev_put(idev); } + rcu_read_unlock(); return rv; } @@ -1104,6 +1103,7 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, return 1; } +/* called with rcu_read_lock() */ int igmp6_event_query(struct sk_buff *skb) { struct mld2_query *mlh2 = NULL; @@ -1127,7 +1127,7 @@ int igmp6_event_query(struct sk_buff *skb) if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) return -EINVAL; - idev = in6_dev_get(skb->dev); + idev = __in6_dev_get(skb->dev); if (idev == NULL) return 0; @@ -1137,10 +1137,8 @@ int igmp6_event_query(struct sk_buff *skb) group_type = ipv6_addr_type(group); if (group_type != IPV6_ADDR_ANY && - !(group_type&IPV6_ADDR_MULTICAST)) { - in6_dev_put(idev); + !(group_type&IPV6_ADDR_MULTICAST)) return -EINVAL; - } if (len == 24) { int switchback; @@ -1161,10 +1159,9 @@ int igmp6_event_query(struct sk_buff *skb) } else if (len >= 28) { int srcs_offset = sizeof(struct mld2_query) - sizeof(struct icmp6hdr); - if (!pskb_may_pull(skb, srcs_offset)) { - in6_dev_put(idev); + if (!pskb_may_pull(skb, srcs_offset)) return -EINVAL; - } + mlh2 = (struct mld2_query *)skb_transport_header(skb); max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; if (!max_delay) @@ -1173,28 +1170,23 @@ int igmp6_event_query(struct sk_buff *skb) if (mlh2->mld2q_qrv) idev->mc_qrv = mlh2->mld2q_qrv; if (group_type == IPV6_ADDR_ANY) { /* general query */ - if (mlh2->mld2q_nsrcs) { - in6_dev_put(idev); + if (mlh2->mld2q_nsrcs) return -EINVAL; /* no sources allowed */ - } + mld_gq_start_timer(idev); - in6_dev_put(idev); return 0; } /* mark sources to include, if group & source-specific */ if (mlh2->mld2q_nsrcs != 0) { if (!pskb_may_pull(skb, srcs_offset + - ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) 
{ - in6_dev_put(idev); + ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) return -EINVAL; - } + mlh2 = (struct mld2_query *)skb_transport_header(skb); mark = 1; } - } else { - in6_dev_put(idev); + } else return -EINVAL; - } read_lock_bh(&idev->lock); if (group_type == IPV6_ADDR_ANY) { @@ -1227,12 +1219,11 @@ int igmp6_event_query(struct sk_buff *skb) } } read_unlock_bh(&idev->lock); - in6_dev_put(idev); return 0; } - +/* called with rcu_read_lock() */ int igmp6_event_report(struct sk_buff *skb) { struct ifmcaddr6 *ma; @@ -1260,7 +1251,7 @@ int igmp6_event_report(struct sk_buff *skb) !(addr_type&IPV6_ADDR_LINKLOCAL)) return -EINVAL; - idev = in6_dev_get(skb->dev); + idev = __in6_dev_get(skb->dev); if (idev == NULL) return -ENODEV; @@ -1280,7 +1271,6 @@ int igmp6_event_report(struct sk_buff *skb) } } read_unlock_bh(&idev->lock); - in6_dev_put(idev); return 0; } @@ -1396,12 +1386,14 @@ static void mld_sendpack(struct sk_buff *skb) struct mld2_report *pmr = (struct mld2_report *)skb_transport_header(skb); int payload_len, mldlen; - struct inet6_dev *idev = in6_dev_get(skb->dev); + struct inet6_dev *idev; struct net *net = dev_net(skb->dev); int err; struct flowi fl; struct dst_entry *dst; + rcu_read_lock(); + idev = __in6_dev_get(skb->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); @@ -1441,8 +1433,7 @@ static void mld_sendpack(struct sk_buff *skb) } else IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); - if (likely(idev != NULL)) - in6_dev_put(idev); + rcu_read_unlock(); return; err_out: @@ -1779,7 +1770,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) IPPROTO_ICMPV6, csum_partial(hdr, len, 0)); - idev = in6_dev_get(skb->dev); + rcu_read_lock(); + idev = __in6_dev_get(skb->dev); dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); if (!dst) { @@ -1806,8 +1798,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) } else IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); - if (likely(idev != NULL)) - in6_dev_put(idev); + rcu_read_unlock(); return; err_out: @@ -1998,8 +1989,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) &psf->sf_addr)) break; if (!dpsf) { - dpsf = (struct ip6_sf_list *) - kmalloc(sizeof(*dpsf), GFP_ATOMIC); + dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); if (!dpsf) continue; *dpsf = *psf; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 2efef52fb4616fb5d598026a35ecf1d76aba0fd8..58841c4ae947f982d3377009066ed636fa37ea75 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1229,7 +1229,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() got default router without neighbour.\n", __func__); - dst_release(&rt->u.dst); + dst_release(&rt->dst); in6_dev_put(in6_dev); return; } @@ -1244,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) if (ra_msg->icmph.icmp6_hop_limit) { in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; if (rt) - rt->u.dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit; + rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit; } skip_defrtr: @@ -1363,7 +1363,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) in6_dev->cnf.mtu6 = mtu; if (rt) - rt->u.dst.metrics[RTAX_MTU-1] = mtu; + rt->dst.metrics[RTAX_MTU-1] = mtu; rt6_mtu_change(skb->dev, mtu); } @@ -1384,7 +1384,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) } out: if (rt) - dst_release(&rt->u.dst); + dst_release(&rt->dst); else if 
(neigh) neigh_release(neigh); in6_dev_put(in6_dev); diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index a74951c039b6abdcc8844202b082eefd15da7da1..7155b2451d7cf297ab2b87d244e217ccf82f4071 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -151,9 +151,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, protocol, csum_sub(0, hsum))); skb->ip_summed = CHECKSUM_NONE; - csum = __skb_checksum_complete_head(skb, dataoff + len); - if (!csum) - skb->ip_summed = CHECKSUM_UNNECESSARY; + return __skb_checksum_complete_head(skb, dataoff + len); } return csum; }; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 8c201743d96d3fe8366603fc29d071c773c07166..413ab0754e1fe4923c139e632444f2cfcc623d86 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -43,7 +43,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long); static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; -static DEFINE_RWLOCK(queue_lock); +static DEFINE_SPINLOCK(queue_lock); static int peer_pid __read_mostly; static unsigned int copy_range __read_mostly; static unsigned int queue_total; @@ -73,10 +73,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range) break; case IPQ_COPY_PACKET: - copy_mode = mode; + if (range > 0xFFFF) + range = 0xFFFF; copy_range = range; - if (copy_range > 0xFFFF) - copy_range = 0xFFFF; + copy_mode = mode; break; default: @@ -102,7 +102,7 @@ ipq_find_dequeue_entry(unsigned long id) { struct nf_queue_entry *entry = NULL, *i; - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); list_for_each_entry(i, &queue_list, list) { if ((unsigned long)i == id) { @@ -116,7 +116,7 @@ ipq_find_dequeue_entry(unsigned long id) queue_total--; } - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return entry; } @@ -137,9 +137,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data) static void ipq_flush(ipq_cmpfn cmpfn, unsigned long data) { - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); __ipq_flush(cmpfn, data); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); } static struct sk_buff * @@ -153,9 +153,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) struct nlmsghdr *nlh; struct timeval tv; - read_lock_bh(&queue_lock); - - switch (copy_mode) { + switch (ACCESS_ONCE(copy_mode)) { case IPQ_COPY_META: case IPQ_COPY_NONE: size = NLMSG_SPACE(sizeof(*pmsg)); @@ -163,26 +161,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) case IPQ_COPY_PACKET: if (entry->skb->ip_summed == CHECKSUM_PARTIAL && - (*errp = skb_checksum_help(entry->skb))) { - read_unlock_bh(&queue_lock); + (*errp = skb_checksum_help(entry->skb))) return NULL; - } - if (copy_range == 0 || copy_range > entry->skb->len) + + data_len = ACCESS_ONCE(copy_range); + if (data_len == 0 || data_len > entry->skb->len) data_len = entry->skb->len; - else - data_len = copy_range; size = NLMSG_SPACE(sizeof(*pmsg) + data_len); break; default: *errp = -EINVAL; - read_unlock_bh(&queue_lock); return NULL; } - read_unlock_bh(&queue_lock); - skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; @@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) if (nskb == NULL) return status; - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); if (!peer_pid) goto err_out_free_nskb; @@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int 
queuenum) __ipq_enqueue_entry(entry); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return status; err_out_free_nskb: kfree_skb(nskb); err_out_unlock: - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return status; } @@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range) { int status; - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); status = __ipq_set_mode(mode, range); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return status; } @@ -441,11 +434,11 @@ __ipq_rcv_skb(struct sk_buff *skb) if (security_netlink_recv(skb, CAP_NET_ADMIN)) RCV_SKB_FAIL(-EPERM); - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); if (peer_pid) { if (peer_pid != pid) { - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); RCV_SKB_FAIL(-EBUSY); } } else { @@ -453,7 +446,7 @@ __ipq_rcv_skb(struct sk_buff *skb) peer_pid = pid; } - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); status = ipq_receive_peer(NLMSG_DATA(nlh), type, nlmsglen - NLMSG_LENGTH(0)); @@ -498,10 +491,10 @@ ipq_rcv_nl_event(struct notifier_block *this, struct netlink_notify *n = ptr; if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) { - write_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) __ipq_reset(); - write_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); } return NOTIFY_DONE; } @@ -528,7 +521,7 @@ static ctl_table ipq_table[] = { #ifdef CONFIG_PROC_FS static int ip6_queue_show(struct seq_file *m, void *v) { - read_lock_bh(&queue_lock); + spin_lock_bh(&queue_lock); seq_printf(m, "Peer PID : %d\n" @@ -546,7 +539,7 @@ static int ip6_queue_show(struct seq_file *m, void *v) queue_dropped, queue_user_dropped); - read_unlock_bh(&queue_lock); + spin_unlock_bh(&queue_lock); return 0; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 9d2d68f0e6053d97fdfc3f350135604309a35808..5359ef4daac5230e4c691c2100d8016dcb69e102 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -387,9 +387,7 @@ ip6t_do_table(struct sk_buff *skb, goto no_match; } - ADD_COUNTER(e->counters, - ntohs(ipv6_hdr(skb)->payload_len) + - sizeof(struct ipv6hdr), 1); + ADD_COUNTER(e->counters, skb->len, 1); t = ip6t_get_target_c(e); IP_NF_ASSERT(t->u.kernel.target); @@ -899,7 +897,7 @@ get_counters(const struct xt_table_info *t, struct ip6t_entry *iter; unsigned int cpu; unsigned int i; - unsigned int curcpu; + unsigned int curcpu = get_cpu(); /* Instead of clearing (by a previous call to memset()) * the counters and using adds, we set the counters @@ -909,14 +907,16 @@ get_counters(const struct xt_table_info *t, * if new softirq were to run and call ipt_do_table */ local_bh_disable(); - curcpu = smp_processor_id(); - i = 0; xt_entry_foreach(iter, t->entries[curcpu], t->size) { SET_COUNTER(counters[i], iter->counters.bcnt, iter->counters.pcnt); ++i; } + local_bh_enable(); + /* Processing counters from other cpus, we can let bottom half enabled, + * (preemption is disabled) + */ for_each_possible_cpu(cpu) { if (cpu == curcpu) @@ -930,7 +930,7 @@ get_counters(const struct xt_table_info *t, } xt_info_wrunlock(cpu); } - local_bh_enable(); + put_cpu(); } static struct xt_counters *alloc_counters(const struct xt_table *table) @@ -943,7 +943,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) (other than comefrom, which userspace doesn't care about). 
*/ countersize = sizeof(struct xt_counters) * private->number; - counters = vmalloc_node(countersize, numa_node_id()); + counters = vmalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); @@ -1213,8 +1213,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct ip6t_entry *iter; ret = 0; - counters = vmalloc_node(num_counters * sizeof(struct xt_counters), - numa_node_id()); + counters = vmalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; @@ -1368,7 +1367,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; - paddc = vmalloc_node(len - size, numa_node_id()); + paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index af4ee11f206609861708443150ae20abbc8e64b6..0a07ae7b933f2fbbac6fbf8cf9741d8bea6882e6 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -373,6 +373,56 @@ static void dump_packet(const struct nf_loginfo *info, printk("MARK=0x%x ", skb->mark); } +static void dump_mac_header(const struct nf_loginfo *info, + const struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + unsigned int logflags = 0; + + if (info->type == NF_LOG_TYPE_LOG) + logflags = info->u.log.logflags; + + if (!(logflags & IP6T_LOG_MACDECODE)) + goto fallback; + + switch (dev->type) { + case ARPHRD_ETHER: + printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ", + eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, + ntohs(eth_hdr(skb)->h_proto)); + return; + default: + break; + } + +fallback: + printk("MAC="); + if (dev->hard_header_len && + skb->mac_header != skb->network_header) { + const unsigned char *p = skb_mac_header(skb); + unsigned int len = dev->hard_header_len; + unsigned int i; + + if (dev->type == ARPHRD_SIT && + (p -= ETH_HLEN) < skb->head) + p = NULL; + + if (p != NULL) { + printk("%02x", *p++); + for (i = 1; i < len; i++) + printk(":%02x", p[i]); + } + printk(" "); + + if (dev->type == ARPHRD_SIT) { + const struct iphdr *iph = + (struct iphdr *)skb_mac_header(skb); + printk("TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr); + } + } else + printk(" "); +} + static struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { @@ -400,35 +450,10 @@ ip6t_log_packet(u_int8_t pf, prefix, in ? in->name : "", out ? out->name : ""); - if (in && !out) { - unsigned int len; - /* MAC logging for input chain only. */ - printk("MAC="); - if (skb->dev && (len = skb->dev->hard_header_len) && - skb->mac_header != skb->network_header) { - const unsigned char *p = skb_mac_header(skb); - int i; - - if (skb->dev->type == ARPHRD_SIT && - (p -= ETH_HLEN) < skb->head) - p = NULL; - - if (p != NULL) { - for (i = 0; i < len; i++) - printk("%02x%s", p[i], - i == len - 1 ? "" : ":"); - } - printk(" "); - if (skb->dev->type == ARPHRD_SIT) { - const struct iphdr *iph = - (struct iphdr *)skb_mac_header(skb); - printk("TUNNEL=%pI4->%pI4 ", - &iph->saddr, &iph->daddr); - } - } else - printk(" "); - } + /* MAC logging for input path only. 
*/ + if (in && !out) + dump_mac_header(loginfo, skb); dump_packet(loginfo, skb, skb_network_offset(skb), 1); printk("\n"); diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 9be81776415ed67a4a09d536d742cf80e4ea7941..1df3c8b6bf4723668e6b8c43666b3e31f4583fd8 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -208,7 +208,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, type = icmp6h->icmp6_type - 130; if (type >= 0 && type < sizeof(noct_valid_new) && noct_valid_new[type]) { - skb->nfct = &nf_conntrack_untracked.ct_general; + skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); return NF_ACCEPT; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 6fb890187de09b01f8dc6453b7862a70de2edb8f..098a050a20b09817095f8241dfa6395fd14b43d2 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -114,10 +114,8 @@ static void nf_skb_free(struct sk_buff *skb) } /* Memory Tracking Functions. */ -static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work) +static void frag_kfree_skb(struct sk_buff *skb) { - if (work) - *work -= skb->truesize; atomic_sub(skb->truesize, &nf_init_frags.mem); nf_skb_free(skb); kfree_skb(skb); @@ -271,6 +269,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, * in the chain of fragments so far. We must know where to put * this fragment, right? */ + prev = fq->q.fragments_tail; + if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) { + next = NULL; + goto found; + } prev = NULL; for (next = fq->q.fragments; next != NULL; next = next->next) { if (NFCT_FRAG6_CB(next)->offset >= offset) @@ -278,6 +281,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, prev = next; } +found: /* We found where to put this one. Check for overlap with * preceding fragment, and, if needed, align things so that * any overlaps are eliminated. @@ -335,7 +339,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, fq->q.fragments = next; fq->q.meat -= free_it->len; - frag_kfree_skb(free_it, NULL); + frag_kfree_skb(free_it); } } @@ -343,6 +347,8 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, /* Insert this fragment in the chain of fragments. 
*/ skb->next = next; + if (!next) + fq->q.fragments_tail = skb; if (prev) prev->next = skb; else @@ -442,7 +448,6 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); - atomic_sub(head->truesize, &nf_init_frags.mem); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; @@ -452,8 +457,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; - atomic_sub(fp->truesize, &nf_init_frags.mem); } + atomic_sub(head->truesize, &nf_init_frags.mem); head->next = NULL; head->dev = dev; @@ -467,6 +472,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) head->csum); fq->q.fragments = NULL; + fq->q.fragments_tail = NULL; /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ fp = skb_shinfo(head)->frag_list; diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 566798d69f37d223016cfc31a398d0649dad68bf..d082eaeefa25a039b5af47f4715e05f08e4ce813 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -174,17 +174,28 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, const struct snmp_mib *itemlist) { int i; - for (i=0; itemlist[i].name; i++) + + for (i = 0; itemlist[i].name; i++) seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, snmp_fold_field(mib, itemlist[i].entry)); } +static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib, + const struct snmp_mib *itemlist, size_t syncpoff) +{ + int i; + + for (i = 0; itemlist[i].name; i++) + seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name, + snmp_fold_field64(mib, itemlist[i].entry, syncpoff)); +} + static int snmp6_seq_show(struct seq_file *seq, void *v) { struct net *net = (struct net *)seq->private; - snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics, - snmp6_ipstats_list); + snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics, + snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, snmp6_icmp6_list); snmp6_seq_show_icmpv6msg(seq, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4a4dcbe4f8b22bdc813cc75aaf821b023952d187..e677937a07fc2e4e4a28cd6b3ac044a808d3d355 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -602,31 +602,33 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, } static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, - struct flowi *fl, struct rt6_info *rt, + struct flowi *fl, struct dst_entry **dstp, unsigned int flags) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *iph; struct sk_buff *skb; int err; + struct rt6_info *rt = (struct rt6_info *)*dstp; - if (length > rt->u.dst.dev->mtu) { - ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu); + if (length > rt->dst.dev->mtu) { + ipv6_local_error(sk, EMSGSIZE, fl, rt->dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, - length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, + length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; - skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); + skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set(skb, &rt->dst); + 
*dstp = NULL; skb_put(skb, length); skb_reset_network_header(skb); @@ -641,7 +643,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, - rt->u.dst.dev, dst_output); + rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) @@ -725,7 +727,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, { struct ipv6_txoptions opt_space; struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; - struct in6_addr *daddr, *final_p = NULL, final; + struct in6_addr *daddr, *final_p, final; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct raw6_sock *rp = raw6_sk(sk); @@ -847,13 +849,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) ipv6_addr_copy(&fl.fl6_src, &np->saddr); - /* merge ip6_build_xmit from ip6_output */ - if (opt && opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, opt, &final); if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) fl.oif = np->mcast_oif; @@ -892,9 +888,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, goto do_confirm; back_from_confirm: - if (inet->hdrincl) { - err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags); - } else { + if (inet->hdrincl) + err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, &dst, msg->msg_flags); + else { lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 6d4292ff585463178885153cdd2fe6e044b962a9..545c4141b755ee91277c6bd2912de4f42813bb05 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -150,11 +150,8 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) EXPORT_SYMBOL(ip6_frag_match); /* Memory Tracking Functions. */ -static inline void frag_kfree_skb(struct netns_frags *nf, - struct sk_buff *skb, int *work) +static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) { - if (work) - *work -= skb->truesize; atomic_sub(skb->truesize, &nf->mem); kfree_skb(skb); } @@ -336,6 +333,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, * in the chain of fragments so far. We must know where to put * this fragment, right? */ + prev = fq->q.fragments_tail; + if (!prev || FRAG6_CB(prev)->offset < offset) { + next = NULL; + goto found; + } prev = NULL; for(next = fq->q.fragments; next != NULL; next = next->next) { if (FRAG6_CB(next)->offset >= offset) @@ -343,6 +345,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, prev = next; } +found: /* We found where to put this one. Check for overlap with * preceding fragment, and, if needed, align things so that * any overlaps are eliminated. @@ -392,7 +395,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, fq->q.fragments = next; fq->q.meat -= free_it->len; - frag_kfree_skb(fq->q.net, free_it, NULL); + frag_kfree_skb(fq->q.net, free_it); } } @@ -400,6 +403,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, /* Insert this fragment in the chain of fragments. 
*/ skb->next = next; + if (!next) + fq->q.fragments_tail = skb; if (prev) prev->next = skb; else @@ -466,6 +471,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, goto out_oom; fp->next = head->next; + if (!fp->next) + fq->q.fragments_tail = fp; prev->next = fp; skb_morph(head, fq->q.fragments); @@ -524,7 +531,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); - atomic_sub(head->truesize, &fq->q.net->mem); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; @@ -534,8 +540,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; - atomic_sub(fp->truesize, &fq->q.net->mem); } + atomic_sub(head->truesize, &fq->q.net->mem); head->next = NULL; head->dev = dev; @@ -553,6 +559,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; + fq->q.fragments_tail = NULL; return 1; out_oversize: diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 252d76199c41e7589b85416bef1145e407239126..8f2d0400cf8ae616ad4283a6d846d8fa4b684d40 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -126,16 +126,14 @@ static struct dst_ops ip6_dst_blackhole_ops = { }; static struct rt6_info ip6_null_entry_template = { - .u = { - .dst = { - .__refcnt = ATOMIC_INIT(1), - .__use = 1, - .obsolete = -1, - .error = -ENETUNREACH, - .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, - .input = ip6_pkt_discard, - .output = ip6_pkt_discard_out, - } + .dst = { + .__refcnt = ATOMIC_INIT(1), + .__use = 1, + .obsolete = -1, + .error = -ENETUNREACH, + .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, + .input = ip6_pkt_discard, + .output = ip6_pkt_discard_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, @@ -149,16 +147,14 @@ static int ip6_pkt_prohibit(struct sk_buff *skb); static int ip6_pkt_prohibit_out(struct sk_buff *skb); static struct rt6_info ip6_prohibit_entry_template = { - .u = { - .dst = { - .__refcnt = ATOMIC_INIT(1), - .__use = 1, - .obsolete = -1, - .error = -EACCES, - .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, - .input = ip6_pkt_prohibit, - .output = ip6_pkt_prohibit_out, - } + .dst = { + .__refcnt = ATOMIC_INIT(1), + .__use = 1, + .obsolete = -1, + .error = -EACCES, + .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, + .input = ip6_pkt_prohibit, + .output = ip6_pkt_prohibit_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, @@ -167,16 +163,14 @@ static struct rt6_info ip6_prohibit_entry_template = { }; static struct rt6_info ip6_blk_hole_entry_template = { - .u = { - .dst = { - .__refcnt = ATOMIC_INIT(1), - .__use = 1, - .obsolete = -1, - .error = -EINVAL, - .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, - .input = dst_discard, - .output = dst_discard, - } + .dst = { + .__refcnt = ATOMIC_INIT(1), + .__use = 1, + .obsolete = -1, + .error = -EINVAL, + .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, + .input = dst_discard, + .output = dst_discard, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, @@ -249,7 +243,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net, if (!oif && ipv6_addr_any(saddr)) goto out; - for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { + for (sprt = rt; 
sprt; sprt = sprt->dst.rt6_next) { struct net_device *dev = sprt->rt6i_dev; if (oif) { @@ -407,10 +401,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn, match = NULL; for (rt = rr_head; rt && rt->rt6i_metric == metric; - rt = rt->u.dst.rt6_next) + rt = rt->dst.rt6_next) match = find_match(rt, oif, strict, &mpri, match); for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric; - rt = rt->u.dst.rt6_next) + rt = rt->dst.rt6_next) match = find_match(rt, oif, strict, &mpri, match); return match; @@ -432,7 +426,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) if (!match && (strict & RT6_LOOKUP_F_REACHABLE)) { - struct rt6_info *next = rt0->u.dst.rt6_next; + struct rt6_info *next = rt0->dst.rt6_next; /* no entries matched; do round-robin */ if (!next || next->rt6i_metric != rt0->rt6i_metric) @@ -517,7 +511,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, rt->rt6i_expires = jiffies + HZ * lifetime; rt->rt6i_flags |= RTF_EXPIRES; } - dst_release(&rt->u.dst); + dst_release(&rt->dst); } return 0; } @@ -555,7 +549,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags); BACKTRACK(net, &fl->fl6_src); out: - dst_use(&rt->u.dst, jiffies); + dst_use(&rt->dst, jiffies); read_unlock_bh(&table->tb6_lock); return rt; @@ -643,7 +637,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); rt->rt6i_dst.plen = 128; rt->rt6i_flags |= RTF_CACHE; - rt->u.dst.flags |= DST_HOST; + rt->dst.flags |= DST_HOST; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { @@ -677,7 +671,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad if (net_ratelimit()) printk(KERN_WARNING "Neighbour table overflow.\n"); - dst_free(&rt->u.dst); + dst_free(&rt->dst); return NULL; } rt->rt6i_nexthop = neigh; @@ -694,7 +688,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); rt->rt6i_dst.plen = 128; rt->rt6i_flags |= RTF_CACHE; - rt->u.dst.flags |= DST_HOST; + rt->dst.flags |= DST_HOST; rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop); } return rt; @@ -726,7 +720,7 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, rt->rt6i_flags & RTF_CACHE) goto out; - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) @@ -739,10 +733,10 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, #endif } - dst_release(&rt->u.dst); + dst_release(&rt->dst); rt = nrt ? : net->ipv6.ip6_null_entry; - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); if (nrt) { err = ip6_ins_rt(nrt); if (!err) @@ -756,7 +750,7 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, * Race condition! In the gap, when table->tb6_lock was * released someone could insert this route. Relookup. 
*/ - dst_release(&rt->u.dst); + dst_release(&rt->dst); goto relookup; out: @@ -764,11 +758,11 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, reachable = 0; goto restart_2; } - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); out2: - rt->u.dst.lastuse = jiffies; - rt->u.dst.__use++; + rt->dst.lastuse = jiffies; + rt->dst.__use++; return rt; } @@ -835,15 +829,15 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl struct dst_entry *new = NULL; if (rt) { - new = &rt->u.dst; + new = &rt->dst; atomic_set(&new->__refcnt, 1); new->__use = 1; new->input = dst_discard; new->output = dst_discard; - memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); - new->dev = ort->u.dst.dev; + memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); + new->dev = ort->dst.dev; if (new->dev) dev_hold(new->dev); rt->rt6i_idev = ort->rt6i_idev; @@ -912,7 +906,7 @@ static void ip6_link_failure(struct sk_buff *skb) rt = (struct rt6_info *) skb_dst(skb); if (rt) { if (rt->rt6i_flags&RTF_CACHE) { - dst_set_expires(&rt->u.dst, 0); + dst_set_expires(&rt->dst, 0); rt->rt6i_flags |= RTF_EXPIRES; } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) rt->rt6i_node->fn_sernum = -1; @@ -986,14 +980,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, rt->rt6i_dev = dev; rt->rt6i_idev = idev; rt->rt6i_nexthop = neigh; - atomic_set(&rt->u.dst.__refcnt, 1); - rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; - rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); - rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); - rt->u.dst.output = ip6_output; + atomic_set(&rt->dst.__refcnt, 1); + rt->dst.metrics[RTAX_HOPLIMIT-1] = 255; + rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); + rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst)); + rt->dst.output = ip6_output; #if 0 /* there's no chance to use these for ndisc */ - rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST + rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST ? DST_HOST : 0; ipv6_addr_copy(&rt->rt6i_dst.addr, addr); @@ -1001,14 +995,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, #endif spin_lock_bh(&icmp6_dst_lock); - rt->u.dst.next = icmp6_dst_gc_list; - icmp6_dst_gc_list = &rt->u.dst; + rt->dst.next = icmp6_dst_gc_list; + icmp6_dst_gc_list = &rt->dst; spin_unlock_bh(&icmp6_dst_lock); fib6_force_start_gc(net); out: - return &rt->u.dst; + return &rt->dst; } int icmp6_dst_gc(void) @@ -1090,11 +1084,11 @@ static int ipv6_get_mtu(struct net_device *dev) int mtu = IPV6_MIN_MTU; struct inet6_dev *idev; - idev = in6_dev_get(dev); - if (idev) { + rcu_read_lock(); + idev = __in6_dev_get(dev); + if (idev) mtu = idev->cnf.mtu6; - in6_dev_put(idev); - } + rcu_read_unlock(); return mtu; } @@ -1103,12 +1097,15 @@ int ip6_dst_hoplimit(struct dst_entry *dst) int hoplimit = dst_metric(dst, RTAX_HOPLIMIT); if (hoplimit < 0) { struct net_device *dev = dst->dev; - struct inet6_dev *idev = in6_dev_get(dev); - if (idev) { + struct inet6_dev *idev; + + rcu_read_lock(); + idev = __in6_dev_get(dev); + if (idev) hoplimit = idev->cnf.hop_limit; - in6_dev_put(idev); - } else + else hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; + rcu_read_unlock(); } return hoplimit; } @@ -1159,7 +1156,7 @@ int ip6_route_add(struct fib6_config *cfg) goto out; } - rt->u.dst.obsolete = -1; + rt->dst.obsolete = -1; rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ? 
jiffies + clock_t_to_jiffies(cfg->fc_expires) : 0; @@ -1171,16 +1168,16 @@ int ip6_route_add(struct fib6_config *cfg) addr_type = ipv6_addr_type(&cfg->fc_dst); if (addr_type & IPV6_ADDR_MULTICAST) - rt->u.dst.input = ip6_mc_input; + rt->dst.input = ip6_mc_input; else - rt->u.dst.input = ip6_forward; + rt->dst.input = ip6_forward; - rt->u.dst.output = ip6_output; + rt->dst.output = ip6_output; ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); rt->rt6i_dst.plen = cfg->fc_dst_len; if (rt->rt6i_dst.plen == 128) - rt->u.dst.flags = DST_HOST; + rt->dst.flags = DST_HOST; #ifdef CONFIG_IPV6_SUBTREES ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); @@ -1208,9 +1205,9 @@ int ip6_route_add(struct fib6_config *cfg) goto out; } } - rt->u.dst.output = ip6_pkt_discard_out; - rt->u.dst.input = ip6_pkt_discard; - rt->u.dst.error = -ENETUNREACH; + rt->dst.output = ip6_pkt_discard_out; + rt->dst.input = ip6_pkt_discard; + rt->dst.error = -ENETUNREACH; rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; goto install_route; } @@ -1244,7 +1241,7 @@ int ip6_route_add(struct fib6_config *cfg) goto out; if (dev) { if (dev != grt->rt6i_dev) { - dst_release(&grt->u.dst); + dst_release(&grt->dst); goto out; } } else { @@ -1255,7 +1252,7 @@ int ip6_route_add(struct fib6_config *cfg) } if (!(grt->rt6i_flags&RTF_GATEWAY)) err = 0; - dst_release(&grt->u.dst); + dst_release(&grt->dst); if (err) goto out; @@ -1294,18 +1291,18 @@ int ip6_route_add(struct fib6_config *cfg) goto out; } - rt->u.dst.metrics[type - 1] = nla_get_u32(nla); + rt->dst.metrics[type - 1] = nla_get_u32(nla); } } } - if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) - rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; - if (!dst_mtu(&rt->u.dst)) - rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); - if (!dst_metric(&rt->u.dst, RTAX_ADVMSS)) - rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); - rt->u.dst.dev = dev; + if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0) + rt->dst.metrics[RTAX_HOPLIMIT-1] = -1; + if (!dst_mtu(&rt->dst)) + rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); + if (!dst_metric(&rt->dst, RTAX_ADVMSS)) + rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst)); + rt->dst.dev = dev; rt->rt6i_idev = idev; rt->rt6i_table = table; @@ -1319,7 +1316,7 @@ int ip6_route_add(struct fib6_config *cfg) if (idev) in6_dev_put(idev); if (rt) - dst_free(&rt->u.dst); + dst_free(&rt->dst); return err; } @@ -1336,7 +1333,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) write_lock_bh(&table->tb6_lock); err = fib6_del(rt, info); - dst_release(&rt->u.dst); + dst_release(&rt->dst); write_unlock_bh(&table->tb6_lock); @@ -1369,7 +1366,7 @@ static int ip6_route_del(struct fib6_config *cfg) &cfg->fc_src, cfg->fc_src_len); if (fn) { - for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { + for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { if (cfg->fc_ifindex && (rt->rt6i_dev == NULL || rt->rt6i_dev->ifindex != cfg->fc_ifindex)) @@ -1379,7 +1376,7 @@ static int ip6_route_del(struct fib6_config *cfg) continue; if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) continue; - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); return __ip6_del_rt(rt, &cfg->fc_nlinfo); @@ -1421,7 +1418,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, read_lock_bh(&table->tb6_lock); fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); restart: - for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { + for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { /* * Current 
route is on-link; redirect is always invalid. * @@ -1445,7 +1442,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, rt = net->ipv6.ip6_null_entry; BACKTRACK(net, &fl->fl6_src); out: - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); @@ -1513,10 +1510,10 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, * Look, redirects are sent only in response to data packets, * so that this nexthop apparently is reachable. --ANK */ - dst_confirm(&rt->u.dst); + dst_confirm(&rt->dst); /* Duplicate redirect: silently ignore. */ - if (neigh == rt->u.dst.neighbour) + if (neigh == rt->dst.neighbour) goto out; nrt = ip6_rt_copy(rt); @@ -1529,20 +1526,20 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, ipv6_addr_copy(&nrt->rt6i_dst.addr, dest); nrt->rt6i_dst.plen = 128; - nrt->u.dst.flags |= DST_HOST; + nrt->dst.flags |= DST_HOST; ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); nrt->rt6i_nexthop = neigh_clone(neigh); /* Reset pmtu, it may be better */ - nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); - nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev), - dst_mtu(&nrt->u.dst)); + nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); + nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev), + dst_mtu(&nrt->dst)); if (ip6_ins_rt(nrt)) goto out; - netevent.old = &rt->u.dst; - netevent.new = &nrt->u.dst; + netevent.old = &rt->dst; + netevent.new = &nrt->dst; call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); if (rt->rt6i_flags&RTF_CACHE) { @@ -1551,7 +1548,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, } out: - dst_release(&rt->u.dst); + dst_release(&rt->dst); } /* @@ -1570,7 +1567,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, if (rt == NULL) return; - if (pmtu >= dst_mtu(&rt->u.dst)) + if (pmtu >= dst_mtu(&rt->dst)) goto out; if (pmtu < IPV6_MIN_MTU) { @@ -1588,7 +1585,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, They are sent only in response to data packets, so that this nexthop apparently is reachable. --ANK */ - dst_confirm(&rt->u.dst); + dst_confirm(&rt->dst); /* Host route. If it is static, it would be better not to override it, but add new one, so that @@ -1596,10 +1593,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, would return automatically. */ if (rt->rt6i_flags & RTF_CACHE) { - rt->u.dst.metrics[RTAX_MTU-1] = pmtu; + rt->dst.metrics[RTAX_MTU-1] = pmtu; if (allfrag) - rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; - dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); + rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; + dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; goto out; } @@ -1615,9 +1612,9 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, nrt = rt6_alloc_clone(rt, daddr); if (nrt) { - nrt->u.dst.metrics[RTAX_MTU-1] = pmtu; + nrt->dst.metrics[RTAX_MTU-1] = pmtu; if (allfrag) - nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; + nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; /* According to RFC 1981, detecting PMTU increase shouldn't be * happened within 5 mins, the recommended timer is 10 mins. @@ -1625,13 +1622,13 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, * which is 10 mins. 
After 10 mins the decreased pmtu is expired * and detecting PMTU increase will be automatically happened. */ - dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); + dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; ip6_ins_rt(nrt); } out: - dst_release(&rt->u.dst); + dst_release(&rt->dst); } /* @@ -1644,18 +1641,18 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); if (rt) { - rt->u.dst.input = ort->u.dst.input; - rt->u.dst.output = ort->u.dst.output; - - memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); - rt->u.dst.error = ort->u.dst.error; - rt->u.dst.dev = ort->u.dst.dev; - if (rt->u.dst.dev) - dev_hold(rt->u.dst.dev); + rt->dst.input = ort->dst.input; + rt->dst.output = ort->dst.output; + + memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); + rt->dst.error = ort->dst.error; + rt->dst.dev = ort->dst.dev; + if (rt->dst.dev) + dev_hold(rt->dst.dev); rt->rt6i_idev = ort->rt6i_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); - rt->u.dst.lastuse = jiffies; + rt->dst.lastuse = jiffies; rt->rt6i_expires = 0; ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); @@ -1689,14 +1686,14 @@ static struct rt6_info *rt6_get_route_info(struct net *net, if (!fn) goto out; - for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { + for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { if (rt->rt6i_dev->ifindex != ifindex) continue; if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) continue; if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) continue; - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); break; } out: @@ -1744,14 +1741,14 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d return NULL; write_lock_bh(&table->tb6_lock); - for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) { + for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { if (dev == rt->rt6i_dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && ipv6_addr_equal(&rt->rt6i_gateway, addr)) break; } if (rt) - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); write_unlock_bh(&table->tb6_lock); return rt; } @@ -1790,9 +1787,9 @@ void rt6_purge_dflt_routers(struct net *net) restart: read_lock_bh(&table->tb6_lock); - for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) { + for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { - dst_hold(&rt->u.dst); + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); ip6_del_rt(rt); goto restart; @@ -1930,15 +1927,15 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, dev_hold(net->loopback_dev); in6_dev_hold(idev); - rt->u.dst.flags = DST_HOST; - rt->u.dst.input = ip6_input; - rt->u.dst.output = ip6_output; + rt->dst.flags = DST_HOST; + rt->dst.input = ip6_input; + rt->dst.output = ip6_output; rt->rt6i_dev = net->loopback_dev; rt->rt6i_idev = idev; - rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); - rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); - rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; - rt->u.dst.obsolete = -1; + rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); + rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst)); + rt->dst.metrics[RTAX_HOPLIMIT-1] = -1; + rt->dst.obsolete = -1; rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; if (anycast) @@ -1947,7 +1944,7 @@ struct rt6_info 
*addrconf_dst_alloc(struct inet6_dev *idev, rt->rt6i_flags |= RTF_LOCAL; neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); if (IS_ERR(neigh)) { - dst_free(&rt->u.dst); + dst_free(&rt->dst); /* We are casting this because that is the return * value type. But an errno encoded pointer is the @@ -1962,7 +1959,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->rt6i_dst.plen = 128; rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); - atomic_set(&rt->u.dst.__refcnt, 1); + atomic_set(&rt->dst.__refcnt, 1); return rt; } @@ -2033,12 +2030,12 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) PMTU discouvery. */ if (rt->rt6i_dev == arg->dev && - !dst_metric_locked(&rt->u.dst, RTAX_MTU) && - (dst_mtu(&rt->u.dst) >= arg->mtu || - (dst_mtu(&rt->u.dst) < arg->mtu && - dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { - rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; - rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu); + !dst_metric_locked(&rt->dst, RTAX_MTU) && + (dst_mtu(&rt->dst) >= arg->mtu || + (dst_mtu(&rt->dst) < arg->mtu && + dst_mtu(&rt->dst) == idev->cnf.mtu6))) { + rt->dst.metrics[RTAX_MTU-1] = arg->mtu; + rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu); } return 0; } @@ -2252,20 +2249,20 @@ static int rt6_fill_node(struct net *net, #endif NLA_PUT_U32(skb, RTA_IIF, iif); } else if (dst) { - struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); + struct inet6_dev *idev = ip6_dst_idev(&rt->dst); struct in6_addr saddr_buf; if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, dst, 0, &saddr_buf) == 0) NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); } - if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) + if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) goto nla_put_failure; - if (rt->u.dst.neighbour) - NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key); + if (rt->dst.neighbour) + NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key); - if (rt->u.dst.dev) + if (rt->dst.dev) NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); @@ -2277,8 +2274,8 @@ static int rt6_fill_node(struct net *net, else expires = INT_MAX; - if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, - expires, rt->u.dst.error) < 0) + if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, + expires, rt->dst.error) < 0) goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -2364,7 +2361,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, RTM_NEWROUTE, NETLINK_CB(in_skb).pid, @@ -2416,12 +2413,12 @@ static int ip6_route_dev_notify(struct notifier_block *this, struct net *net = dev_net(dev); if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { - net->ipv6.ip6_null_entry->u.dst.dev = dev; + net->ipv6.ip6_null_entry->dst.dev = dev; net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES - net->ipv6.ip6_prohibit_entry->u.dst.dev = dev; + net->ipv6.ip6_prohibit_entry->dst.dev = dev; net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); - net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev; + net->ipv6.ip6_blk_hole_entry->dst.dev = dev; net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); #endif } @@ -2464,8 +2461,8 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) seq_puts(m, 
"00000000000000000000000000000000"); } seq_printf(m, " %08x %08x %08x %08x %8s\n", - rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), - rt->u.dst.__use, rt->rt6i_flags, + rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), + rt->dst.__use, rt->rt6i_flags, rt->rt6i_dev ? rt->rt6i_dev->name : ""); return 0; } @@ -2646,9 +2643,9 @@ static int __net_init ip6_route_net_init(struct net *net) GFP_KERNEL); if (!net->ipv6.ip6_null_entry) goto out_ip6_dst_ops; - net->ipv6.ip6_null_entry->u.dst.path = + net->ipv6.ip6_null_entry->dst.path = (struct dst_entry *)net->ipv6.ip6_null_entry; - net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; + net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; #ifdef CONFIG_IPV6_MULTIPLE_TABLES net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, @@ -2656,18 +2653,18 @@ static int __net_init ip6_route_net_init(struct net *net) GFP_KERNEL); if (!net->ipv6.ip6_prohibit_entry) goto out_ip6_null_entry; - net->ipv6.ip6_prohibit_entry->u.dst.path = + net->ipv6.ip6_prohibit_entry->dst.path = (struct dst_entry *)net->ipv6.ip6_prohibit_entry; - net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; + net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops; net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, sizeof(*net->ipv6.ip6_blk_hole_entry), GFP_KERNEL); if (!net->ipv6.ip6_blk_hole_entry) goto out_ip6_prohibit_entry; - net->ipv6.ip6_blk_hole_entry->u.dst.path = + net->ipv6.ip6_blk_hole_entry->dst.path = (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; - net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; + net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; #endif net->ipv6.sysctl.flush_delay = 0; @@ -2742,12 +2739,12 @@ int __init ip6_route_init(void) /* Registering of the loopback is done before this portion of code, * the loopback reference in rt6_info will not be taken, do it * manually for init_net */ - init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev; + init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES - init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev; + init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); - init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev; + init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); #endif ret = fib6_init(); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e51e650ea80be7e4e2e0e0fe8514e3f14f0f1890..4699cd3c3118ffefb90d4f1e587683aedddcc866 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -249,8 +249,6 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, return NULL; } -static DEFINE_SPINLOCK(ipip6_prl_lock); - #define for_each_prl_rcu(start) \ for (prl = rcu_dereference(start); \ prl; \ @@ -340,7 +338,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) if (a->addr == htonl(INADDR_ANY)) return -EINVAL; - spin_lock(&ipip6_prl_lock); + ASSERT_RTNL(); for (p = t->prl; p; p = p->next) { if (p->addr == a->addr) { @@ -370,7 +368,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) t->prl_count++; rcu_assign_pointer(t->prl, p); out: - spin_unlock(&ipip6_prl_lock); return err; } @@ -397,7 +394,7 @@ 
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) struct ip_tunnel_prl_entry *x, **p; int err = 0; - spin_lock(&ipip6_prl_lock); + ASSERT_RTNL(); if (a && a->addr != htonl(INADDR_ANY)) { for (p = &t->prl; *p; p = &(*p)->next) { @@ -419,7 +416,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) } } out: - spin_unlock(&ipip6_prl_lock); return err; } @@ -716,7 +712,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, stats->tx_carrier_errors++; goto tx_error_icmp; } - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; if (tdev == dev) { ip_rt_put(rt); @@ -725,7 +721,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, } if (df) { - mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); + mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); if (mtu < 68) { stats->collisions++; @@ -784,7 +780,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags = 0; skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. @@ -833,7 +829,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) .proto = IPPROTO_IPV6 }; struct rtable *rt; if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; ip_rt_put(rt); } dev->flags |= IFF_POINTOPOINT; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 34d1f0690d7e4f6031172fd4640da207b07c25b3..09fd34f0dbf2e18efbc7c1d3b3681eee0ded6f3e 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -27,28 +27,17 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -/* - * This table has to be sorted and terminated with (__u16)-1. - * XXX generate a better table. - * Unresolved Issues: HIPPI with a 64k MSS is not well supported. - * - * Taken directly from ipv4 implementation. - * Should this list be modified for ipv6 use or is it close enough? - * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart - */ +/* Table must be sorted. */ static __u16 const msstab[] = { - 64 - 1, - 256 - 1, - 512 - 1, - 536 - 1, - 1024 - 1, - 1440 - 1, - 1460 - 1, - 4312 - 1, - (__u16)-1 + 64, + 512, + 536, + 1280 - 60, + 1480 - 60, + 1500 - 60, + 4460 - 60, + 9000 - 60, }; -/* The number doesn't include the -1 terminator */ -#define NUM_MSS (ARRAY_SIZE(msstab) - 1) /* * This (misnamed) value is the age of syncookie which is permitted. @@ -134,9 +123,11 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) tcp_synq_overflow(sk); - for (mssind = 0; mss > msstab[mssind + 1]; mssind++) - ; - *mssp = msstab[mssind] + 1; + for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) + if (mss >= msstab[mssind]) + break; + + *mssp = msstab[mssind]; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); @@ -154,7 +145,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) th->source, th->dest, seq, jiffies / (HZ * 60), COUNTER_TRIES); - return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; + return mssind < ARRAY_SIZE(msstab) ? 
msstab[mssind] : 0; } struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) @@ -173,8 +164,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) int mss; struct dst_entry *dst; __u8 rcv_wscale; + bool ecn_ok; - if (!sysctl_tcp_syncookies || !th->ack) + if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; if (tcp_synq_no_recent_overflow(sk) || @@ -189,8 +181,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, &hash_location, 0); - if (tcp_opt.saw_tstamp) - cookie_check_timestamp(&tcp_opt); + if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) + goto out; ret = NULL; req = inet6_reqsk_alloc(&tcp6_request_sock_ops); @@ -224,9 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) req->expires = 0UL; req->retrans = 0; - ireq->ecn_ok = 0; + ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; - ireq->rcv_wscale = tcp_opt.rcv_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; @@ -240,17 +231,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) * me if there is a preferred way. */ { - struct in6_addr *final_p = NULL, final; + struct in6_addr *final_p, final; struct flowi fl; memset(&fl, 0, sizeof(fl)); fl.proto = IPPROTO_TCP; ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); - if (np->opt && np->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, np->opt, &final); ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); fl.oif = sk->sk_bound_dev_if; fl.mark = sk->sk_mark; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2b7c3a100e2c327f3e3c057e309733e221b22061..fe6d40418c0b8fdba3b58fd7b88d3fedc2a7f6a2 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -129,7 +129,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, struct inet_connection_sock *icsk = inet_csk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct in6_addr *saddr = NULL, *final_p = NULL, final; + struct in6_addr *saddr = NULL, *final_p, final; struct flowi fl; struct dst_entry *dst; int addr_type; @@ -250,12 +250,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl.fl_ip_dport = usin->sin6_port; fl.fl_ip_sport = inet->inet_sport; - if (np->opt && np->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, np->opt, &final); security_sk_classify_flow(sk, &fl); @@ -477,7 +472,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff * skb; struct ipv6_txoptions *opt = NULL; - struct in6_addr * final_p = NULL, final; + struct in6_addr * final_p, final; struct flowi fl; struct dst_entry *dst; int err = -1; @@ -494,12 +489,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, security_req_classify_flow(req, &fl); opt = np->opt; - if (opt && opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, opt, &final); err = ip6_dst_lookup(sk, &dst, &fl); if (err) @@ -1167,7 
+1157,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) } #ifdef CONFIG_SYN_COOKIES - if (!th->rst && !th->syn && th->ack) + if (!th->syn) sk = cookie_v6_check(sk, skb); #endif return sk; @@ -1279,13 +1269,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) treq = inet6_rsk(req); ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); - if (!want_cookie) + if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); - if (want_cookie) { - isn = cookie_v6_init_sequence(sk, skb, &req->mss); - req->cookie_ts = tmp_opt.tstamp_ok; - } else if (!isn) { + if (!isn) { if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { @@ -1298,8 +1285,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!sk->sk_bound_dev_if && ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) treq->iif = inet6_iif(skb); - - isn = tcp_v6_init_sequence(skb); + if (!want_cookie) { + isn = tcp_v6_init_sequence(skb); + } else { + isn = cookie_v6_init_sequence(sk, skb, &req->mss); + req->cookie_ts = tmp_opt.tstamp_ok; + } } tcp_rsk(req)->snt_isn = isn; @@ -1392,18 +1383,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, goto out_overflow; if (dst == NULL) { - struct in6_addr *final_p = NULL, final; + struct in6_addr *final_p, final; struct flowi fl; memset(&fl, 0, sizeof(fl)); fl.proto = IPPROTO_TCP; ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); - if (opt && opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; - } + final_p = fl6_update_dst(&fl, opt, &final); ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); fl.oif = sk->sk_bound_dev_if; fl.mark = sk->sk_mark; @@ -2156,6 +2142,8 @@ struct proto tcpv6_prot = { .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, + .sendmsg = tcp_sendmsg, + .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .hash = tcp_v6_hash, .unhash = inet_unhash, @@ -2174,6 +2162,7 @@ struct proto tcpv6_prot = { .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, + .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 87be58673b55fffc75e34ec0d6cc1dceafd7de8e..1dd1affdead2d418a3d98a0f34e5d027cab88371 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -927,7 +927,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; - struct in6_addr *daddr, *final_p = NULL, final; + struct in6_addr *daddr, *final_p, final; struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct flowi fl; @@ -1097,14 +1097,9 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, ipv6_addr_copy(&fl.fl6_src, &np->saddr); fl.fl_ip_sport = inet->inet_sport; - /* merge ip6_build_xmit from ip6_output */ - if (opt && opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; - ipv6_addr_copy(&final, &fl.fl6_dst); - ipv6_addr_copy(&fl.fl6_dst, rt0->addr); - final_p = &final; + final_p = fl6_update_dst(&fl, opt, &final); + if (final_p) connected = 0; - } if (!fl.oif && 
ipv6_addr_is_multicast(&fl.fl6_dst)) { fl.oif = np->mcast_oif; diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 6a1a202710c51203597fc291244d1a5d9b5df1d6..800bc53b7f63d36fdeeadf81ccdde5870b99dc9c 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -527,7 +527,7 @@ static int dev_irnet_close(struct inode * inode, struct file * file) { - irnet_socket * ap = (struct irnet_socket *) file->private_data; + irnet_socket * ap = file->private_data; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); @@ -564,7 +564,7 @@ dev_irnet_write(struct file * file, size_t count, loff_t * ppos) { - irnet_socket * ap = (struct irnet_socket *) file->private_data; + irnet_socket * ap = file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); @@ -588,7 +588,7 @@ dev_irnet_read(struct file * file, size_t count, loff_t * ppos) { - irnet_socket * ap = (struct irnet_socket *) file->private_data; + irnet_socket * ap = file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); @@ -609,7 +609,7 @@ static unsigned int dev_irnet_poll(struct file * file, poll_table * wait) { - irnet_socket * ap = (struct irnet_socket *) file->private_data; + irnet_socket * ap = file->private_data; unsigned int mask; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", @@ -638,7 +638,7 @@ dev_irnet_ioctl( unsigned int cmd, unsigned long arg) { - irnet_socket * ap = (struct irnet_socket *) file->private_data; + irnet_socket * ap = file->private_data; int err; int val; void __user *argp = (void __user *)arg; diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 47db1d8a0d92088a5982b2db471c9fce05001de2..285761e77d90f98ee15c6b47b34c8ead10692e33 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -1853,23 +1853,23 @@ static int irttp_seq_show(struct seq_file *seq, void *v) self->remote_credit); seq_printf(seq, "send credit: %d\n", self->send_credit); - seq_printf(seq, " tx packets: %ld, ", + seq_printf(seq, " tx packets: %lu, ", self->stats.tx_packets); - seq_printf(seq, "rx packets: %ld, ", + seq_printf(seq, "rx packets: %lu, ", self->stats.rx_packets); - seq_printf(seq, "tx_queue len: %d ", + seq_printf(seq, "tx_queue len: %u ", skb_queue_len(&self->tx_queue)); - seq_printf(seq, "rx_queue len: %d\n", + seq_printf(seq, "rx_queue len: %u\n", skb_queue_len(&self->rx_queue)); seq_printf(seq, " tx_sdu_busy: %s, ", self->tx_sdu_busy? "TRUE":"FALSE"); seq_printf(seq, "rx_sdu_busy: %s\n", self->rx_sdu_busy? 
"TRUE":"FALSE"); - seq_printf(seq, " max_seg_size: %d, ", + seq_printf(seq, " max_seg_size: %u, ", self->max_seg_size); - seq_printf(seq, "tx_max_sdu_size: %d, ", + seq_printf(seq, "tx_max_sdu_size: %u, ", self->tx_max_sdu_size); - seq_printf(seq, "rx_max_sdu_size: %d\n", + seq_printf(seq, "rx_max_sdu_size: %u\n", self->rx_max_sdu_size); seq_printf(seq, " Used by (%s)\n\n", diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index f28ad2cc8428b0f02071c44f1e1b793609183bba..499c045d69102aa45dc3a2d1f50cb55dc55a2c8f 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -1463,7 +1463,7 @@ struct iucv_path_pending { u32 res3; u8 ippollfg; u8 res4[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_path_pending(struct iucv_irq_data *data) { @@ -1524,7 +1524,7 @@ struct iucv_path_complete { u32 res3; u8 ippollfg; u8 res4[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_path_complete(struct iucv_irq_data *data) { @@ -1554,7 +1554,7 @@ struct iucv_path_severed { u32 res4; u8 ippollfg; u8 res5[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_path_severed(struct iucv_irq_data *data) { @@ -1590,7 +1590,7 @@ struct iucv_path_quiesced { u32 res4; u8 ippollfg; u8 res5[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_path_quiesced(struct iucv_irq_data *data) { @@ -1618,7 +1618,7 @@ struct iucv_path_resumed { u32 res4; u8 ippollfg; u8 res5[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_path_resumed(struct iucv_irq_data *data) { @@ -1649,7 +1649,7 @@ struct iucv_message_complete { u32 ipbfln2f; u8 ippollfg; u8 res2[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_message_complete(struct iucv_irq_data *data) { @@ -1694,7 +1694,7 @@ struct iucv_message_pending { u32 ipbfln2f; u8 ippollfg; u8 res2[3]; -} __attribute__ ((packed)); +} __packed; static void iucv_message_pending(struct iucv_irq_data *data) { diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0852512d392cb6ac7e9ed7bad87663154f70f64e..226a0ae3bcfd7891775ec83c7035d99e2539923f 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -348,7 +348,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len sk->sk_state = TCP_ESTABLISHED; inet->inet_id = jiffies; - sk_dst_set(sk, &rt->u.dst); + sk_dst_set(sk, &rt->dst); write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); @@ -496,9 +496,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) goto no_route; } - sk_setup_caps(sk, &rt->u.dst); + sk_setup_caps(sk, &rt->dst); } - skb_dst_set(skb, dst_clone(&rt->u.dst)); + skb_dst_set(skb, dst_clone(&rt->dst)); /* Queue the packet to IP for output */ rc = ip_queue_xmit(skb); diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 8a91f6c0bb1800a8149623c059bdde6e135361b7..4d6f8653ec8819a7e69cdb9cb447b77c1e7e7047 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL ---help--- This option enables the 'minstrel' TX rate control algorithm +config MAC80211_RC_MINSTREL_HT + bool "Minstrel 802.11n support" if EMBEDDED + depends on MAC80211_RC_MINSTREL + default y + ---help--- + This option enables the 'minstrel_ht' TX rate control algorithm + choice prompt "Default rate control algorithm" depends on MAC80211_HAS_RC @@ -62,6 +69,7 @@ endchoice config MAC80211_RC_DEFAULT string + default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT default "minstrel" if 
MAC80211_RC_DEFAULT_MINSTREL default "pid" if MAC80211_RC_DEFAULT_PID default "" diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 84b48ba8a77e559dcc5b1d8120f5428b4c07314c..fdb54e61d637d74ce81c45211f7334eabdc21dce 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -51,7 +51,11 @@ rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o rc80211_minstrel-y := rc80211_minstrel.o rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o +rc80211_minstrel_ht-y := rc80211_minstrel_ht.o +rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o + mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) +mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y) ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 6bb9a9a94960e9214e92f1debf023aa0ebb3b987..965b272499fd7616e4df039e3c878dda678f5cf9 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -6,39 +6,70 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu - * Copyright 2007-2008, Intel Corporation + * Copyright 2007-2010, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +/** + * DOC: RX A-MPDU aggregation + * + * Aggregation on the RX side requires only implementing the + * @ampdu_action callback that is invoked to start/stop any + * block-ack sessions for RX aggregation. + * + * When RX aggregation is started by the peer, the driver is + * notified via @ampdu_action function, with the + * %IEEE80211_AMPDU_RX_START action, and may reject the request + * in which case a negative response is sent to the peer, if it + * accepts it a positive response is sent. + * + * While the session is active, the device/driver are required + * to de-aggregate frames and pass them up one by one to mac80211, + * which will handle the reorder buffer. + * + * When the aggregation session is stopped again by the peer or + * ourselves, the driver's @ampdu_action function will be called + * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the + * call must not fail. 
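
The RX rules spelled out in the kernel-doc above map onto a single switch in a driver's ampdu_action callback. The fragment below is an illustrative sketch only, not part of this patch; it assumes the ieee80211_ops::ampdu_action prototype of this kernel generation, and my_hw_rx_agg_start()/my_hw_rx_agg_stop() are hypothetical hardware helpers.

#include <net/mac80211.h>

/* Hypothetical hardware helpers -- stand-ins for real driver code. */
int my_hw_rx_agg_start(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid);
void my_hw_rx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid);

static int my_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           enum ieee80211_ampdu_mlme_action action,
                           struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                /* A non-zero return here makes mac80211 answer the peer's
                 * ADDBA request with a negative response. */
                return my_hw_rx_agg_start(hw, sta, tid);
        case IEEE80211_AMPDU_RX_STOP:
                /* Stopping must not fail. */
                my_hw_rx_agg_stop(hw, sta, tid);
                return 0;
        default:
                /* TX aggregation actions omitted in this sketch. */
                return -EOPNOTSUPP;
        }
}
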
+ */ + #include #include #include #include "ieee80211_i.h" #include "driver-ops.h" -static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, - u16 initiator, u16 reason, - bool from_timer) +static void ieee80211_free_tid_rx(struct rcu_head *h) { - struct ieee80211_local *local = sta->local; - struct tid_ampdu_rx *tid_rx; + struct tid_ampdu_rx *tid_rx = + container_of(h, struct tid_ampdu_rx, rcu_head); int i; - spin_lock_bh(&sta->lock); + for (i = 0; i < tid_rx->buf_size; i++) + dev_kfree_skb(tid_rx->reorder_buf[i]); + kfree(tid_rx->reorder_buf); + kfree(tid_rx->reorder_time); + kfree(tid_rx); +} - /* check if TID is in operational state */ - if (!sta->ampdu_mlme.tid_active_rx[tid]) { - spin_unlock_bh(&sta->lock); - return; - } +void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason) +{ + struct ieee80211_local *local = sta->local; + struct tid_ampdu_rx *tid_rx; - sta->ampdu_mlme.tid_active_rx[tid] = false; + lockdep_assert_held(&sta->ampdu_mlme.mtx); tid_rx = sta->ampdu_mlme.tid_rx[tid]; + if (!tid_rx) + return; + + rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); + #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", sta->sta.addr, tid); @@ -54,32 +85,17 @@ static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, ieee80211_send_delba(sta->sdata, sta->sta.addr, tid, 0, reason); - /* free the reordering buffer */ - for (i = 0; i < tid_rx->buf_size; i++) { - if (tid_rx->reorder_buf[i]) { - /* release the reordered frames */ - dev_kfree_skb(tid_rx->reorder_buf[i]); - tid_rx->stored_mpdu_num--; - tid_rx->reorder_buf[i] = NULL; - } - } - - /* free resources */ - kfree(tid_rx->reorder_buf); - kfree(tid_rx->reorder_time); - sta->ampdu_mlme.tid_rx[tid] = NULL; - - spin_unlock_bh(&sta->lock); + del_timer_sync(&tid_rx->session_timer); - if (!from_timer) - del_timer_sync(&tid_rx->session_timer); - kfree(tid_rx); + call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); } void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason) { - ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false); + mutex_lock(&sta->ampdu_mlme.mtx); + ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason); + mutex_unlock(&sta->ampdu_mlme.mtx); } /* @@ -100,8 +116,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); #endif - ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT, - WLAN_REASON_QSTA_TIMEOUT, true); + set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired); + ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); } static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, @@ -212,9 +228,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, /* examine state machine */ - spin_lock_bh(&sta->lock); + mutex_lock(&sta->ampdu_mlme.mtx); - if (sta->ampdu_mlme.tid_active_rx[tid]) { + if (sta->ampdu_mlme.tid_rx[tid]) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "unexpected AddBA Req from " @@ -225,9 +241,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, } /* prepare A-MPDU MLME for Rx aggregation */ - sta->ampdu_mlme.tid_rx[tid] = - kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); - if (!sta->ampdu_mlme.tid_rx[tid]) { + tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); + if (!tid_agg_rx) { #ifdef 
CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "allocate rx mlme to tid %d failed\n", @@ -235,14 +250,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, #endif goto end; } - /* rx timer */ - sta->ampdu_mlme.tid_rx[tid]->session_timer.function = - sta_rx_agg_session_timer_expired; - sta->ampdu_mlme.tid_rx[tid]->session_timer.data = - (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); - tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; + /* rx timer */ + tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; + tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; + init_timer(&tid_agg_rx->session_timer); /* prepare reordering buffer */ tid_agg_rx->reorder_buf = @@ -257,8 +269,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, #endif kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx->reorder_time); - kfree(sta->ampdu_mlme.tid_rx[tid]); - sta->ampdu_mlme.tid_rx[tid] = NULL; + kfree(tid_agg_rx); goto end; } @@ -270,13 +281,12 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, if (ret) { kfree(tid_agg_rx->reorder_buf); + kfree(tid_agg_rx->reorder_time); kfree(tid_agg_rx); - sta->ampdu_mlme.tid_rx[tid] = NULL; goto end; } - /* change state and send addba resp */ - sta->ampdu_mlme.tid_active_rx[tid] = true; + /* update data */ tid_agg_rx->dialog_token = dialog_token; tid_agg_rx->ssn = start_seq_num; tid_agg_rx->head_seq_num = start_seq_num; @@ -284,8 +294,15 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, tid_agg_rx->timeout = timeout; tid_agg_rx->stored_mpdu_num = 0; status = WLAN_STATUS_SUCCESS; + + /* activate it for RX */ + rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); + + if (timeout) + mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); + end: - spin_unlock_bh(&sta->lock); + mutex_unlock(&sta->ampdu_mlme.mtx); end_no_lock: ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 98258b7341e356111bef3674f56b3a6adeee5961..c893f236acea771076b5572b42c82162c3684913 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -6,7 +6,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu - * Copyright 2007-2009, Intel Corporation + * Copyright 2007-2010, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -21,28 +21,39 @@ #include "wme.h" /** - * DOC: TX aggregation + * DOC: TX A-MPDU aggregation * * Aggregation on the TX side requires setting the hardware flag - * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues - * hardware parameter to the number of hardware AMPDU queues. If there are no - * hardware queues then the driver will (currently) have to do all frame - * buffering. + * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed + * packets with a flag indicating A-MPDU aggregation. The driver + * or device is responsible for actually aggregating the frames, + * as well as deciding how many and which to aggregate. * - * When TX aggregation is started by some subsystem (usually the rate control - * algorithm would be appropriate) by calling the - * ieee80211_start_tx_ba_session() function, the driver will be notified via - * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action. 
+ * When TX aggregation is started by some subsystem (usually the rate + * control algorithm would be appropriate) by calling the + * ieee80211_start_tx_ba_session() function, the driver will be + * notified via its @ampdu_action function, with the + * %IEEE80211_AMPDU_TX_START action. * * In response to that, the driver is later required to call the - * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe()) - * function, which will start the aggregation session. + * ieee80211_start_tx_ba_cb_irqsafe() function, which will really + * start the aggregation session after the peer has also responded. + * If the peer responds negatively, the session will be stopped + * again right away. Note that it is possible for the aggregation + * session to be stopped before the driver has indicated that it + * is done setting it up, in which case it must not indicate the + * setup completion. * - * Similarly, when the aggregation session is stopped by - * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will - * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the - * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb() - * (or ieee80211_stop_tx_ba_cb_irqsafe()). + * Also note that, since we also need to wait for a response from + * the peer, the driver is notified of the completion of the + * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the + * @ampdu_action callback. + * + * Similarly, when the aggregation session is stopped by the peer + * or something calling ieee80211_stop_tx_ba_session(), the driver's + * @ampdu_action function will be called with the action + * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail, + * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). */ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, @@ -125,25 +136,53 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 ieee80211_tx_skb(sdata, skb); } +static void kfree_tid_tx(struct rcu_head *rcu_head) +{ + struct tid_ampdu_tx *tid_tx = + container_of(rcu_head, struct tid_ampdu_tx, rcu_head); + + kfree(tid_tx); +} + int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_back_parties initiator) { struct ieee80211_local *local = sta->local; + struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; int ret; - u8 *state; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + if (!tid_tx) + return -ENOENT; + + spin_lock_bh(&sta->lock); + + if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { + /* not even started yet! */ + rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); + spin_unlock_bh(&sta->lock); + call_rcu(&tid_tx->rcu_head, kfree_tid_tx); + return 0; + } + + spin_unlock_bh(&sta->lock); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", sta->sta.addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ - state = &sta->ampdu_mlme.tid_state_tx[tid]; + set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); - if (*state == HT_AGG_STATE_OPERATIONAL) - sta->ampdu_mlme.addba_req_num[tid] = 0; + /* + * After this packets are no longer handed right through + * to the driver but are put onto tid_tx->pending instead, + * with locking to ensure proper access. 
+ */ + clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); - *state = HT_AGG_STATE_REQ_STOP_BA_MSK | - (initiator << HT_AGG_STATE_INITIATOR_SHIFT); + tid_tx->stop_initiator = initiator; ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_TX_STOP, @@ -174,16 +213,14 @@ static void sta_addba_resp_timer_expired(unsigned long data) u16 tid = *(u8 *)data; struct sta_info *sta = container_of((void *)data, struct sta_info, timer_to_tid[tid]); - u8 *state; - - state = &sta->ampdu_mlme.tid_state_tx[tid]; + struct tid_ampdu_tx *tid_tx; /* check if the TID waits for addBA response */ - spin_lock_bh(&sta->lock); - if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK | - HT_AGG_STATE_REQ_STOP_BA_MSK)) != - HT_ADDBA_REQUESTED_MSK) { - spin_unlock_bh(&sta->lock); + rcu_read_lock(); + tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); + if (!tid_tx || + test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { + rcu_read_unlock(); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "timer expired on tid %d but we are not " "(or no longer) expecting addBA response there\n", @@ -196,8 +233,8 @@ static void sta_addba_resp_timer_expired(unsigned long data) printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); #endif - ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); - spin_unlock_bh(&sta->lock); + ieee80211_stop_tx_ba_session(&sta->sta, tid); + rcu_read_unlock(); } static inline int ieee80211_ac_from_tid(int tid) @@ -205,14 +242,112 @@ static inline int ieee80211_ac_from_tid(int tid) return ieee802_1d_to_ac[tid & 7]; } +/* + * When multiple aggregation sessions on multiple stations + * are being created/destroyed simultaneously, we need to + * refcount the global queue stop caused by that in order + * to not get into a situation where one of the aggregation + * setup or teardown re-enables queues before the other is + * ready to handle that. + * + * These two functions take care of this issue by keeping + * a global "agg_queue_stop" refcount. + */ +static void __acquires(agg_queue) +ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) +{ + int queue = ieee80211_ac_from_tid(tid); + + if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) + ieee80211_stop_queue_by_reason( + &local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + __acquire(agg_queue); +} + +static void __releases(agg_queue) +ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) +{ + int queue = ieee80211_ac_from_tid(tid); + + if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) + ieee80211_wake_queue_by_reason( + &local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + __release(agg_queue); +} + +void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) +{ + struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + u16 start_seq_num; + int ret; + + lockdep_assert_held(&sta->ampdu_mlme.mtx); + + /* + * While we're asking the driver about the aggregation, + * stop the AC queue so that we don't have to worry + * about frames that came in while we were doing that, + * which would require us to put them to the AC pending + * afterwards which just makes the code more complex. 
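
The agg_queue_stop refcount described just above is a small but easy-to-misuse pattern: only the 0 -> 1 transition may actually stop the queue, and only the 1 -> 0 transition may wake it, otherwise one session's teardown could re-enable a queue another session's setup still needs stopped. The same idea in isolation; stop_hw_queue()/wake_hw_queue() are placeholders and four access categories are assumed.

#include <asm/atomic.h>

void stop_hw_queue(int queue);          /* placeholder */
void wake_hw_queue(int queue);          /* placeholder */

/* one refcount per access category (four assumed here) */
static atomic_t agg_queue_stop_refcnt[4];

static void agg_stop_queue(int queue)
{
        /* only the first stopper really stops the hardware queue */
        if (atomic_inc_return(&agg_queue_stop_refcnt[queue]) == 1)
                stop_hw_queue(queue);
}

static void agg_wake_queue(int queue)
{
        /* only the last waker really wakes it again */
        if (atomic_dec_return(&agg_queue_stop_refcnt[queue]) == 0)
                wake_hw_queue(queue);
}
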
+ */ + ieee80211_stop_queue_agg(local, tid); + + clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); + + /* + * make sure no packets are being processed to get + * valid starting sequence number + */ + synchronize_net(); + + start_seq_num = sta->tid_seq[tid] >> 4; + + ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, + &sta->sta, tid, &start_seq_num); + if (ret) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "BA request denied - HW unavailable for" + " tid %d\n", tid); +#endif + spin_lock_bh(&sta->lock); + rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); + spin_unlock_bh(&sta->lock); + + ieee80211_wake_queue_agg(local, tid); + call_rcu(&tid_tx->rcu_head, kfree_tid_tx); + return; + } + + /* we can take packets again now */ + ieee80211_wake_queue_agg(local, tid); + + /* activate the timer for the recipient's addBA response */ + mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); +#endif + + spin_lock_bh(&sta->lock); + sta->ampdu_mlme.addba_req_num[tid]++; + spin_unlock_bh(&sta->lock); + + /* send AddBA request */ + ieee80211_send_addba_request(sdata, sta->sta.addr, tid, + tid_tx->dialog_token, start_seq_num, + 0x40, 5000); +} + int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; - u8 *state; + struct tid_ampdu_tx *tid_tx; int ret = 0; - u16 start_seq_num; trace_api_start_tx_ba_session(pubsta, tid); @@ -239,24 +374,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) sdata->vif.type != NL80211_IFTYPE_AP) return -EINVAL; - if (test_sta_flags(sta, WLAN_STA_DISASSOC)) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "Disassociation is in progress. " - "Denying BA session request\n"); -#endif - return -EINVAL; - } - if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { #ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "Suspend in progress. " + printk(KERN_DEBUG "BA sessions blocked. " "Denying BA session request\n"); #endif return -EINVAL; } spin_lock_bh(&sta->lock); - spin_lock(&local->ampdu_lock); /* we have tried too many times, receiver does not want A-MPDU */ if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { @@ -264,9 +390,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) goto err_unlock_sta; } - state = &sta->ampdu_mlme.tid_state_tx[tid]; + tid_tx = sta->ampdu_mlme.tid_tx[tid]; /* check if the TID is not in aggregation flow already */ - if (*state != HT_AGG_STATE_IDLE) { + if (tid_tx) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "BA request denied - session is not " "idle on tid %u\n", tid); @@ -275,96 +401,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) goto err_unlock_sta; } - /* - * While we're asking the driver about the aggregation, - * stop the AC queue so that we don't have to worry - * about frames that came in while we were doing that, - * which would require us to put them to the AC pending - * afterwards which just makes the code more complex. 
- */ - ieee80211_stop_queue_by_reason( - &local->hw, ieee80211_ac_from_tid(tid), - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - /* prepare A-MPDU MLME for Tx aggregation */ - sta->ampdu_mlme.tid_tx[tid] = - kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); - if (!sta->ampdu_mlme.tid_tx[tid]) { + tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); + if (!tid_tx) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "allocate tx mlme to tid %d failed\n", tid); #endif ret = -ENOMEM; - goto err_wake_queue; + goto err_unlock_sta; } - skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); + skb_queue_head_init(&tid_tx->pending); + __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); /* Tx timer */ - sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = - sta_addba_resp_timer_expired; - sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = - (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); - - /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the - * call back right away, it must see that the flow has begun */ - *state |= HT_ADDBA_REQUESTED_MSK; - - start_seq_num = sta->tid_seq[tid] >> 4; - - ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, - pubsta, tid, &start_seq_num); + tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired; + tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid]; + init_timer(&tid_tx->addba_resp_timer); - if (ret) { -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "BA request denied - HW unavailable for" - " tid %d\n", tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ - *state = HT_AGG_STATE_IDLE; - goto err_free; - } - - /* Driver vetoed or OKed, but we can take packets again now */ - ieee80211_wake_queue_by_reason( - &local->hw, ieee80211_ac_from_tid(tid), - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - - spin_unlock(&local->ampdu_lock); - - /* prepare tid data */ + /* assign a dialog token */ sta->ampdu_mlme.dialog_token_allocator++; - sta->ampdu_mlme.tid_tx[tid]->dialog_token = - sta->ampdu_mlme.dialog_token_allocator; - sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; + tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; - spin_unlock_bh(&sta->lock); + /* finally, assign it to the array */ + rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); - /* send AddBA request */ - ieee80211_send_addba_request(sdata, pubsta->addr, tid, - sta->ampdu_mlme.tid_tx[tid]->dialog_token, - sta->ampdu_mlme.tid_tx[tid]->ssn, - 0x40, 5000); - sta->ampdu_mlme.addba_req_num[tid]++; - /* activate the timer for the recipient's addBA response */ - sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = - jiffies + ADDBA_RESP_INTERVAL; - add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); -#endif - return 0; - - err_free: - kfree(sta->ampdu_mlme.tid_tx[tid]); - sta->ampdu_mlme.tid_tx[tid] = NULL; - err_wake_queue: - ieee80211_wake_queue_by_reason( - &local->hw, ieee80211_ac_from_tid(tid), - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + + /* this flow continues off the work */ err_unlock_sta: - spin_unlock(&local->ampdu_lock); spin_unlock_bh(&sta->lock); return ret; } @@ -372,69 +439,65 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session); /* * splice packets from the STA's pending to the local pending, - * requires a call to ieee80211_agg_splice_finish and holding - * 
local->ampdu_lock across both calls. + * requires a call to ieee80211_agg_splice_finish later */ -static void ieee80211_agg_splice_packets(struct ieee80211_local *local, - struct sta_info *sta, u16 tid) +static void __acquires(agg_queue) +ieee80211_agg_splice_packets(struct ieee80211_local *local, + struct tid_ampdu_tx *tid_tx, u16 tid) { + int queue = ieee80211_ac_from_tid(tid); unsigned long flags; - u16 queue = ieee80211_ac_from_tid(tid); - - ieee80211_stop_queue_by_reason( - &local->hw, queue, - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); - if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)) - return; + ieee80211_stop_queue_agg(local, tid); - if (WARN(!sta->ampdu_mlme.tid_tx[tid], - "TID %d gone but expected when splicing aggregates from" - "the pending queue\n", tid)) + if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" + " from the pending queue\n", tid)) return; - if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { + if (!skb_queue_empty(&tid_tx->pending)) { spin_lock_irqsave(&local->queue_stop_reason_lock, flags); /* copy over remaining packets */ - skb_queue_splice_tail_init( - &sta->ampdu_mlme.tid_tx[tid]->pending, - &local->pending[queue]); + skb_queue_splice_tail_init(&tid_tx->pending, + &local->pending[queue]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } } -static void ieee80211_agg_splice_finish(struct ieee80211_local *local, - struct sta_info *sta, u16 tid) +static void __releases(agg_queue) +ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) { - u16 queue = ieee80211_ac_from_tid(tid); - - ieee80211_wake_queue_by_reason( - &local->hw, queue, - IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + ieee80211_wake_queue_agg(local, tid); } -/* caller must hold sta->lock */ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, struct sta_info *sta, u16 tid) { + lockdep_assert_held(&sta->ampdu_mlme.mtx); + #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); #endif - spin_lock(&local->ampdu_lock); - ieee80211_agg_splice_packets(local, sta, tid); - /* - * NB: we rely on sta->lock being taken in the TX - * processing here when adding to the pending queue, - * otherwise we could only change the state of the - * session to OPERATIONAL _here_. - */ - ieee80211_agg_splice_finish(local, sta, tid); - spin_unlock(&local->ampdu_lock); - drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_TX_OPERATIONAL, &sta->sta, tid, NULL); + + /* + * synchronize with TX path, while splicing the TX path + * should block so it won't put more packets onto pending. + */ + spin_lock_bh(&sta->lock); + + ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid); + /* + * Now mark as operational. This will be visible + * in the TX path, and lets it go lock-free in + * the common case. 
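
Setting HT_AGG_STATE_OPERATIONAL only after the pending queue has been spliced is what lets the transmit hot path get by with rcu_read_lock() plus test_bit() and no spinlock in the common case. Below is a hedged sketch of such a fast path, not code from this patch: it assumes the sta_info/tid_ampdu_tx layout introduced here, mark_skb_ampdu() is a stand-in for flagging the frame for A-MPDU transmission, and the slow path re-checks state under sta->lock so a frame can never be parked on ->pending after the splice has already run.

/* Sketch only: assumes mac80211's internal headers ("ieee80211_i.h",
 * "sta_info.h") for struct sta_info and struct tid_ampdu_tx. */
static bool tx_maybe_aggregate(struct sta_info *sta, int tid,
                               struct sk_buff *skb)
{
        struct tid_ampdu_tx *tid_tx;
        bool queued = false;

        rcu_read_lock();
        tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (!tid_tx)
                goto out;                       /* no session: send normally */

        if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                mark_skb_ampdu(skb);            /* common, lock-free case */
                goto out;
        }

        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
                goto out;                       /* driver not asked yet: send normally */

        /* Session is being set up or torn down: re-check under the lock. */
        spin_lock_bh(&sta->lock);
        tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (!tid_tx) {
                /* teardown already spliced and unpublished: send normally */
        } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                mark_skb_ampdu(skb);
        } else {
                __skb_queue_tail(&tid_tx->pending, skb);
                queued = true;                  /* will be spliced later */
        }
        spin_unlock_bh(&sta->lock);
out:
        rcu_read_unlock();
        return queued;
}
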
+ */ + set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state); + ieee80211_agg_splice_finish(local, tid); + + spin_unlock_bh(&sta->lock); } void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) @@ -442,7 +505,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sta_info *sta; - u8 *state; + struct tid_ampdu_tx *tid_tx; trace_api_start_tx_ba_cb(sdata, ra, tid); @@ -454,42 +517,36 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) return; } - rcu_read_lock(); + mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, ra); if (!sta) { - rcu_read_unlock(); + mutex_unlock(&local->sta_mtx); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find station: %pM\n", ra); #endif return; } - state = &sta->ampdu_mlme.tid_state_tx[tid]; - spin_lock_bh(&sta->lock); + mutex_lock(&sta->ampdu_mlme.mtx); + tid_tx = sta->ampdu_mlme.tid_tx[tid]; - if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) { + if (WARN_ON(!tid_tx)) { #ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", - *state); + printk(KERN_DEBUG "addBA was not requested!\n"); #endif - spin_unlock_bh(&sta->lock); - rcu_read_unlock(); - return; + goto unlock; } - if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK)) - goto out; - - *state |= HT_ADDBA_DRV_READY_MSK; + if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) + goto unlock; - if (*state == HT_AGG_STATE_OPERATIONAL) + if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); - out: - spin_unlock_bh(&sta->lock); - rcu_read_unlock(); + unlock: + mutex_unlock(&sta->ampdu_mlme.mtx); + mutex_unlock(&local->sta_mtx); } -EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid) @@ -510,44 +567,36 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, ra_tid = (struct ieee80211_ra_tid *) &skb->cb; memcpy(&ra_tid->ra, ra, ETH_ALEN); ra_tid->tid = tid; - ra_tid->vif = vif; - skb->pkt_type = IEEE80211_ADDBA_MSG; - skb_queue_tail(&local->skb_queue, skb); - tasklet_schedule(&local->tasklet); + skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; + skb_queue_tail(&sdata->skb_queue, skb); + ieee80211_queue_work(&local->hw, &sdata->work); } EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_back_parties initiator) { - u8 *state; int ret; - /* check if the TID is in aggregation */ - state = &sta->ampdu_mlme.tid_state_tx[tid]; - spin_lock_bh(&sta->lock); - - if (*state != HT_AGG_STATE_OPERATIONAL) { - ret = -ENOENT; - goto unlock; - } + mutex_lock(&sta->ampdu_mlme.mtx); ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); - unlock: - spin_unlock_bh(&sta->lock); + mutex_unlock(&sta->ampdu_mlme.mtx); + return ret; } -int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, - enum ieee80211_back_parties initiator) +int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; + struct tid_ampdu_tx *tid_tx; + int ret = 0; - trace_api_stop_tx_ba_session(pubsta, tid, initiator); + trace_api_stop_tx_ba_session(pubsta, tid); if (!local->ops->ampdu_action) return 
-EINVAL; @@ -555,7 +604,26 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, if (tid >= STA_TID_NUM) return -EINVAL; - return __ieee80211_stop_tx_ba_session(sta, tid, initiator); + spin_lock_bh(&sta->lock); + tid_tx = sta->ampdu_mlme.tid_tx[tid]; + + if (!tid_tx) { + ret = -ENOENT; + goto unlock; + } + + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + /* already in progress stopping it */ + ret = 0; + goto unlock; + } + + set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state); + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); + + unlock: + spin_unlock_bh(&sta->lock); + return ret; } EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); @@ -564,7 +632,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct sta_info *sta; - u8 *state; + struct tid_ampdu_tx *tid_tx; trace_api_stop_tx_ba_cb(sdata, ra, tid); @@ -581,51 +649,56 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) ra, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ - rcu_read_lock(); + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, ra); if (!sta) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find station: %pM\n", ra); #endif - rcu_read_unlock(); - return; + goto unlock; } - state = &sta->ampdu_mlme.tid_state_tx[tid]; - /* NOTE: no need to use sta->lock in this state check, as - * ieee80211_stop_tx_ba_session will let only one stop call to - * pass through per sta/tid - */ - if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { + mutex_lock(&sta->ampdu_mlme.mtx); + spin_lock_bh(&sta->lock); + tid_tx = sta->ampdu_mlme.tid_tx[tid]; + + if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); #endif - rcu_read_unlock(); - return; + goto unlock_sta; } - if (*state & HT_AGG_STATE_INITIATOR_MSK) + if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR) ieee80211_send_delba(sta->sdata, ra, tid, WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); - spin_lock_bh(&sta->lock); - spin_lock(&local->ampdu_lock); + /* + * When we get here, the TX path will not be lockless any more wrt. + * aggregation, since the OPERATIONAL bit has long been cleared. + * Thus it will block on getting the lock, if it occurs. So if we + * stop the queue now, we will not get any more packets, and any + * that might be being processed will wait for us here, thereby + * guaranteeing that no packets go to the tid_tx pending queue any + * more. 
+ */ - ieee80211_agg_splice_packets(local, sta, tid); + ieee80211_agg_splice_packets(local, tid_tx, tid); - *state = HT_AGG_STATE_IDLE; - /* from now on packets are no longer put onto sta->pending */ - kfree(sta->ampdu_mlme.tid_tx[tid]); - sta->ampdu_mlme.tid_tx[tid] = NULL; + /* future packets must not find the tid_tx struct any more */ + rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); - ieee80211_agg_splice_finish(local, sta, tid); + ieee80211_agg_splice_finish(local, tid); - spin_unlock(&local->ampdu_lock); - spin_unlock_bh(&sta->lock); + call_rcu(&tid_tx->rcu_head, kfree_tid_tx); - rcu_read_unlock(); + unlock_sta: + spin_unlock_bh(&sta->lock); + mutex_unlock(&sta->ampdu_mlme.mtx); + unlock: + mutex_unlock(&local->sta_mtx); } -EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, u16 tid) @@ -646,11 +719,10 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, ra_tid = (struct ieee80211_ra_tid *) &skb->cb; memcpy(&ra_tid->ra, ra, ETH_ALEN); ra_tid->tid = tid; - ra_tid->vif = vif; - skb->pkt_type = IEEE80211_DELBA_MSG; - skb_queue_tail(&local->skb_queue, skb); - tasklet_schedule(&local->tasklet); + skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; + skb_queue_tail(&sdata->skb_queue, skb); + ieee80211_queue_work(&local->hw, &sdata->work); } EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); @@ -660,40 +732,40 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, struct ieee80211_mgmt *mgmt, size_t len) { + struct tid_ampdu_tx *tid_tx; u16 capab, tid; - u8 *state; capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; - state = &sta->ampdu_mlme.tid_state_tx[tid]; - - spin_lock_bh(&sta->lock); + mutex_lock(&sta->ampdu_mlme.mtx); - if (!(*state & HT_ADDBA_REQUESTED_MSK)) + tid_tx = sta->ampdu_mlme.tid_tx[tid]; + if (!tid_tx) goto out; - if (mgmt->u.action.u.addba_resp.dialog_token != - sta->ampdu_mlme.tid_tx[tid]->dialog_token) { + if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ +#endif goto out; } - del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); + del_timer(&tid_tx->addba_resp_timer); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ +#endif if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) == WLAN_STATUS_SUCCESS) { - u8 curstate = *state; - - *state |= HT_ADDBA_RECEIVED_MSK; + if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, + &tid_tx->state)) { + /* ignore duplicate response */ + goto out; + } - if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL) + if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) ieee80211_agg_tx_operational(local, sta, tid); sta->ampdu_mlme.addba_req_num[tid] = 0; @@ -702,5 +774,5 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, } out: - spin_unlock_bh(&sta->lock); + mutex_unlock(&sta->ampdu_mlme.mtx); } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 67ee34f57df7b02cb06e5d0d4e44944cc95feca7..29ac8e1a509e48ec1170f61f668d8d3d5712d501 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -120,6 +120,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_key *key; int err; + if (!netif_running(dev)) + return -ENETDOWN; + sdata = IEEE80211_DEV_TO_SUB_IF(dev); switch (params->cipher) 
{ @@ -140,17 +143,22 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, return -EINVAL; } + /* reject WEP and TKIP keys if WEP failed to initialize */ + if ((alg == ALG_WEP || alg == ALG_TKIP) && + IS_ERR(sdata->local->wep_tx_tfm)) + return -EINVAL; + key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key, params->seq_len, params->seq); if (!key) return -ENOMEM; - rcu_read_lock(); + mutex_lock(&sdata->local->sta_mtx); if (mac_addr) { sta = sta_info_get_bss(sdata, mac_addr); if (!sta) { - ieee80211_key_free(key); + ieee80211_key_free(sdata->local, key); err = -ENOENT; goto out_unlock; } @@ -160,7 +168,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, err = 0; out_unlock: - rcu_read_unlock(); + mutex_unlock(&sdata->local->sta_mtx); return err; } @@ -174,7 +182,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); - rcu_read_lock(); + mutex_lock(&sdata->local->sta_mtx); if (mac_addr) { ret = -ENOENT; @@ -184,7 +192,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, goto out_unlock; if (sta->key) { - ieee80211_key_free(sta->key); + ieee80211_key_free(sdata->local, sta->key); WARN_ON(sta->key); ret = 0; } @@ -197,12 +205,12 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, goto out_unlock; } - ieee80211_key_free(sdata->keys[key_idx]); + ieee80211_key_free(sdata->local, sdata->keys[key_idx]); WARN_ON(sdata->keys[key_idx]); ret = 0; out_unlock: - rcu_read_unlock(); + mutex_unlock(&sdata->local->sta_mtx); return ret; } @@ -305,15 +313,10 @@ static int ieee80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx) { - struct ieee80211_sub_if_data *sdata; - - rcu_read_lock(); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_set_default_key(sdata, key_idx); - rcu_read_unlock(); - return 0; } @@ -321,15 +324,10 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx) { - struct ieee80211_sub_if_data *sdata; - - rcu_read_lock(); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_set_default_mgmt_key(sdata, key_idx); - rcu_read_unlock(); - return 0; } @@ -415,9 +413,6 @@ static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - if (!local->ops->get_survey) - return -EOPNOTSUPP; - return drv_get_survey(local, idx, survey); } @@ -600,7 +595,7 @@ struct iapp_layer2_update { u8 ssap; /* 0 */ u8 control; u8 xid_info[3]; -} __attribute__ ((packed)); +} __packed; static void ieee80211_send_layer2_update(struct sta_info *sta) { @@ -1154,10 +1149,6 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, return -EINVAL; } - /* enable WMM or activate new settings */ - local->hw.conf.flags |= IEEE80211_CONF_QOS; - drv_config(local, IEEE80211_CONF_CHANGE_QOS); - return 0; } @@ -1331,28 +1322,28 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) } static int ieee80211_set_tx_power(struct wiphy *wiphy, - enum tx_power_setting type, int dbm) + enum nl80211_tx_power_setting type, int mbm) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_channel *chan = local->hw.conf.channel; u32 changes = 0; switch (type) { - case TX_POWER_AUTOMATIC: + case NL80211_TX_POWER_AUTOMATIC: local->user_power_level = 
-1; break; - case TX_POWER_LIMITED: - if (dbm < 0) - return -EINVAL; - local->user_power_level = dbm; + case NL80211_TX_POWER_LIMITED: + if (mbm < 0 || (mbm % 100)) + return -EOPNOTSUPP; + local->user_power_level = MBM_TO_DBM(mbm); break; - case TX_POWER_FIXED: - if (dbm < 0) - return -EINVAL; + case NL80211_TX_POWER_FIXED: + if (mbm < 0 || (mbm % 100)) + return -EOPNOTSUPP; /* TODO: move to cfg80211 when it knows the channel */ - if (dbm > chan->max_power) + if (MBM_TO_DBM(mbm) > chan->max_power) return -EINVAL; - local->user_power_level = dbm; + local->user_power_level = MBM_TO_DBM(mbm); break; } @@ -1448,7 +1439,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_conf *conf = &local->hw.conf; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; @@ -1457,11 +1447,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, return -EOPNOTSUPP; if (enabled == sdata->u.mgd.powersave && - timeout == conf->dynamic_ps_forced_timeout) + timeout == local->dynamic_ps_forced_timeout) return 0; sdata->u.mgd.powersave = enabled; - conf->dynamic_ps_forced_timeout = timeout; + local->dynamic_ps_forced_timeout = timeout; /* no change, but if automatic follow powersave */ mutex_lock(&sdata->u.mgd.mtx); @@ -1554,10 +1544,58 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, + bool channel_type_valid, const u8 *buf, size_t len, u64 *cookie) { - return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, - channel_type, buf, len, cookie); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct sta_info *sta; + const struct ieee80211_mgmt *mgmt = (void *)buf; + u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; + + /* Check that we are on the requested channel for transmission */ + if (chan != local->tmp_channel && + chan != local->oper_channel) + return -EBUSY; + if (channel_type_valid && + (channel_type != local->tmp_channel_type && + channel_type != local->_oper_channel_type)) + return -EBUSY; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_ADHOC: + if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) + break; + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->da); + rcu_read_unlock(); + if (!sta) + return -ENOLINK; + break; + case NL80211_IFTYPE_STATION: + if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED)) + flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + break; + default: + return -EOPNOTSUPP; + } + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); + if (!skb) + return -ENOMEM; + skb_reserve(skb, local->hw.extra_tx_headroom); + + memcpy(skb_put(skb, len), buf, len); + + IEEE80211_SKB_CB(skb)->flags = flags; + + skb->dev = sdata->dev; + ieee80211_tx_skb(sdata, skb); + + *cookie = (unsigned long) skb; + return 0; } struct cfg80211_ops mac80211_config_ops = { diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 637929b65ccc1f18b1bc0ad4dc7338170955c4fe..a694c593ff6ac1e17479ae07d26ad1fd02f258ba 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -307,9 +307,6 @@ static const struct file_operations queues_ops = { /* statistics stuff */ -#define 
DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ - DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value) - static ssize_t format_devstat_counter(struct ieee80211_local *local, char __user *userbuf, size_t count, loff_t *ppos, @@ -351,75 +348,16 @@ static const struct file_operations stats_ ##name## _ops = { \ .open = mac80211_open_file_generic, \ }; -#define DEBUGFS_STATS_ADD(name) \ +#define DEBUGFS_STATS_ADD(name, field) \ + debugfs_create_u32(#name, 0400, statsd, (u32 *) &field); +#define DEBUGFS_DEVSTATS_ADD(name) \ debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); -DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", - local->dot11TransmittedFragmentCount); -DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u", - local->dot11MulticastTransmittedFrameCount); -DEBUGFS_STATS_FILE(failed_count, 20, "%u", - local->dot11FailedCount); -DEBUGFS_STATS_FILE(retry_count, 20, "%u", - local->dot11RetryCount); -DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u", - local->dot11MultipleRetryCount); -DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u", - local->dot11FrameDuplicateCount); -DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u", - local->dot11ReceivedFragmentCount); -DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u", - local->dot11MulticastReceivedFrameCount); -DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u", - local->dot11TransmittedFrameCount); -#ifdef CONFIG_MAC80211_DEBUG_COUNTERS -DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u", - local->tx_handlers_drop); -DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u", - local->tx_handlers_queued); -DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u", - local->tx_handlers_drop_unencrypted); -DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u", - local->tx_handlers_drop_fragment); -DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u", - local->tx_handlers_drop_wep); -DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u", - local->tx_handlers_drop_not_assoc); -DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u", - local->tx_handlers_drop_unauth_port); -DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u", - local->rx_handlers_drop); -DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u", - local->rx_handlers_queued); -DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u", - local->rx_handlers_drop_nullfunc); -DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u", - local->rx_handlers_drop_defrag); -DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u", - local->rx_handlers_drop_short); -DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u", - local->rx_handlers_drop_passive_scan); -DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u", - local->tx_expand_skb_head); -DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u", - local->tx_expand_skb_head_cloned); -DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u", - local->rx_expand_skb_head); -DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u", - local->rx_expand_skb_head2); -DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u", - local->rx_handlers_fragments); -DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", - local->tx_status_drop); - -#endif - DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); - void debugfs_hw_add(struct ieee80211_local *local) { struct dentry *phyd = local->hw.wiphy->debugfsdir; @@ -448,38 +386,60 @@ void debugfs_hw_add(struct ieee80211_local *local) if (!statsd) return; - 
DEBUGFS_STATS_ADD(transmitted_fragment_count); - DEBUGFS_STATS_ADD(multicast_transmitted_frame_count); - DEBUGFS_STATS_ADD(failed_count); - DEBUGFS_STATS_ADD(retry_count); - DEBUGFS_STATS_ADD(multiple_retry_count); - DEBUGFS_STATS_ADD(frame_duplicate_count); - DEBUGFS_STATS_ADD(received_fragment_count); - DEBUGFS_STATS_ADD(multicast_received_frame_count); - DEBUGFS_STATS_ADD(transmitted_frame_count); + DEBUGFS_STATS_ADD(transmitted_fragment_count, + local->dot11TransmittedFragmentCount); + DEBUGFS_STATS_ADD(multicast_transmitted_frame_count, + local->dot11MulticastTransmittedFrameCount); + DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount); + DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount); + DEBUGFS_STATS_ADD(multiple_retry_count, + local->dot11MultipleRetryCount); + DEBUGFS_STATS_ADD(frame_duplicate_count, + local->dot11FrameDuplicateCount); + DEBUGFS_STATS_ADD(received_fragment_count, + local->dot11ReceivedFragmentCount); + DEBUGFS_STATS_ADD(multicast_received_frame_count, + local->dot11MulticastReceivedFrameCount); + DEBUGFS_STATS_ADD(transmitted_frame_count, + local->dot11TransmittedFrameCount); #ifdef CONFIG_MAC80211_DEBUG_COUNTERS - DEBUGFS_STATS_ADD(tx_handlers_drop); - DEBUGFS_STATS_ADD(tx_handlers_queued); - DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted); - DEBUGFS_STATS_ADD(tx_handlers_drop_fragment); - DEBUGFS_STATS_ADD(tx_handlers_drop_wep); - DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); - DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); - DEBUGFS_STATS_ADD(rx_handlers_drop); - DEBUGFS_STATS_ADD(rx_handlers_queued); - DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); - DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); - DEBUGFS_STATS_ADD(rx_handlers_drop_short); - DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan); - DEBUGFS_STATS_ADD(tx_expand_skb_head); - DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); - DEBUGFS_STATS_ADD(rx_expand_skb_head); - DEBUGFS_STATS_ADD(rx_expand_skb_head2); - DEBUGFS_STATS_ADD(rx_handlers_fragments); - DEBUGFS_STATS_ADD(tx_status_drop); + DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop); + DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued); + DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted, + local->tx_handlers_drop_unencrypted); + DEBUGFS_STATS_ADD(tx_handlers_drop_fragment, + local->tx_handlers_drop_fragment); + DEBUGFS_STATS_ADD(tx_handlers_drop_wep, + local->tx_handlers_drop_wep); + DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc, + local->tx_handlers_drop_not_assoc); + DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port, + local->tx_handlers_drop_unauth_port); + DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop); + DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued); + DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc, + local->rx_handlers_drop_nullfunc); + DEBUGFS_STATS_ADD(rx_handlers_drop_defrag, + local->rx_handlers_drop_defrag); + DEBUGFS_STATS_ADD(rx_handlers_drop_short, + local->rx_handlers_drop_short); + DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan, + local->rx_handlers_drop_passive_scan); + DEBUGFS_STATS_ADD(tx_expand_skb_head, + local->tx_expand_skb_head); + DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned, + local->tx_expand_skb_head_cloned); + DEBUGFS_STATS_ADD(rx_expand_skb_head, + local->rx_expand_skb_head); + DEBUGFS_STATS_ADD(rx_expand_skb_head2, + local->rx_expand_skb_head2); + DEBUGFS_STATS_ADD(rx_handlers_fragments, + local->rx_handlers_fragments); + DEBUGFS_STATS_ADD(tx_status_drop, + local->tx_status_drop); #endif - DEBUGFS_STATS_ADD(dot11ACKFailureCount); - 
DEBUGFS_STATS_ADD(dot11RTSFailureCount); - DEBUGFS_STATS_ADD(dot11FCSErrorCount); - DEBUGFS_STATS_ADD(dot11RTSSuccessCount); + DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); + DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); + DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); + DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); } diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 97c9e46e859e9dbd081d5964437c9db73440441a..fa5e76e658ef058b5fb87c4d4647c5025a3e4d71 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -143,7 +143,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, len = p - buf; break; case ALG_CCMP: - for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { + for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) { rpn = key->u.ccmp.rx_pn[i]; p += scnprintf(p, sizeof(buf)+buf-p, "%02x%02x%02x%02x%02x%02x\n", diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index e763f1529ddbca6ce54c3af0dd848cd96c643f61..76839d4dfaacabe15c30f886478367ad2b524585 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -30,7 +30,6 @@ static ssize_t sta_ ##name## _read(struct file *file, \ } #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") -#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") #define STA_OPS(name) \ @@ -52,19 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \ STA_FILE(aid, sta.aid, D); STA_FILE(dev, sdata->name, S); -STA_FILE(rx_packets, rx_packets, LU); -STA_FILE(tx_packets, tx_packets, LU); -STA_FILE(rx_bytes, rx_bytes, LU); -STA_FILE(tx_bytes, tx_bytes, LU); -STA_FILE(rx_duplicates, num_duplicates, LU); -STA_FILE(rx_fragments, rx_fragments, LU); -STA_FILE(rx_dropped, rx_dropped, LU); -STA_FILE(tx_fragments, tx_fragments, LU); -STA_FILE(tx_filtered, tx_filtered_count, LU); -STA_FILE(tx_retry_failed, tx_retry_failed, LU); -STA_FILE(tx_retry_count, tx_retry_count, LU); STA_FILE(last_signal, last_signal, D); -STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); static ssize_t sta_flags_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) @@ -134,28 +121,25 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", sta->ampdu_mlme.dialog_token_allocator + 1); p += scnprintf(p, sizeof(buf) + buf - p, - "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); + "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); for (i = 0; i < STA_TID_NUM; i++) { p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", - sta->ampdu_mlme.tid_active_rx[i]); + !!sta->ampdu_mlme.tid_rx[i]); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", - sta->ampdu_mlme.tid_active_rx[i] ? + sta->ampdu_mlme.tid_rx[i] ? sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", - sta->ampdu_mlme.tid_active_rx[i] ? + sta->ampdu_mlme.tid_rx[i] ? sta->ampdu_mlme.tid_rx[i]->ssn : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", - sta->ampdu_mlme.tid_state_tx[i]); + !!sta->ampdu_mlme.tid_tx[i]); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", - sta->ampdu_mlme.tid_state_tx[i] ? + sta->ampdu_mlme.tid_tx[i] ? sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); - p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", - sta->ampdu_mlme.tid_state_tx[i] ? 
- sta->ampdu_mlme.tid_tx[i]->ssn : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", - sta->ampdu_mlme.tid_state_tx[i] ? + sta->ampdu_mlme.tid_tx[i] ? skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\n"); } @@ -210,8 +194,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu if (start) ret = ieee80211_start_tx_ba_session(&sta->sta, tid); else - ret = ieee80211_stop_tx_ba_session(&sta->sta, tid, - WLAN_BACK_RECIPIENT); + ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); } else { __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); ret = 0; @@ -307,6 +290,13 @@ STA_OPS(ht_capa); debugfs_create_file(#name, 0400, \ sta->debugfs.dir, sta, &sta_ ##name## _ops); +#define DEBUGFS_ADD_COUNTER(name, field) \ + if (sizeof(sta->field) == sizeof(u32)) \ + debugfs_create_u32(#name, 0400, sta->debugfs.dir, \ + (u32 *) &sta->field); \ + else \ + debugfs_create_u64(#name, 0400, sta->debugfs.dir, \ + (u64 *) &sta->field); void ieee80211_sta_debugfs_add(struct sta_info *sta) { @@ -339,20 +329,21 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) DEBUGFS_ADD(last_seq_ctrl); DEBUGFS_ADD(agg_status); DEBUGFS_ADD(dev); - DEBUGFS_ADD(rx_packets); - DEBUGFS_ADD(tx_packets); - DEBUGFS_ADD(rx_bytes); - DEBUGFS_ADD(tx_bytes); - DEBUGFS_ADD(rx_duplicates); - DEBUGFS_ADD(rx_fragments); - DEBUGFS_ADD(rx_dropped); - DEBUGFS_ADD(tx_fragments); - DEBUGFS_ADD(tx_filtered); - DEBUGFS_ADD(tx_retry_failed); - DEBUGFS_ADD(tx_retry_count); DEBUGFS_ADD(last_signal); - DEBUGFS_ADD(wep_weak_iv_count); DEBUGFS_ADD(ht_capa); + + DEBUGFS_ADD_COUNTER(rx_packets, rx_packets); + DEBUGFS_ADD_COUNTER(tx_packets, tx_packets); + DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes); + DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes); + DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates); + DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments); + DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped); + DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments); + DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count); + DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); + DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); + DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); } void ieee80211_sta_debugfs_remove(struct sta_info *sta) diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 9c1da0809160215c4178358f8f17be8c6d774e27..14123dce544bf960a57d6f06fdbda6bbcc83f88a 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -16,10 +16,11 @@ static inline int drv_start(struct ieee80211_local *local) might_sleep(); + trace_drv_start(local); local->started = true; smp_mb(); ret = local->ops->start(&local->hw); - trace_drv_start(local, ret); + trace_drv_return_int(local, ret); return ret; } @@ -27,8 +28,9 @@ static inline void drv_stop(struct ieee80211_local *local) { might_sleep(); - local->ops->stop(&local->hw); trace_drv_stop(local); + local->ops->stop(&local->hw); + trace_drv_return_void(local); /* sync away all work on the tasklet before clearing started */ tasklet_disable(&local->tasklet); @@ -46,8 +48,9 @@ static inline int drv_add_interface(struct ieee80211_local *local, might_sleep(); + trace_drv_add_interface(local, vif_to_sdata(vif)); ret = local->ops->add_interface(&local->hw, vif); - trace_drv_add_interface(local, vif_to_sdata(vif), ret); + trace_drv_return_int(local, ret); return ret; } @@ -56,8 +59,9 @@ static inline void drv_remove_interface(struct ieee80211_local *local, { might_sleep(); - local->ops->remove_interface(&local->hw, 
vif); trace_drv_remove_interface(local, vif_to_sdata(vif)); + local->ops->remove_interface(&local->hw, vif); + trace_drv_return_void(local); } static inline int drv_config(struct ieee80211_local *local, u32 changed) @@ -66,8 +70,9 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed) might_sleep(); + trace_drv_config(local, changed); ret = local->ops->config(&local->hw, changed); - trace_drv_config(local, changed, ret); + trace_drv_return_int(local, ret); return ret; } @@ -78,9 +83,10 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, { might_sleep(); + trace_drv_bss_info_changed(local, sdata, info, changed); if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); - trace_drv_bss_info_changed(local, sdata, info, changed); + trace_drv_return_void(local); } static inline u64 drv_prepare_multicast(struct ieee80211_local *local, @@ -88,10 +94,12 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local, { u64 ret = 0; + trace_drv_prepare_multicast(local, mc_list->count); + if (local->ops->prepare_multicast) ret = local->ops->prepare_multicast(&local->hw, mc_list); - trace_drv_prepare_multicast(local, mc_list->count, ret); + trace_drv_return_u64(local, ret); return ret; } @@ -103,19 +111,21 @@ static inline void drv_configure_filter(struct ieee80211_local *local, { might_sleep(); - local->ops->configure_filter(&local->hw, changed_flags, total_flags, - multicast); trace_drv_configure_filter(local, changed_flags, total_flags, multicast); + local->ops->configure_filter(&local->hw, changed_flags, total_flags, + multicast); + trace_drv_return_void(local); } static inline int drv_set_tim(struct ieee80211_local *local, struct ieee80211_sta *sta, bool set) { int ret = 0; + trace_drv_set_tim(local, sta, set); if (local->ops->set_tim) ret = local->ops->set_tim(&local->hw, sta, set); - trace_drv_set_tim(local, sta, set, ret); + trace_drv_return_int(local, ret); return ret; } @@ -129,8 +139,9 @@ static inline int drv_set_key(struct ieee80211_local *local, might_sleep(); + trace_drv_set_key(local, cmd, sdata, sta, key); ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); - trace_drv_set_key(local, cmd, sdata, sta, key, ret); + trace_drv_return_int(local, ret); return ret; } @@ -145,10 +156,11 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local, if (sta) ista = &sta->sta; + trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); if (local->ops->update_tkip_key) local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, ista, iv32, phase1key); - trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); + trace_drv_return_void(local); } static inline int drv_hw_scan(struct ieee80211_local *local, @@ -159,8 +171,9 @@ static inline int drv_hw_scan(struct ieee80211_local *local, might_sleep(); + trace_drv_hw_scan(local, sdata, req); ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); - trace_drv_hw_scan(local, sdata, req, ret); + trace_drv_return_int(local, ret); return ret; } @@ -168,18 +181,20 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local) { might_sleep(); + trace_drv_sw_scan_start(local); if (local->ops->sw_scan_start) local->ops->sw_scan_start(&local->hw); - trace_drv_sw_scan_start(local); + trace_drv_return_void(local); } static inline void drv_sw_scan_complete(struct ieee80211_local *local) { might_sleep(); + trace_drv_sw_scan_complete(local); if (local->ops->sw_scan_complete) local->ops->sw_scan_complete(&local->hw); - 
trace_drv_sw_scan_complete(local); + trace_drv_return_void(local); } static inline int drv_get_stats(struct ieee80211_local *local, @@ -211,9 +226,10 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local, might_sleep(); + trace_drv_set_rts_threshold(local, value); if (local->ops->set_rts_threshold) ret = local->ops->set_rts_threshold(&local->hw, value); - trace_drv_set_rts_threshold(local, value, ret); + trace_drv_return_int(local, ret); return ret; } @@ -223,12 +239,13 @@ static inline int drv_set_coverage_class(struct ieee80211_local *local, int ret = 0; might_sleep(); + trace_drv_set_coverage_class(local, value); if (local->ops->set_coverage_class) local->ops->set_coverage_class(&local->hw, value); else ret = -EOPNOTSUPP; - trace_drv_set_coverage_class(local, value, ret); + trace_drv_return_int(local, ret); return ret; } @@ -237,9 +254,10 @@ static inline void drv_sta_notify(struct ieee80211_local *local, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { + trace_drv_sta_notify(local, sdata, cmd, sta); if (local->ops->sta_notify) local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); - trace_drv_sta_notify(local, sdata, cmd, sta); + trace_drv_return_void(local); } static inline int drv_sta_add(struct ieee80211_local *local, @@ -250,13 +268,11 @@ static inline int drv_sta_add(struct ieee80211_local *local, might_sleep(); + trace_drv_sta_add(local, sdata, sta); if (local->ops->sta_add) ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); - else if (local->ops->sta_notify) - local->ops->sta_notify(&local->hw, &sdata->vif, - STA_NOTIFY_ADD, sta); - trace_drv_sta_add(local, sdata, sta, ret); + trace_drv_return_int(local, ret); return ret; } @@ -267,13 +283,11 @@ static inline void drv_sta_remove(struct ieee80211_local *local, { might_sleep(); + trace_drv_sta_remove(local, sdata, sta); if (local->ops->sta_remove) local->ops->sta_remove(&local->hw, &sdata->vif, sta); - else if (local->ops->sta_notify) - local->ops->sta_notify(&local->hw, &sdata->vif, - STA_NOTIFY_REMOVE, sta); - trace_drv_sta_remove(local, sdata, sta); + trace_drv_return_void(local); } static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, @@ -283,9 +297,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, might_sleep(); + trace_drv_conf_tx(local, queue, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, queue, params); - trace_drv_conf_tx(local, queue, params, ret); + trace_drv_return_int(local, ret); return ret; } @@ -295,9 +310,10 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local) might_sleep(); + trace_drv_get_tsf(local); if (local->ops->get_tsf) ret = local->ops->get_tsf(&local->hw); - trace_drv_get_tsf(local, ret); + trace_drv_return_u64(local, ret); return ret; } @@ -305,18 +321,20 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) { might_sleep(); + trace_drv_set_tsf(local, tsf); if (local->ops->set_tsf) local->ops->set_tsf(&local->hw, tsf); - trace_drv_set_tsf(local, tsf); + trace_drv_return_void(local); } static inline void drv_reset_tsf(struct ieee80211_local *local) { might_sleep(); + trace_drv_reset_tsf(local); if (local->ops->reset_tsf) local->ops->reset_tsf(&local->hw); - trace_drv_reset_tsf(local); + trace_drv_return_void(local); } static inline int drv_tx_last_beacon(struct ieee80211_local *local) @@ -325,9 +343,10 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local) might_sleep(); + trace_drv_tx_last_beacon(local); if (local->ops->tx_last_beacon) ret = 
local->ops->tx_last_beacon(&local->hw); - trace_drv_tx_last_beacon(local, ret); + trace_drv_return_int(local, ret); return ret; } @@ -338,10 +357,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, u16 *ssn) { int ret = -EOPNOTSUPP; + + might_sleep(); + + trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn); + if (local->ops->ampdu_action) ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, sta, tid, ssn); - trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret); + + trace_drv_return_int(local, ret); + return ret; } @@ -349,9 +375,14 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx, struct survey_info *survey) { int ret = -EOPNOTSUPP; + + trace_drv_get_survey(local, idx, survey); + if (local->ops->get_survey) ret = local->ops->get_survey(&local->hw, idx, survey); - /* trace_drv_get_survey(local, idx, survey, ret); */ + + trace_drv_return_int(local, ret); + return ret; } @@ -370,6 +401,7 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop) trace_drv_flush(local, drop); if (local->ops->flush) local->ops->flush(&local->hw, drop); + trace_drv_return_void(local); } static inline void drv_channel_switch(struct ieee80211_local *local, @@ -377,9 +409,9 @@ static inline void drv_channel_switch(struct ieee80211_local *local, { might_sleep(); - local->ops->channel_switch(&local->hw, ch_switch); - trace_drv_channel_switch(local, ch_switch); + local->ops->channel_switch(&local->hw, ch_switch); + trace_drv_return_void(local); } #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 6a9b2342a9c2d3982c83d235149762271a4f9c83..5d5d2a97466837b75b14961bc3b14a48bd5d488e 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -36,20 +36,58 @@ static inline void trace_ ## name(proto) {} * Tracing for driver callbacks. 
*/ -TRACE_EVENT(drv_start, - TP_PROTO(struct ieee80211_local *local, int ret), +TRACE_EVENT(drv_return_void, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local), + TP_STRUCT__entry( + LOCAL_ENTRY + ), + TP_fast_assign( + LOCAL_ASSIGN; + ), + TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) +); +TRACE_EVENT(drv_return_int, + TP_PROTO(struct ieee80211_local *local, int ret), TP_ARGS(local, ret), - TP_STRUCT__entry( LOCAL_ENTRY __field(int, ret) ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) +); +TRACE_EVENT(drv_return_u64, + TP_PROTO(struct ieee80211_local *local, u64 ret), + TP_ARGS(local, ret), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u64, ret) + ), TP_fast_assign( LOCAL_ASSIGN; __entry->ret = ret; ), + TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) +); + +TRACE_EVENT(drv_start, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), TP_printk( LOCAL_PR_FMT, LOCAL_PR_ARG @@ -76,28 +114,25 @@ TRACE_EVENT(drv_stop, TRACE_EVENT(drv_add_interface, TP_PROTO(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - int ret), + struct ieee80211_sub_if_data *sdata), - TP_ARGS(local, sdata, ret), + TP_ARGS(local, sdata), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY __array(char, addr, 6) - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; memcpy(__entry->addr, sdata->vif.addr, 6); - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d", - LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret + LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr ) ); @@ -126,15 +161,13 @@ TRACE_EVENT(drv_remove_interface, TRACE_EVENT(drv_config, TP_PROTO(struct ieee80211_local *local, - u32 changed, - int ret), + u32 changed), - TP_ARGS(local, changed, ret), + TP_ARGS(local, changed), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, changed) - __field(int, ret) __field(u32, flags) __field(int, power_level) __field(int, dynamic_ps_timeout) @@ -150,7 +183,6 @@ TRACE_EVENT(drv_config, TP_fast_assign( LOCAL_ASSIGN; __entry->changed = changed; - __entry->ret = ret; __entry->flags = local->hw.conf.flags; __entry->power_level = local->hw.conf.power_level; __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; @@ -164,8 +196,8 @@ TRACE_EVENT(drv_config, ), TP_printk( - LOCAL_PR_FMT " ch:%#x freq:%d ret:%d", - LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret + LOCAL_PR_FMT " ch:%#x freq:%d", + LOCAL_PR_ARG, __entry->changed, __entry->center_freq ) ); @@ -220,26 +252,23 @@ TRACE_EVENT(drv_bss_info_changed, ); TRACE_EVENT(drv_prepare_multicast, - TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), + TP_PROTO(struct ieee80211_local *local, int mc_count), - TP_ARGS(local, mc_count, ret), + TP_ARGS(local, mc_count), TP_STRUCT__entry( LOCAL_ENTRY __field(int, mc_count) - __field(u64, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->mc_count = mc_count; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT " prepare mc (%d): %llx", - LOCAL_PR_ARG, __entry->mc_count, - (unsigned long long) __entry->ret + LOCAL_PR_FMT " prepare mc (%d)", + LOCAL_PR_ARG, __entry->mc_count ) ); @@ -273,27 +302,25 @@ TRACE_EVENT(drv_configure_filter, TRACE_EVENT(drv_set_tim, TP_PROTO(struct ieee80211_local *local, - struct ieee80211_sta *sta, bool set, int ret), + struct ieee80211_sta *sta, bool set), - TP_ARGS(local, sta, set, ret), + TP_ARGS(local, sta, set), 
TP_STRUCT__entry( LOCAL_ENTRY STA_ENTRY __field(bool, set) - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; STA_ASSIGN; __entry->set = set; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d", - LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret + LOCAL_PR_FMT STA_PR_FMT " set:%d", + LOCAL_PR_ARG, STA_PR_FMT, __entry->set ) ); @@ -301,9 +328,9 @@ TRACE_EVENT(drv_set_key, TP_PROTO(struct ieee80211_local *local, enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, int ret), + struct ieee80211_key_conf *key), - TP_ARGS(local, cmd, sdata, sta, key, ret), + TP_ARGS(local, cmd, sdata, sta, key), TP_STRUCT__entry( LOCAL_ENTRY @@ -313,7 +340,6 @@ TRACE_EVENT(drv_set_key, __field(u8, hw_key_idx) __field(u8, flags) __field(s8, keyidx) - __field(int, ret) ), TP_fast_assign( @@ -324,12 +350,11 @@ TRACE_EVENT(drv_set_key, __entry->flags = key->flags; __entry->keyidx = key->keyidx; __entry->hw_key_idx = key->hw_key_idx; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", - LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG ) ); @@ -364,25 +389,23 @@ TRACE_EVENT(drv_update_tkip_key, TRACE_EVENT(drv_hw_scan, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - struct cfg80211_scan_request *req, int ret), + struct cfg80211_scan_request *req), - TP_ARGS(local, sdata, req, ret), + TP_ARGS(local, sdata, req), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT " ret:%d", - LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret + LOCAL_PR_FMT VIF_PR_FMT, + LOCAL_PR_ARG,VIF_PR_ARG ) ); @@ -479,48 +502,44 @@ TRACE_EVENT(drv_get_tkip_seq, ); TRACE_EVENT(drv_set_rts_threshold, - TP_PROTO(struct ieee80211_local *local, u32 value, int ret), + TP_PROTO(struct ieee80211_local *local, u32 value), - TP_ARGS(local, value, ret), + TP_ARGS(local, value), TP_STRUCT__entry( LOCAL_ENTRY __field(u32, value) - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; - __entry->ret = ret; __entry->value = value; ), TP_printk( - LOCAL_PR_FMT " value:%d ret:%d", - LOCAL_PR_ARG, __entry->value, __entry->ret + LOCAL_PR_FMT " value:%d", + LOCAL_PR_ARG, __entry->value ) ); TRACE_EVENT(drv_set_coverage_class, - TP_PROTO(struct ieee80211_local *local, u8 value, int ret), + TP_PROTO(struct ieee80211_local *local, u8 value), - TP_ARGS(local, value, ret), + TP_ARGS(local, value), TP_STRUCT__entry( LOCAL_ENTRY __field(u8, value) - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; - __entry->ret = ret; __entry->value = value; ), TP_printk( - LOCAL_PR_FMT " value:%d ret:%d", - LOCAL_PR_ARG, __entry->value, __entry->ret + LOCAL_PR_FMT " value:%d", + LOCAL_PR_ARG, __entry->value ) ); @@ -555,27 +574,25 @@ TRACE_EVENT(drv_sta_notify, TRACE_EVENT(drv_sta_add, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - struct ieee80211_sta *sta, int ret), + struct ieee80211_sta *sta), - TP_ARGS(local, sdata, sta, ret), + TP_ARGS(local, sdata, sta), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY STA_ENTRY - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", - LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG ) ); @@ -606,10 +623,9 
@@ TRACE_EVENT(drv_sta_remove, TRACE_EVENT(drv_conf_tx, TP_PROTO(struct ieee80211_local *local, u16 queue, - const struct ieee80211_tx_queue_params *params, - int ret), + const struct ieee80211_tx_queue_params *params), - TP_ARGS(local, queue, params, ret), + TP_ARGS(local, queue, params), TP_STRUCT__entry( LOCAL_ENTRY @@ -618,13 +634,11 @@ TRACE_EVENT(drv_conf_tx, __field(u16, cw_min) __field(u16, cw_max) __field(u8, aifs) - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; __entry->queue = queue; - __entry->ret = ret; __entry->txop = params->txop; __entry->cw_max = params->cw_max; __entry->cw_min = params->cw_min; @@ -632,29 +646,27 @@ TRACE_EVENT(drv_conf_tx, ), TP_printk( - LOCAL_PR_FMT " queue:%d ret:%d", - LOCAL_PR_ARG, __entry->queue, __entry->ret + LOCAL_PR_FMT " queue:%d", + LOCAL_PR_ARG, __entry->queue ) ); TRACE_EVENT(drv_get_tsf, - TP_PROTO(struct ieee80211_local *local, u64 ret), + TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local, ret), + TP_ARGS(local), TP_STRUCT__entry( LOCAL_ENTRY - __field(u64, ret) ), TP_fast_assign( LOCAL_ASSIGN; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT " ret:%llu", - LOCAL_PR_ARG, (unsigned long long)__entry->ret + LOCAL_PR_FMT, + LOCAL_PR_ARG ) ); @@ -698,23 +710,21 @@ TRACE_EVENT(drv_reset_tsf, ); TRACE_EVENT(drv_tx_last_beacon, - TP_PROTO(struct ieee80211_local *local, int ret), + TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local, ret), + TP_ARGS(local), TP_STRUCT__entry( LOCAL_ENTRY - __field(int, ret) ), TP_fast_assign( LOCAL_ASSIGN; - __entry->ret = ret; ), TP_printk( - LOCAL_PR_FMT " ret:%d", - LOCAL_PR_ARG, __entry->ret + LOCAL_PR_FMT, + LOCAL_PR_ARG ) ); @@ -723,9 +733,9 @@ TRACE_EVENT(drv_ampdu_action, struct ieee80211_sub_if_data *sdata, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, - u16 *ssn, int ret), + u16 *ssn), - TP_ARGS(local, sdata, action, sta, tid, ssn, ret), + TP_ARGS(local, sdata, action, sta, tid, ssn), TP_STRUCT__entry( LOCAL_ENTRY @@ -733,7 +743,6 @@ TRACE_EVENT(drv_ampdu_action, __field(u32, action) __field(u16, tid) __field(u16, ssn) - __field(int, ret) VIF_ENTRY ), @@ -741,15 +750,36 @@ TRACE_EVENT(drv_ampdu_action, LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; - __entry->ret = ret; __entry->action = action; __entry->tid = tid; __entry->ssn = ssn ? 
*ssn : 0;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+	)
+);
+
+TRACE_EVENT(drv_get_survey,
+	TP_PROTO(struct ieee80211_local *local, int idx,
+		 struct survey_info *survey),
+
+	TP_ARGS(local, idx, survey),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(int, idx)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->idx = idx;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " idx:%d",
+		LOCAL_PR_ARG, __entry->idx
 	)
 );
 
@@ -851,25 +881,23 @@ TRACE_EVENT(api_start_tx_ba_cb,
 );
 
 TRACE_EVENT(api_stop_tx_ba_session,
-	TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
+	TP_PROTO(struct ieee80211_sta *sta, u16 tid),
 
-	TP_ARGS(sta, tid, initiator),
+	TP_ARGS(sta, tid),
 
 	TP_STRUCT__entry(
 		STA_ENTRY
 		__field(u16, tid)
-		__field(u16, initiator)
 	),
 
 	TP_fast_assign(
 		STA_ASSIGN;
 		__entry->tid = tid;
-		__entry->initiator = initiator;
 	),
 
 	TP_printk(
-		STA_PR_FMT " tid:%d initiator:%d",
-		STA_PR_ARG, __entry->tid, __entry->initiator
+		STA_PR_FMT " tid:%d",
+		STA_PR_ARG, __entry->tid
 	)
 );
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 2ab106a0a49189fb23169c1d2b4b5717c67f7731..9d101fb33861f67ce1c9058b60f680709ca2d27c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -6,7 +6,7 @@
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2006-2007 Jiri Benc
  * Copyright 2007, Michael Wu
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -29,7 +29,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
 	memset(ht_cap, 0, sizeof(*ht_cap));
 
-	if (!ht_cap_ie)
+	if (!ht_cap_ie || !sband->ht_cap.ht_supported)
 		return;
 
 	ht_cap->ht_supported = true;
@@ -105,6 +105,8 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
 {
 	int i;
 
+	cancel_work_sync(&sta->ampdu_mlme.work);
+
 	for (i = 0; i < STA_TID_NUM; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -112,6 +114,43 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
 	}
 }
 
+void ieee80211_ba_session_work(struct work_struct *work)
+{
+	struct sta_info *sta =
+		container_of(work, struct sta_info, ampdu_mlme.work);
+	struct tid_ampdu_tx *tid_tx;
+	int tid;
+
+	/*
+	 * When this flag is set, new sessions should be
+	 * blocked, and existing sessions will be torn
+	 * down by the code that set the flag, so this
+	 * need not run.
+	 */
+	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+		return;
+
+	mutex_lock(&sta->ampdu_mlme.mtx);
+	for (tid = 0; tid < STA_TID_NUM; tid++) {
+		if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
+			___ieee80211_stop_rx_ba_session(
+				sta, tid, WLAN_BACK_RECIPIENT,
+				WLAN_REASON_QSTA_TIMEOUT);
+
+		tid_tx = sta->ampdu_mlme.tid_tx[tid];
+		if (!tid_tx)
+			continue;
+
+		if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
+			ieee80211_tx_ba_session_handle_start(sta, tid);
+		else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
+					    &tid_tx->state))
+			___ieee80211_stop_tx_ba_session(sta, tid,
+							WLAN_BACK_INITIATOR);
+	}
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+}
+
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
 			  const u8 *da, u16 tid,
 			  u16 initiator, u16 reason_code)
@@ -176,13 +215,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
 
 	if (initiator == WLAN_BACK_INITIATOR)
 		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
-	else { /* WLAN_BACK_RECIPIENT */
-		spin_lock_bh(&sta->lock);
-		if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
-			___ieee80211_stop_tx_ba_session(sta, tid,
-							WLAN_BACK_RECIPIENT);
-		spin_unlock_bh(&sta->lock);
-	}
+	else
+		__ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT);
 }
 
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b2cc1fda6cfdb6ba1054aa2bb224f9fce7bba5e8..c691780725a7fcfbaef4ac39802540288696ee6a 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -43,6 +43,8 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 {
 	u16 auth_alg, auth_transaction, status_code;
 
+	lockdep_assert_held(&sdata->u.ibss.mtx);
+
 	if (len < 24 + 6)
 		return;
 
@@ -78,6 +80,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	u32 bss_change;
 	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
 
+	lockdep_assert_held(&ifibss->mtx);
+
 	/* Reset own TSF to allow time synchronization work.
*/ drv_reset_tsf(local); @@ -172,11 +176,13 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, rcu_assign_pointer(ifibss->presp, skb); sdata->vif.bss_conf.beacon_int = beacon_int; + sdata->vif.bss_conf.basic_rates = basic_rates; bss_change = BSS_CHANGED_BEACON_INT; bss_change |= ieee80211_reset_erp_info(sdata); bss_change |= BSS_CHANGED_BSSID; bss_change |= BSS_CHANGED_BEACON; bss_change |= BSS_CHANGED_BEACON_ENABLED; + bss_change |= BSS_CHANGED_BASIC_RATES; bss_change |= BSS_CHANGED_IBSS; sdata->vif.bss_conf.ibss_joined = true; ieee80211_bss_info_change_notify(sdata, bss_change); @@ -203,6 +209,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, int i, j; u16 beacon_int = cbss->beacon_interval; + lockdep_assert_held(&sdata->u.ibss.mtx); + if (beacon_int < 10) beacon_int = 10; @@ -447,6 +455,8 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) int active = 0; struct sta_info *sta; + lockdep_assert_held(&sdata->u.ibss.mtx); + rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { @@ -471,6 +481,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + lockdep_assert_held(&ifibss->mtx); + mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); @@ -503,6 +515,8 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) u16 capability; int i; + lockdep_assert_held(&ifibss->mtx); + if (ifibss->fixed_bssid) { memcpy(bssid, ifibss->bssid, ETH_ALEN); } else { @@ -529,7 +543,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) sdata->drop_unencrypted = 0; __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, - ifibss->channel, 3, /* first two are basic */ + ifibss->channel, ifibss->basic_rates, capability, 0); } @@ -547,6 +561,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) int active_ibss; u16 capability; + lockdep_assert_held(&ifibss->mtx); + active_ibss = ieee80211_sta_active_ibss(sdata); #ifdef CONFIG_MAC80211_IBSS_DEBUG printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", @@ -635,6 +651,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *resp; u8 *pos, *end; + lockdep_assert_held(&ifibss->mtx); + if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || len < 24 + 2 || !ifibss->presp) return; @@ -727,8 +745,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); } -static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) +void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; @@ -738,6 +756,8 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); + mutex_lock(&sdata->u.ibss.mtx); + switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_PROBE_REQ: ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len); @@ -755,35 +775,22 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; } - kfree_skb(skb); + mutex_unlock(&sdata->u.ibss.mtx); } -static void ieee80211_ibss_work(struct work_struct *work) +void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) { - struct 
ieee80211_sub_if_data *sdata = - container_of(work, struct ieee80211_sub_if_data, u.ibss.work); - struct ieee80211_local *local = sdata->local; - struct ieee80211_if_ibss *ifibss; - struct sk_buff *skb; - - if (WARN_ON(local->suspended)) - return; - - if (!ieee80211_sdata_running(sdata)) - return; - - if (local->scanning) - return; - - if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC)) - return; - ifibss = &sdata->u.ibss; + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - while ((skb = skb_dequeue(&ifibss->skb_queue))) - ieee80211_ibss_rx_queued_mgmt(sdata, skb); + mutex_lock(&ifibss->mtx); - if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request)) - return; + /* + * Work could be scheduled after scan or similar + * when we aren't even joined (or trying) with a + * network. + */ + if (!ifibss->ssid_len) + goto out; switch (ifibss->state) { case IEEE80211_IBSS_MLME_SEARCH: @@ -796,6 +803,9 @@ static void ieee80211_ibss_work(struct work_struct *work) WARN_ON(1); break; } + + out: + mutex_unlock(&ifibss->mtx); } static void ieee80211_ibss_timer(unsigned long data) @@ -810,8 +820,7 @@ static void ieee80211_ibss_timer(unsigned long data) return; } - set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); - ieee80211_queue_work(&local->hw, &ifibss->work); + ieee80211_queue_work(&local->hw, &sdata->work); } #ifdef CONFIG_PM @@ -819,7 +828,6 @@ void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - cancel_work_sync(&ifibss->work); if (del_timer_sync(&ifibss->timer)) ifibss->timer_running = true; } @@ -839,10 +847,9 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - INIT_WORK(&ifibss->work, ieee80211_ibss_work); setup_timer(&ifibss->timer, ieee80211_ibss_timer, (unsigned long) sdata); - skb_queue_head_init(&ifibss->skb_queue); + mutex_init(&ifibss->mtx); } /* scan finished notification */ @@ -856,45 +863,28 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) continue; if (sdata->vif.type != NL80211_IFTYPE_ADHOC) continue; - if (!sdata->u.ibss.ssid_len) - continue; sdata->u.ibss.last_scan_completed = jiffies; - mod_timer(&sdata->u.ibss.timer, 0); + ieee80211_queue_work(&local->hw, &sdata->work); } mutex_unlock(&local->iflist_mtx); } -ieee80211_rx_result -ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_mgmt *mgmt; - u16 fc; - - if (skb->len < 24) - return RX_DROP_MONITOR; - - mgmt = (struct ieee80211_mgmt *) skb->data; - fc = le16_to_cpu(mgmt->frame_control); - - switch (fc & IEEE80211_FCTL_STYPE) { - case IEEE80211_STYPE_PROBE_RESP: - case IEEE80211_STYPE_BEACON: - case IEEE80211_STYPE_PROBE_REQ: - case IEEE80211_STYPE_AUTH: - skb_queue_tail(&sdata->u.ibss.skb_queue, skb); - ieee80211_queue_work(&local->hw, &sdata->u.ibss.work); - return RX_QUEUED; - } - - return RX_DROP_MONITOR; -} - int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { struct sk_buff *skb; + skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + + 36 /* bitrates */ + + 34 /* SSID */ + + 3 /* DS params */ + + 4 /* IBSS params */ + + params->ie_len); + if (!skb) + return -ENOMEM; + + mutex_lock(&sdata->u.ibss.mtx); + if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); sdata->u.ibss.fixed_bssid = true; @@ -902,6 +892,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 
sdata->u.ibss.fixed_bssid = false; sdata->u.ibss.privacy = params->privacy; + sdata->u.ibss.basic_rates = params->basic_rates; sdata->vif.bss_conf.beacon_int = params->beacon_interval; @@ -922,34 +913,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->u.ibss.ie_len = params->ie_len; } - skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + - 36 /* bitrates */ + - 34 /* SSID */ + - 3 /* DS params */ + - 4 /* IBSS params */ + - params->ie_len); - if (!skb) - return -ENOMEM; - sdata->u.ibss.skb = skb; sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; sdata->u.ibss.ibss_join_req = jiffies; memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN); - - /* - * The ssid_len setting below is used to see whether - * we are active, and we need all other settings - * before that may get visible. - */ - mb(); - sdata->u.ibss.ssid_len = params->ssid_len; ieee80211_recalc_idle(sdata->local); - set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); - ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work); + ieee80211_queue_work(&sdata->local->hw, &sdata->work); + + mutex_unlock(&sdata->u.ibss.mtx); return 0; } @@ -957,11 +932,33 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) { struct sk_buff *skb; + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct cfg80211_bss *cbss; + u16 capability; + int active_ibss; - del_timer_sync(&sdata->u.ibss.timer); - clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); - cancel_work_sync(&sdata->u.ibss.work); - clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); + mutex_lock(&sdata->u.ibss.mtx); + + active_ibss = ieee80211_sta_active_ibss(sdata); + + if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { + capability = WLAN_CAPABILITY_IBSS; + + if (ifibss->privacy) + capability |= WLAN_CAPABILITY_PRIVACY; + + cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel, + ifibss->bssid, ifibss->ssid, + ifibss->ssid_len, WLAN_CAPABILITY_IBSS | + WLAN_CAPABILITY_PRIVACY, + capability); + + if (cbss) { + cfg80211_unlink_bss(local->hw.wiphy, cbss); + cfg80211_put_bss(cbss); + } + } sta_info_flush(sdata->local, sdata); @@ -975,10 +972,14 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) synchronize_rcu(); kfree_skb(skb); - skb_queue_purge(&sdata->u.ibss.skb_queue); + skb_queue_purge(&sdata->skb_queue); memset(sdata->u.ibss.bssid, 0, ETH_ALEN); sdata->u.ibss.ssid_len = 0; + del_timer_sync(&sdata->u.ibss.timer); + + mutex_unlock(&sdata->u.ibss.mtx); + ieee80211_recalc_idle(sdata->local); return 0; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1a9e2da37a93d91320a6f20ec7594423e535da77..65e0ed6c29753c07084e9a884868df2bd40f2474 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -238,6 +238,7 @@ enum ieee80211_work_type { IEEE80211_WORK_ABORT, IEEE80211_WORK_DIRECT_PROBE, IEEE80211_WORK_AUTH, + IEEE80211_WORK_ASSOC_BEACON_WAIT, IEEE80211_WORK_ASSOC, IEEE80211_WORK_REMAIN_ON_CHANNEL, }; @@ -325,7 +326,6 @@ struct ieee80211_if_managed { struct timer_list conn_mon_timer; struct timer_list bcn_mon_timer; struct timer_list chswitch_timer; - struct work_struct work; struct work_struct monitor_work; struct work_struct chswitch_work; struct work_struct beacon_connection_loss_work; @@ -340,8 +340,6 @@ struct ieee80211_if_managed { u16 aid; - struct sk_buff_head skb_queue; - unsigned long timers_running; /* used for quiesce/restart */ bool 
powersave; /* powersave requested for this iface */ enum ieee80211_smps_mode req_smps, /* requested smps mode */ @@ -380,19 +378,15 @@ struct ieee80211_if_managed { int last_cqm_event_signal; }; -enum ieee80211_ibss_request { - IEEE80211_IBSS_REQ_RUN = 0, -}; - struct ieee80211_if_ibss { struct timer_list timer; - struct work_struct work; - struct sk_buff_head skb_queue; + struct mutex mtx; - unsigned long request; unsigned long last_scan_completed; + u32 basic_rates; + bool timer_running; bool fixed_bssid; @@ -416,11 +410,9 @@ struct ieee80211_if_ibss { }; struct ieee80211_if_mesh { - struct work_struct work; struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; struct timer_list mesh_path_root_timer; - struct sk_buff_head skb_queue; unsigned long timers_running; @@ -517,6 +509,11 @@ struct ieee80211_sub_if_data { u16 sequence_number; + struct work_struct work; + struct sk_buff_head skb_queue; + + bool arp_filter_state; + /* * AP this belongs to: self in AP mode and * corresponding AP in VLAN mode, NULL for @@ -569,11 +566,15 @@ ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata, #endif } +enum sdata_queue_type { + IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, + IEEE80211_SDATA_QUEUE_AGG_START = 1, + IEEE80211_SDATA_QUEUE_AGG_STOP = 2, +}; + enum { IEEE80211_RX_MSG = 1, IEEE80211_TX_STATUS_MSG = 2, - IEEE80211_DELBA_MSG = 3, - IEEE80211_ADDBA_MSG = 4, }; enum queue_stop_reason { @@ -724,13 +725,7 @@ struct ieee80211_local { struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; - /* - * This lock is used to prevent concurrent A-MPDU - * session start/stop processing, this thus also - * synchronises the ->ampdu_action() callback to - * drivers and limits it to one at a time. - */ - spinlock_t ampdu_lock; + atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; /* number of interfaces with corresponding IFF_ flags */ atomic_t iff_allmultis, iff_promiscs; @@ -746,10 +741,10 @@ struct ieee80211_local { struct mutex iflist_mtx; /* - * Key lock, protects sdata's key_list and sta_info's + * Key mutex, protects sdata's key_list and sta_info's * key pointers (write access, they're RCU.) */ - spinlock_t key_lock; + struct mutex key_mtx; /* Scanning and BSS list */ @@ -851,6 +846,15 @@ struct ieee80211_local { struct work_struct dynamic_ps_disable_work; struct timer_list dynamic_ps_timer; struct notifier_block network_latency_notifier; + struct notifier_block ifa_notifier; + + /* + * The dynamic ps timeout configured from user space via WEXT - + * this will override whatever chosen by mac80211 internally. 
+ */ + int dynamic_ps_forced_timeout; + int dynamic_ps_user_timeout; + bool disable_dynamic_ps; int user_power_level; /* in dBm */ int power_constr_level; /* in dBm */ @@ -874,9 +878,8 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) return netdev_priv(dev); } -/* this struct represents 802.11n's RA/TID combination along with our vif */ +/* this struct represents 802.11n's RA/TID combination */ struct ieee80211_ra_tid { - struct ieee80211_vif *vif; u8 ra[ETH_ALEN]; u16 tid; }; @@ -985,29 +988,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req, void *cookie); -int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type, - const u8 *buf, size_t len, u64 *cookie); -ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb); void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); int ieee80211_max_network_latency(struct notifier_block *nb, unsigned long data, void *dummy); +int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_sw_ie *sw_elem, struct ieee80211_bss *bss, u64 timestamp); void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); -ieee80211_rx_result -ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, u8 *bssid, u8 *addr, u32 supp_rates, gfp_t gfp); @@ -1016,6 +1015,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); + +/* mesh code */ +void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); /* scan/BSS handling */ void ieee80211_scan_work(struct work_struct *work); @@ -1084,7 +1091,7 @@ struct ieee80211_tx_status_rtap_hdr { u8 padding_for_rate; __le16 tx_flags; u8 data_retries; -} __attribute__ ((packed)); +} __packed; /* HT */ @@ -1099,6 +1106,8 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps, const u8 *da, const u8 *bssid); +void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason); void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason); void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); @@ -1118,6 +1127,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_back_parties initiator); int 
___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_back_parties initiator); +void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); +void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); +void ieee80211_ba_session_work(struct work_struct *work); +void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 50deb017fd6e771c923f91b9e46264017a4e1cf0..ebbe264e2b0bc659b0e1303bcec285c87158df9f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -249,6 +249,8 @@ static int ieee80211_open(struct net_device *dev) local->fif_other_bss++; ieee80211_configure_filter(local); + + netif_carrier_on(dev); break; default: res = drv_add_interface(local, &sdata->vif); @@ -268,7 +270,6 @@ static int ieee80211_open(struct net_device *dev) changed |= ieee80211_reset_erp_info(sdata); ieee80211_bss_info_change_notify(sdata, changed); - ieee80211_enable_keys(sdata); if (sdata->vif.type == NL80211_IFTYPE_STATION) netif_carrier_off(dev); @@ -321,15 +322,6 @@ static int ieee80211_open(struct net_device *dev) ieee80211_recalc_ps(local, -1); - /* - * ieee80211_sta_work is disabled while network interface - * is down. Therefore, some configuration changes may not - * yet be effective. Trigger execution of ieee80211_sta_work - * to fix this. - */ - if (sdata->vif.type == NL80211_IFTYPE_STATION) - ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); - netif_tx_start_all_queues(dev); return 0; @@ -349,7 +341,6 @@ static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; - struct sta_info *sta; unsigned long flags; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; @@ -365,18 +356,6 @@ static int ieee80211_stop(struct net_device *dev) */ ieee80211_work_purge(sdata); - /* - * Now delete all active aggregation sessions. - */ - rcu_read_lock(); - - list_for_each_entry_rcu(sta, &local->sta_list, list) { - if (sta->sdata == sdata) - ieee80211_sta_tear_down_BA_sessions(sta); - } - - rcu_read_unlock(); - /* * Remove all stations associated with this interface. * @@ -483,27 +462,14 @@ static int ieee80211_stop(struct net_device *dev) * whether the interface is running, which, at this point, * it no longer is. */ - cancel_work_sync(&sdata->u.mgd.work); cancel_work_sync(&sdata->u.mgd.chswitch_work); cancel_work_sync(&sdata->u.mgd.monitor_work); cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); - /* - * When we get here, the interface is marked down. - * Call synchronize_rcu() to wait for the RX path - * should it be using the interface and enqueuing - * frames at this very time on another CPU. - */ - synchronize_rcu(); - skb_queue_purge(&sdata->u.mgd.skb_queue); /* fall through */ case NL80211_IFTYPE_ADHOC: - if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) del_timer_sync(&sdata->u.ibss.timer); - cancel_work_sync(&sdata->u.ibss.work); - synchronize_rcu(); - skb_queue_purge(&sdata->u.ibss.skb_queue); - } /* fall through */ case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif)) { @@ -518,6 +484,16 @@ static int ieee80211_stop(struct net_device *dev) } /* fall through */ default: + flush_work(&sdata->work); + /* + * When we get here, the interface is marked down. 
+ * Call synchronize_rcu() to wait for the RX path + * should it be using the interface and enqueuing + * frames at this very time on another CPU. + */ + synchronize_rcu(); + skb_queue_purge(&sdata->skb_queue); + if (local->scan_sdata == sdata) ieee80211_scan_cancel(local); @@ -531,8 +507,8 @@ static int ieee80211_stop(struct net_device *dev) BSS_CHANGED_BEACON_ENABLED); } - /* disable all keys for as long as this netdev is down */ - ieee80211_disable_keys(sdata); + /* free all remaining keys, there shouldn't be any */ + ieee80211_free_keys(sdata); drv_remove_interface(local, &sdata->vif); } @@ -727,6 +703,136 @@ static void ieee80211_if_setup(struct net_device *dev) dev->destructor = free_netdev; } +static void ieee80211_iface_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, work); + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct sta_info *sta; + struct ieee80211_ra_tid *ra_tid; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (local->scanning) + return; + + /* + * ieee80211_queue_work() should have picked up most cases, + * here we'll pick the rest. + */ + if (WARN(local->suspended, + "interface work scheduled while going to suspend\n")) + return; + + /* first process frames */ + while ((skb = skb_dequeue(&sdata->skb_queue))) { + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { + ra_tid = (void *)&skb->cb; + ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, + ra_tid->tid); + } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { + ra_tid = (void *)&skb->cb; + ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, + ra_tid->tid); + } else if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK) { + int len = skb->len; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + if (sta) { + switch (mgmt->u.action.u.addba_req.action_code) { + case WLAN_ACTION_ADDBA_REQ: + ieee80211_process_addba_request( + local, sta, mgmt, len); + break; + case WLAN_ACTION_ADDBA_RESP: + ieee80211_process_addba_resp(local, sta, + mgmt, len); + break; + case WLAN_ACTION_DELBA: + ieee80211_process_delba(sdata, sta, + mgmt, len); + break; + default: + WARN_ON(1); + break; + } + } + mutex_unlock(&local->sta_mtx); + } else if (ieee80211_is_data_qos(mgmt->frame_control)) { + struct ieee80211_hdr *hdr = (void *)mgmt; + /* + * So the frame isn't mgmt, but frame_control + * is at the right place anyway, of course, so + * the if statement is correct. + * + * Warn if we have other data frame types here, + * they must not get here. + */ + WARN_ON(hdr->frame_control & + cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); + WARN_ON(!(hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG))); + /* + * This was a fragment of a frame, received while + * a block-ack session was active. That cannot be + * right, so terminate the session. 
+ */ + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + if (sta) { + u16 tid = *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_TID_MASK; + + __ieee80211_stop_rx_ba_session( + sta, tid, WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_REQUIRE_SETUP); + } + mutex_unlock(&local->sta_mtx); + } else switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_sta_rx_queued_mgmt(sdata, skb); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_rx_queued_mgmt(sdata, skb); + break; + case NL80211_IFTYPE_MESH_POINT: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + ieee80211_mesh_rx_queued_mgmt(sdata, skb); + break; + default: + WARN(1, "frame for unexpected interface type"); + break; + } + + kfree_skb(skb); + } + + /* then other type-dependent work */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_sta_work(sdata); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_work(sdata); + break; + case NL80211_IFTYPE_MESH_POINT: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + ieee80211_mesh_work(sdata); + break; + default: + break; + } +} + + /* * Helper function to initialise an interface to a specific type. */ @@ -744,6 +850,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, /* only monitor differs */ sdata->dev->type = ARPHRD_ETHER; + skb_queue_head_init(&sdata->skb_queue); + INIT_WORK(&sdata->work, ieee80211_iface_work); + switch (type) { case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps_bc_buf); @@ -969,6 +1078,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, sdata->wdev.wiphy = local->hw.wiphy; sdata->local = local; sdata->dev = ndev; +#ifdef CONFIG_INET + sdata->arp_filter_state = true; +#endif for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) skb_queue_head_init(&sdata->fragments[i].skb_list); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index e8f6e3b252d8b8ee3ef4210695dfc44283534bc4..1b9d87ed143a1a3b3e46e31f5649ba67f68321b2 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -36,80 +36,20 @@ * There is currently no way of knowing this except by looking into * debugfs. * - * All key operations are protected internally so you can call them at - * any time. + * All key operations are protected internally. * * Within mac80211, key references are, just as STA structure references, * protected by RCU. Note, however, that some things are unprotected, * namely the key->sta dereferences within the hardware acceleration - * functions. This means that sta_info_destroy() must flush the key todo - * list. - * - * All the direct key list manipulation functions must not sleep because - * they can operate on STA info structs that are protected by RCU. + * functions. This means that sta_info_destroy() must remove the key + * which waits for an RCU grace period. */ static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; -/* key mutex: used to synchronise todo runners */ -static DEFINE_MUTEX(key_mutex); -static DEFINE_SPINLOCK(todo_lock); -static LIST_HEAD(todo_list); - -static void key_todo(struct work_struct *work) -{ - ieee80211_key_todo(); -} - -static DECLARE_WORK(todo_work, key_todo); - -/** - * add_todo - add todo item for a key - * - * @key: key to add to do item for - * @flag: todo flag(s) - * - * Must be called with IRQs or softirqs disabled. 
- */ -static void add_todo(struct ieee80211_key *key, u32 flag) -{ - if (!key) - return; - - spin_lock(&todo_lock); - key->flags |= flag; - /* - * Remove again if already on the list so that we move it to the end. - */ - if (!list_empty(&key->todo)) - list_del(&key->todo); - list_add_tail(&key->todo, &todo_list); - schedule_work(&todo_work); - spin_unlock(&todo_lock); -} - -/** - * ieee80211_key_lock - lock the mac80211 key operation lock - * - * This locks the (global) mac80211 key operation lock, all - * key operations must be done under this lock. - */ -static void ieee80211_key_lock(void) -{ - mutex_lock(&key_mutex); -} - -/** - * ieee80211_key_unlock - unlock the mac80211 key operation lock - */ -static void ieee80211_key_unlock(void) -{ - mutex_unlock(&key_mutex); -} - -static void assert_key_lock(void) +static void assert_key_lock(struct ieee80211_local *local) { - WARN_ON(!mutex_is_locked(&key_mutex)); + WARN_ON(!mutex_is_locked(&local->key_mtx)); } static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) @@ -126,12 +66,13 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) struct ieee80211_sta *sta; int ret; - assert_key_lock(); might_sleep(); if (!key->local->ops->set_key) return; + assert_key_lock(key->local); + sta = get_sta_for_key(key); sdata = key->sdata; @@ -142,11 +83,8 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); - if (!ret) { - spin_lock_bh(&todo_lock); + if (!ret) key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; - spin_unlock_bh(&todo_lock); - } if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) printk(KERN_ERR "mac80211-%s: failed to set key " @@ -161,18 +99,15 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) struct ieee80211_sta *sta; int ret; - assert_key_lock(); might_sleep(); if (!key || !key->local->ops->set_key) return; - spin_lock_bh(&todo_lock); - if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { - spin_unlock_bh(&todo_lock); + assert_key_lock(key->local); + + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; - } - spin_unlock_bh(&todo_lock); sta = get_sta_for_key(key); sdata = key->sdata; @@ -191,9 +126,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) wiphy_name(key->local->hw.wiphy), key->conf.keyidx, sta ? 
sta->addr : bcast_addr, ret); - spin_lock_bh(&todo_lock); key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; - spin_unlock_bh(&todo_lock); } static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, @@ -201,22 +134,24 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, { struct ieee80211_key *key = NULL; + assert_key_lock(sdata->local); + if (idx >= 0 && idx < NUM_DEFAULT_KEYS) key = sdata->keys[idx]; rcu_assign_pointer(sdata->default_key, key); - if (key) - add_todo(key, KEY_FLAG_TODO_DEFKEY); + if (key) { + ieee80211_debugfs_key_remove_default(key->sdata); + ieee80211_debugfs_key_add_default(key->sdata); + } } void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) { - unsigned long flags; - - spin_lock_irqsave(&sdata->local->key_lock, flags); + mutex_lock(&sdata->local->key_mtx); __ieee80211_set_default_key(sdata, idx); - spin_unlock_irqrestore(&sdata->local->key_lock, flags); + mutex_unlock(&sdata->local->key_mtx); } static void @@ -224,24 +159,26 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) { struct ieee80211_key *key = NULL; + assert_key_lock(sdata->local); + if (idx >= NUM_DEFAULT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) key = sdata->keys[idx]; rcu_assign_pointer(sdata->default_mgmt_key, key); - if (key) - add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY); + if (key) { + ieee80211_debugfs_key_remove_mgmt_default(key->sdata); + ieee80211_debugfs_key_add_mgmt_default(key->sdata); + } } void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) { - unsigned long flags; - - spin_lock_irqsave(&sdata->local->key_lock, flags); + mutex_lock(&sdata->local->key_mtx); __ieee80211_set_default_mgmt_key(sdata, idx); - spin_unlock_irqrestore(&sdata->local->key_lock, flags); + mutex_unlock(&sdata->local->key_mtx); } @@ -336,7 +273,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, key->conf.iv_len = CCMP_HDR_LEN; key->conf.icv_len = CCMP_MIC_LEN; if (seq) { - for (i = 0; i < NUM_RX_DATA_QUEUES; i++) + for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) for (j = 0; j < CCMP_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[CCMP_PN_LEN - j - 1]; @@ -352,7 +289,6 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, } memcpy(key->conf.key, key_data, key_len); INIT_LIST_HEAD(&key->list); - INIT_LIST_HEAD(&key->todo); if (alg == ALG_CCMP) { /* @@ -382,12 +318,29 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, return key; } +static void __ieee80211_key_destroy(struct ieee80211_key *key) +{ + if (!key) + return; + + if (key->local) + ieee80211_key_disable_hw_accel(key); + + if (key->conf.alg == ALG_CCMP) + ieee80211_aes_key_free(key->u.ccmp.tfm); + if (key->conf.alg == ALG_AES_CMAC) + ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); + if (key->local) + ieee80211_debugfs_key_remove(key); + + kfree(key); +} + void ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct ieee80211_key *old_key; - unsigned long flags; int idx; BUG_ON(!sdata); @@ -431,7 +384,7 @@ void ieee80211_key_link(struct ieee80211_key *key, } } - spin_lock_irqsave(&sdata->local->key_lock, flags); + mutex_lock(&sdata->local->key_mtx); if (sta) old_key = sta->key; @@ -439,15 +392,13 @@ void ieee80211_key_link(struct ieee80211_key *key, old_key = sdata->keys[idx]; __ieee80211_key_replace(sdata, sta, old_key, key); + __ieee80211_key_destroy(old_key); - /* free old key later */ - add_todo(old_key, 
KEY_FLAG_TODO_DELETE); + ieee80211_debugfs_key_add(key); - add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); - if (ieee80211_sdata_running(sdata)) - add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); + ieee80211_key_enable_hw_accel(key); - spin_unlock_irqrestore(&sdata->local->key_lock, flags); + mutex_unlock(&sdata->local->key_mtx); } static void __ieee80211_key_free(struct ieee80211_key *key) @@ -458,170 +409,62 @@ static void __ieee80211_key_free(struct ieee80211_key *key) if (key->sdata) __ieee80211_key_replace(key->sdata, key->sta, key, NULL); - - add_todo(key, KEY_FLAG_TODO_DELETE); + __ieee80211_key_destroy(key); } -void ieee80211_key_free(struct ieee80211_key *key) +void ieee80211_key_free(struct ieee80211_local *local, + struct ieee80211_key *key) { - unsigned long flags; - if (!key) return; - if (!key->sdata) { - /* The key has not been linked yet, simply free it - * and don't Oops */ - if (key->conf.alg == ALG_CCMP) - ieee80211_aes_key_free(key->u.ccmp.tfm); - kfree(key); - return; - } - - spin_lock_irqsave(&key->sdata->local->key_lock, flags); + mutex_lock(&local->key_mtx); __ieee80211_key_free(key); - spin_unlock_irqrestore(&key->sdata->local->key_lock, flags); + mutex_unlock(&local->key_mtx); } -/* - * To be safe against concurrent manipulations of the list (which shouldn't - * actually happen) we need to hold the spinlock. But under the spinlock we - * can't actually do much, so we defer processing to the todo list. Then run - * the todo list to be sure the operation and possibly previously pending - * operations are completed. - */ -static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata, - u32 todo_flags) +void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; - unsigned long flags; - - might_sleep(); - - spin_lock_irqsave(&sdata->local->key_lock, flags); - list_for_each_entry(key, &sdata->key_list, list) - add_todo(key, todo_flags); - spin_unlock_irqrestore(&sdata->local->key_lock, flags); - ieee80211_key_todo(); -} - -void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) -{ ASSERT_RTNL(); if (WARN_ON(!ieee80211_sdata_running(sdata))) return; - ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); -} + mutex_lock(&sdata->local->key_mtx); -void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) -{ - ASSERT_RTNL(); - - ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE); -} - -static void __ieee80211_key_destroy(struct ieee80211_key *key) -{ - if (!key) - return; - - ieee80211_key_disable_hw_accel(key); - - if (key->conf.alg == ALG_CCMP) - ieee80211_aes_key_free(key->u.ccmp.tfm); - if (key->conf.alg == ALG_AES_CMAC) - ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); - ieee80211_debugfs_key_remove(key); + list_for_each_entry(key, &sdata->key_list, list) + ieee80211_key_enable_hw_accel(key); - kfree(key); + mutex_unlock(&sdata->local->key_mtx); } -static void __ieee80211_key_todo(void) +void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; - bool work_done; - u32 todoflags; - /* - * NB: sta_info_destroy relies on this! 
- */ - synchronize_rcu(); - - spin_lock_bh(&todo_lock); - while (!list_empty(&todo_list)) { - key = list_first_entry(&todo_list, struct ieee80211_key, todo); - list_del_init(&key->todo); - todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS | - KEY_FLAG_TODO_DEFKEY | - KEY_FLAG_TODO_DEFMGMTKEY | - KEY_FLAG_TODO_HWACCEL_ADD | - KEY_FLAG_TODO_HWACCEL_REMOVE | - KEY_FLAG_TODO_DELETE); - key->flags &= ~todoflags; - spin_unlock_bh(&todo_lock); - - work_done = false; - - if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) { - ieee80211_debugfs_key_add(key); - work_done = true; - } - if (todoflags & KEY_FLAG_TODO_DEFKEY) { - ieee80211_debugfs_key_remove_default(key->sdata); - ieee80211_debugfs_key_add_default(key->sdata); - work_done = true; - } - if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) { - ieee80211_debugfs_key_remove_mgmt_default(key->sdata); - ieee80211_debugfs_key_add_mgmt_default(key->sdata); - work_done = true; - } - if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) { - ieee80211_key_enable_hw_accel(key); - work_done = true; - } - if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) { - ieee80211_key_disable_hw_accel(key); - work_done = true; - } - if (todoflags & KEY_FLAG_TODO_DELETE) { - __ieee80211_key_destroy(key); - work_done = true; - } + ASSERT_RTNL(); - WARN_ON(!work_done); + mutex_lock(&sdata->local->key_mtx); - spin_lock_bh(&todo_lock); - } - spin_unlock_bh(&todo_lock); -} + list_for_each_entry(key, &sdata->key_list, list) + ieee80211_key_disable_hw_accel(key); -void ieee80211_key_todo(void) -{ - ieee80211_key_lock(); - __ieee80211_key_todo(); - ieee80211_key_unlock(); + mutex_unlock(&sdata->local->key_mtx); } void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key, *tmp; - unsigned long flags; - ieee80211_key_lock(); + mutex_lock(&sdata->local->key_mtx); ieee80211_debugfs_key_remove_default(sdata); ieee80211_debugfs_key_remove_mgmt_default(sdata); - spin_lock_irqsave(&sdata->local->key_lock, flags); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) __ieee80211_key_free(key); - spin_unlock_irqrestore(&sdata->local->key_lock, flags); - - __ieee80211_key_todo(); - ieee80211_key_unlock(); + mutex_unlock(&sdata->local->key_mtx); } diff --git a/net/mac80211/key.h b/net/mac80211/key.h index bdc2968c2bbe2d72cbe1c55d554a67ff65887b17..b665bbb7a4711a7bcef6b7c27b41fd331d458d34 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -38,25 +38,9 @@ struct sta_info; * * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present * in the hardware for TX crypto hardware acceleration. - * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an - * RCU grace period, no longer be reachable other than from the - * todo list. - * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration. - * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware - * acceleration. - * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated. - * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs. - * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs - * to be updated. 
*/ enum ieee80211_internal_key_flags { KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), - KEY_FLAG_TODO_DELETE = BIT(1), - KEY_FLAG_TODO_HWACCEL_ADD = BIT(2), - KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3), - KEY_FLAG_TODO_DEFKEY = BIT(4), - KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), - KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), }; enum ieee80211_internal_tkip_state { @@ -79,10 +63,8 @@ struct ieee80211_key { /* for sdata list */ struct list_head list; - /* for todo list */ - struct list_head todo; - /* protected by todo lock! */ + /* protected by key mutex */ unsigned int flags; union { @@ -95,7 +77,13 @@ struct ieee80211_key { } tkip; struct { u8 tx_pn[6]; - u8 rx_pn[NUM_RX_DATA_QUEUES][6]; + /* + * Last received packet number. The first + * NUM_RX_DATA_QUEUES counters are used with Data + * frames and the last counter is used with Robust + * Management frames. + */ + u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6]; struct crypto_cipher *tfm; u32 replays; /* dot11RSNAStatsCCMPReplays */ /* scratch buffers for virt_to_page() (crypto API) */ @@ -147,7 +135,8 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, void ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_sub_if_data *sdata, struct sta_info *sta); -void ieee80211_key_free(struct ieee80211_key *key); +void ieee80211_key_free(struct ieee80211_local *local, + struct ieee80211_key *key); void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx); @@ -155,6 +144,4 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); -void ieee80211_key_todo(void); - #endif /* IEEE80211_KEY_H */ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 22a384dfab65d1bd3a8539f39066d8401ff0f649..7cc4f913a43112cfd789bf841163f174aae059ba 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -106,12 +107,15 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) if (scan_chan) { chan = scan_chan; channel_type = NL80211_CHAN_NO_HT; + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; } else if (local->tmp_channel) { chan = scan_chan = local->tmp_channel; channel_type = local->tmp_channel_type; + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; } else { chan = local->oper_channel; channel_type = local->_oper_channel_type; + local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; } if (chan != local->hw.conf.channel || @@ -259,7 +263,6 @@ static void ieee80211_tasklet_handler(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *) data; struct sk_buff *skb; - struct ieee80211_ra_tid *ra_tid; while ((skb = skb_dequeue(&local->skb_queue)) || (skb = skb_dequeue(&local->skb_queue_unreliable))) { @@ -274,18 +277,6 @@ static void ieee80211_tasklet_handler(unsigned long data) skb->pkt_type = 0; ieee80211_tx_status(local_to_hw(local), skb); break; - case IEEE80211_DELBA_MSG: - ra_tid = (struct ieee80211_ra_tid *) &skb->cb; - ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra, - ra_tid->tid); - dev_kfree_skb(skb); - break; - case IEEE80211_ADDBA_MSG: - ra_tid = (struct ieee80211_ra_tid *) &skb->cb; - ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra, - ra_tid->tid); - dev_kfree_skb(skb); - break ; default: WARN(1, "mac80211: Packet is of unknown type %d\n", skb->pkt_type); @@ -329,6 +320,76 @@ static void 
ieee80211_recalc_smps_work(struct work_struct *work) mutex_unlock(&local->iflist_mtx); } +#ifdef CONFIG_INET +static int ieee80211_ifa_changed(struct notifier_block *nb, + unsigned long data, void *arg) +{ + struct in_ifaddr *ifa = arg; + struct ieee80211_local *local = + container_of(nb, struct ieee80211_local, + ifa_notifier); + struct net_device *ndev = ifa->ifa_dev->dev; + struct wireless_dev *wdev = ndev->ieee80211_ptr; + struct in_device *idev; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_bss_conf *bss_conf; + struct ieee80211_if_managed *ifmgd; + int c = 0; + + if (!netif_running(ndev)) + return NOTIFY_DONE; + + /* Make sure it's our interface that got changed */ + if (!wdev) + return NOTIFY_DONE; + + if (wdev->wiphy != local->hw.wiphy) + return NOTIFY_DONE; + + sdata = IEEE80211_DEV_TO_SUB_IF(ndev); + bss_conf = &sdata->vif.bss_conf; + + /* ARP filtering is only supported in managed mode */ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return NOTIFY_DONE; + + idev = sdata->dev->ip_ptr; + if (!idev) + return NOTIFY_DONE; + + ifmgd = &sdata->u.mgd; + mutex_lock(&ifmgd->mtx); + + /* Copy the addresses to the bss_conf list */ + ifa = idev->ifa_list; + while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) { + bss_conf->arp_addr_list[c] = ifa->ifa_address; + ifa = ifa->ifa_next; + c++; + } + + /* If not all addresses fit the list, disable filtering */ + if (ifa) { + sdata->arp_filter_state = false; + c = 0; + } else { + sdata->arp_filter_state = true; + } + bss_conf->arp_addr_cnt = c; + + /* Configure driver only if associated */ + if (ifmgd->associated) { + bss_conf->arp_filter_enabled = sdata->arp_filter_state; + ieee80211_bss_info_change_notify(sdata, + BSS_CHANGED_ARP_FILTER); + } + + mutex_unlock(&ifmgd->mtx); + + return NOTIFY_DONE; +} +#endif + struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops) { @@ -396,7 +457,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, mutex_init(&local->iflist_mtx); mutex_init(&local->scan_mtx); - spin_lock_init(&local->key_lock); + mutex_init(&local->key_mtx); spin_lock_init(&local->filter_lock); spin_lock_init(&local->queue_stop_reason_lock); @@ -419,8 +480,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, sta_info_init(local); - for (i = 0; i < IEEE80211_MAX_QUEUES; i++) + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_head_init(&local->pending[i]); + atomic_set(&local->agg_queue_stop[i], 0); + } tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, (unsigned long)local); @@ -431,8 +494,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); - spin_lock_init(&local->ampdu_lock); - return local_to_hw(local); } EXPORT_SYMBOL(ieee80211_alloc_hw); @@ -572,18 +633,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->hw.conf.listen_interval = local->hw.max_listen_interval; - local->hw.conf.dynamic_ps_forced_timeout = -1; + local->dynamic_ps_forced_timeout = -1; result = sta_info_start(local); if (result < 0) goto fail_sta_info; result = ieee80211_wep_init(local); - if (result < 0) { + if (result < 0) printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", wiphy_name(local->hw.wiphy), result); - goto fail_wep; - } rtnl_lock(); @@ -612,21 +671,30 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) ieee80211_max_network_latency; result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, &local->network_latency_notifier); - if (result) { 
rtnl_lock(); goto fail_pm_qos; } +#ifdef CONFIG_INET + local->ifa_notifier.notifier_call = ieee80211_ifa_changed; + result = register_inetaddr_notifier(&local->ifa_notifier); + if (result) + goto fail_ifa; +#endif + return 0; + fail_ifa: + pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, + &local->network_latency_notifier); + rtnl_lock(); fail_pm_qos: ieee80211_led_exit(local); ieee80211_remove_interfaces(local); fail_rate: rtnl_unlock(); ieee80211_wep_free(local); - fail_wep: sta_info_stop(local); fail_sta_info: destroy_workqueue(local->workqueue); @@ -647,6 +715,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, &local->network_latency_notifier); +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&local->ifa_notifier); +#endif rtnl_lock(); @@ -704,6 +775,10 @@ static int __init ieee80211_init(void) if (ret) return ret; + ret = rc80211_minstrel_ht_init(); + if (ret) + goto err_minstrel; + ret = rc80211_pid_init(); if (ret) goto err_pid; @@ -716,6 +791,8 @@ static int __init ieee80211_init(void) err_netdev: rc80211_pid_exit(); err_pid: + rc80211_minstrel_ht_exit(); + err_minstrel: rc80211_minstrel_exit(); return ret; @@ -724,6 +801,7 @@ static int __init ieee80211_init(void) static void __exit ieee80211_exit(void) { rc80211_pid_exit(); + rc80211_minstrel_ht_exit(); rc80211_minstrel_exit(); /* diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index bde81031727a6a78cadfa070f95ead4f45b3538b..c8a4f19ed13bb810a97bb5e25200a74cdd55a31c 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -54,7 +54,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) return; } - ieee80211_queue_work(&local->hw, &ifmsh->work); + ieee80211_queue_work(&local->hw, &sdata->work); } /** @@ -345,7 +345,7 @@ static void ieee80211_mesh_path_timer(unsigned long data) return; } - ieee80211_queue_work(&local->hw, &ifmsh->work); + ieee80211_queue_work(&local->hw, &sdata->work); } static void ieee80211_mesh_path_root_timer(unsigned long data) @@ -362,7 +362,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data) return; } - ieee80211_queue_work(&local->hw, &ifmsh->work); + ieee80211_queue_work(&local->hw, &sdata->work); } void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) @@ -484,9 +484,6 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - /* might restart the timer but that doesn't matter */ - cancel_work_sync(&ifmsh->work); - /* use atomic bitops in case both timers fire at the same time */ if (del_timer_sync(&ifmsh->housekeeping_timer)) @@ -518,7 +515,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); - ieee80211_queue_work(&local->hw, &ifmsh->work); + ieee80211_queue_work(&local->hw, &sdata->work); sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | @@ -536,16 +533,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) * whether the interface is running, which, at this point, * it no longer is. */ - cancel_work_sync(&sdata->u.mesh.work); - - /* - * When we get here, the interface is marked down. - * Call synchronize_rcu() to wait for the RX path - * should it be using the interface and enqueuing - * frames at this very time on another CPU. 
- */ - rcu_barrier(); /* Wait for RX path and call_rcu()'s */ - skb_queue_purge(&sdata->u.mesh.skb_queue); + cancel_work_sync(&sdata->work); } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, @@ -608,8 +596,8 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, } } -static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) +void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_if_mesh *ifmsh; @@ -632,26 +620,11 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); break; } - - kfree_skb(skb); } -static void ieee80211_mesh_work(struct work_struct *work) +void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) { - struct ieee80211_sub_if_data *sdata = - container_of(work, struct ieee80211_sub_if_data, u.mesh.work); - struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct sk_buff *skb; - - if (!ieee80211_sdata_running(sdata)) - return; - - if (local->scanning) - return; - - while ((skb = skb_dequeue(&ifmsh->skb_queue))) - ieee80211_mesh_rx_queued_mgmt(sdata, skb); if (ifmsh->preq_queue_len && time_after(jiffies, @@ -678,7 +651,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) if (ieee80211_vif_is_mesh(&sdata->vif)) - ieee80211_queue_work(&local->hw, &sdata->u.mesh.work); + ieee80211_queue_work(&local->hw, &sdata->work); rcu_read_unlock(); } @@ -686,11 +659,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - INIT_WORK(&ifmsh->work, ieee80211_mesh_work); setup_timer(&ifmsh->housekeeping_timer, ieee80211_mesh_housekeeping_timer, (unsigned long) sdata); - skb_queue_head_init(&sdata->u.mesh.skb_queue); ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; @@ -731,29 +702,3 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) INIT_LIST_HEAD(&ifmsh->preq_queue.list); spin_lock_init(&ifmsh->mesh_preq_queue_lock); } - -ieee80211_rx_result -ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee80211_mgmt *mgmt; - u16 fc; - - if (skb->len < 24) - return RX_DROP_MONITOR; - - mgmt = (struct ieee80211_mgmt *) skb->data; - fc = le16_to_cpu(mgmt->frame_control); - - switch (fc & IEEE80211_FCTL_STYPE) { - case IEEE80211_STYPE_ACTION: - case IEEE80211_STYPE_PROBE_RESP: - case IEEE80211_STYPE_BEACON: - skb_queue_tail(&ifmsh->skb_queue, skb); - ieee80211_queue_work(&local->hw, &ifmsh->work); - return RX_QUEUED; - } - - return RX_CONTINUE; -} diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index c88087f1cd0ff79a4a5f28c57f2626d5c7ce8eac..ebd3f1d9d88954c296fba7ca03959cadc42ab223 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -237,8 +237,6 @@ void ieee80211s_update_metric(struct ieee80211_local *local, struct sta_info *stainfo, struct sk_buff *skb); void ieee80211s_stop(void); void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); -ieee80211_rx_result -ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void 
ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 0705018d8d1e7ac9a185f3bea2fffe43aaaaed24..829e08a657d078792f789696bab1a5411aeaa863 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -805,14 +805,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) spin_unlock(&ifmsh->mesh_preq_queue_lock); if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) - ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); + ieee80211_queue_work(&sdata->local->hw, &sdata->work); else if (time_before(jiffies, ifmsh->last_preq)) { /* avoid long wait if did not send preqs for a long time * and jiffies wrapped around */ ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; - ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); + ieee80211_queue_work(&sdata->local->hw, &sdata->work); } else mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + min_preq_int_jiff(sdata)); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 181ffd6efd816e859d92274d459da41ab749c1b3..349e466cf08b06825a45d5923dae2c167fa0d82f 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -315,7 +315,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) read_unlock(&pathtbl_resize_lock); if (grow) { set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); - ieee80211_queue_work(&local->hw, &ifmsh->work); + ieee80211_queue_work(&local->hw, &sdata->work); } return 0; @@ -425,7 +425,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) read_unlock(&pathtbl_resize_lock); if (grow) { set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); - ieee80211_queue_work(&local->hw, &ifmsh->work); + ieee80211_queue_work(&local->hw, &sdata->work); } return 0; diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 3cd5f7b5d693be131e75baeb8f7474fb4ab2bef3..ea13a80a476c1fe0db6dc5e0059e2014f340926e 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -65,7 +65,6 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); mesh_accept_plinks_update(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } static inline @@ -73,7 +72,6 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); mesh_accept_plinks_update(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } /** @@ -115,7 +113,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, } /** - * mesh_plink_deactivate - deactivate mesh peer link + * __mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * @@ -123,18 +121,23 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, * * Locking: the caller must hold sta->lock */ -static void __mesh_plink_deactivate(struct sta_info *sta) +static bool __mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; + bool deactivated = false; - if (sta->plink_state == PLINK_ESTAB) + if (sta->plink_state == PLINK_ESTAB) { mesh_plink_dec_estab_count(sdata); + deactivated = true; + } sta->plink_state = PLINK_BLOCKED; mesh_path_flush_by_nexthop(sta); + + return deactivated; } /** - * __mesh_plink_deactivate - 
deactivate mesh peer link + * mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * @@ -142,9 +145,15 @@ static void __mesh_plink_deactivate(struct sta_info *sta) */ void mesh_plink_deactivate(struct sta_info *sta) { + struct ieee80211_sub_if_data *sdata = sta->sdata; + bool deactivated; + spin_lock_bh(&sta->lock); - __mesh_plink_deactivate(sta); + deactivated = __mesh_plink_deactivate(sta); spin_unlock_bh(&sta->lock); + + if (deactivated) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, @@ -381,10 +390,16 @@ int mesh_plink_open(struct sta_info *sta) void mesh_plink_block(struct sta_info *sta) { + struct ieee80211_sub_if_data *sdata = sta->sdata; + bool deactivated; + spin_lock_bh(&sta->lock); - __mesh_plink_deactivate(sta); + deactivated = __mesh_plink_deactivate(sta); sta->plink_state = PLINK_BLOCKED; spin_unlock_bh(&sta->lock); + + if (deactivated) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } @@ -397,6 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m enum plink_event event; enum plink_frame_type ftype; size_t baselen; + bool deactivated; u8 ie_len; u8 *baseaddr; __le16 plid, llid, reason; @@ -651,8 +667,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case CNF_ACPT: del_timer(&sta->plink_timer); sta->plink_state = PLINK_ESTAB; - mesh_plink_inc_estab_count(sdata); spin_unlock_bh(&sta->lock); + mesh_plink_inc_estab_count(sdata); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); break; @@ -684,8 +701,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case OPN_ACPT: del_timer(&sta->plink_timer); sta->plink_state = PLINK_ESTAB; - mesh_plink_inc_estab_count(sdata); spin_unlock_bh(&sta->lock); + mesh_plink_inc_estab_count(sdata); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, @@ -702,11 +720,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case CLS_ACPT: reason = cpu_to_le16(MESH_CLOSE_RCVD); sta->reason = reason; - __mesh_plink_deactivate(sta); + deactivated = __mesh_plink_deactivate(sta); sta->plink_state = PLINK_HOLDING; llid = sta->llid; mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); spin_unlock_bh(&sta->lock); + if (deactivated) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f803f8b72a930dd58b60ae362ea1b7117f1247b2..b6c163ac22da39fd3327a30f77478e27cb81756c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -478,6 +478,39 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, } } +void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_conf *conf = &local->hw.conf; + + WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || + !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) || + (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)); + + local->disable_dynamic_ps = false; + conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout; +} 
+EXPORT_SYMBOL(ieee80211_enable_dyn_ps); + +void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_conf *conf = &local->hw.conf; + + WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || + !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) || + (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)); + + local->disable_dynamic_ps = true; + conf->dynamic_ps_timeout = 0; + del_timer_sync(&local->dynamic_ps_timer); + ieee80211_queue_work(&local->hw, + &local->dynamic_ps_enable_work); +} +EXPORT_SYMBOL(ieee80211_disable_dyn_ps); + /* powersave */ static void ieee80211_enable_ps(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) @@ -553,6 +586,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) found->u.mgd.associated->beacon_ies && !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL))) { + struct ieee80211_conf *conf = &local->hw.conf; s32 beaconint_us; if (latency < 0) @@ -561,25 +595,24 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) beaconint_us = ieee80211_tu_to_usec( found->vif.bss_conf.beacon_int); - timeout = local->hw.conf.dynamic_ps_forced_timeout; + timeout = local->dynamic_ps_forced_timeout; if (timeout < 0) { /* + * Go to full PSM if the user configures a very low + * latency requirement. * The 2 second value is there for compatibility until * the PM_QOS_NETWORK_LATENCY is configured with real * values. */ - if (latency == 2000000000) - timeout = 100; - else if (latency <= 50000) - timeout = 300; - else if (latency <= 100000) - timeout = 100; - else if (latency <= 500000) - timeout = 50; - else + if (latency > 1900000000 && latency != 2000000000) timeout = 0; + else + timeout = 100; } - local->hw.conf.dynamic_ps_timeout = timeout; + local->dynamic_ps_user_timeout = timeout; + if (!local->disable_dynamic_ps) + conf->dynamic_ps_timeout = + local->dynamic_ps_user_timeout; if (beaconint_us > latency) { local->ps_sdata = NULL; @@ -665,10 +698,11 @@ void ieee80211_dynamic_ps_timer(unsigned long data) /* MLME */ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, - struct ieee80211_if_managed *ifmgd, + struct ieee80211_sub_if_data *sdata, u8 *wmm_param, size_t wmm_param_len) { struct ieee80211_tx_queue_params params; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t left; int count; u8 *pos, uapsd_queues = 0; @@ -757,8 +791,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, } /* enable WMM or activate new settings */ - local->hw.conf.flags |= IEEE80211_CONF_QOS; - drv_config(local, IEEE80211_CONF_CHANGE_QOS); + sdata->vif.bss_conf.qos = true; + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); } static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, @@ -806,11 +840,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, { struct ieee80211_bss *bss = (void *)cbss->priv; struct ieee80211_local *local = sdata->local; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; bss_info_changed |= BSS_CHANGED_ASSOC; /* set timing information */ - sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; - sdata->vif.bss_conf.timestamp = cbss->tsf; + bss_conf->beacon_int = cbss->beacon_interval; + bss_conf->timestamp = cbss->tsf; bss_info_changed |= BSS_CHANGED_BEACON_INT; bss_info_changed |= ieee80211_handle_bss_capability(sdata, @@ -835,7 +870,12 @@ static void 
ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, ieee80211_led_assoc(local, 1); - sdata->vif.bss_conf.assoc = 1; + if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) + bss_conf->dtim_period = bss->dtim_period; + else + bss_conf->dtim_period = 0; + + bss_conf->assoc = 1; /* * For now just always ask the driver to update the basic rateset * when we have associated, we aren't checking whether it actually @@ -848,9 +888,15 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, /* Tell the driver to monitor connection quality (if supported) */ if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && - sdata->vif.bss_conf.cqm_rssi_thold) + bss_conf->cqm_rssi_thold) bss_info_changed |= BSS_CHANGED_CQM; + /* Enable ARP filtering */ + if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) { + bss_conf->arp_filter_enabled = sdata->arp_filter_state; + bss_info_changed |= BSS_CHANGED_ARP_FILTER; + } + ieee80211_bss_info_change_notify(sdata, bss_info_changed); mutex_lock(&local->iflist_mtx); @@ -898,13 +944,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, netif_tx_stop_all_queues(sdata->dev); netif_carrier_off(sdata->dev); - rcu_read_lock(); + mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, bssid); if (sta) { - set_sta_flags(sta, WLAN_STA_DISASSOC); + set_sta_flags(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta); } - rcu_read_unlock(); + mutex_unlock(&local->sta_mtx); changed |= ieee80211_reset_erp_info(sdata); @@ -932,6 +978,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_hw_config(local, config_changed); + /* Disable ARP filtering */ + if (sdata->vif.bss_conf.arp_filter_enabled) { + sdata->vif.bss_conf.arp_filter_enabled = false; + changed |= BSS_CHANGED_ARP_FILTER; + } + /* The BSSID (not really interesting) and HT changed */ changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; ieee80211_bss_info_change_notify(sdata, changed); @@ -1279,7 +1331,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, } if (elems.wmm_param) - ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, + ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, elems.wmm_param_len); else ieee80211_set_wmm_default(sdata); @@ -1551,7 +1603,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); - ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, + ieee80211_sta_wmm_params(local, sdata, elems.wmm_param, elems.wmm_param_len); } @@ -1633,35 +1685,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_bss_info_change_notify(sdata, changed); } -ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_mgmt *mgmt; - u16 fc; - - if (skb->len < 24) - return RX_DROP_MONITOR; - - mgmt = (struct ieee80211_mgmt *) skb->data; - fc = le16_to_cpu(mgmt->frame_control); - - switch (fc & IEEE80211_FCTL_STYPE) { - case IEEE80211_STYPE_PROBE_RESP: - case IEEE80211_STYPE_BEACON: - case IEEE80211_STYPE_DEAUTH: - case IEEE80211_STYPE_DISASSOC: - case IEEE80211_STYPE_ACTION: - skb_queue_tail(&sdata->u.mgd.skb_queue, skb); - ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); - return RX_QUEUED; - } - - return RX_DROP_MONITOR; -} - -static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) +void 
ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_rx_status *rx_status; @@ -1693,44 +1718,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, break; case IEEE80211_STYPE_ACTION: switch (mgmt->u.action.category) { - case WLAN_CATEGORY_BACK: { - struct ieee80211_local *local = sdata->local; - int len = skb->len; - struct sta_info *sta; - - rcu_read_lock(); - sta = sta_info_get(sdata, mgmt->sa); - if (!sta) { - rcu_read_unlock(); - break; - } - - local_bh_disable(); - - switch (mgmt->u.action.u.addba_req.action_code) { - case WLAN_ACTION_ADDBA_REQ: - if (len < (IEEE80211_MIN_ACTION_SIZE + - sizeof(mgmt->u.action.u.addba_req))) - break; - ieee80211_process_addba_request(local, sta, mgmt, len); - break; - case WLAN_ACTION_ADDBA_RESP: - if (len < (IEEE80211_MIN_ACTION_SIZE + - sizeof(mgmt->u.action.u.addba_resp))) - break; - ieee80211_process_addba_resp(local, sta, mgmt, len); - break; - case WLAN_ACTION_DELBA: - if (len < (IEEE80211_MIN_ACTION_SIZE + - sizeof(mgmt->u.action.u.delba))) - break; - ieee80211_process_delba(sdata, sta, mgmt, len); - break; - } - local_bh_enable(); - rcu_read_unlock(); - break; - } case WLAN_CATEGORY_SPECTRUM_MGMT: ieee80211_sta_process_chanswitch(sdata, &mgmt->u.action.u.chan_switch.sw_elem, @@ -1754,7 +1741,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, default: WARN(1, "unexpected: %d", rma); } - goto out; + return; } mutex_unlock(&ifmgd->mtx); @@ -1769,7 +1756,8 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, if (wk->sdata != sdata) continue; - if (wk->type != IEEE80211_WORK_ASSOC) + if (wk->type != IEEE80211_WORK_ASSOC && + wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) continue; if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) @@ -1799,8 +1787,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); } - out: - kfree_skb(skb); } static void ieee80211_sta_timer(unsigned long data) @@ -1815,39 +1801,13 @@ static void ieee80211_sta_timer(unsigned long data) return; } - ieee80211_queue_work(&local->hw, &ifmgd->work); + ieee80211_queue_work(&local->hw, &sdata->work); } -static void ieee80211_sta_work(struct work_struct *work) +void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) { - struct ieee80211_sub_if_data *sdata = - container_of(work, struct ieee80211_sub_if_data, u.mgd.work); struct ieee80211_local *local = sdata->local; - struct ieee80211_if_managed *ifmgd; - struct sk_buff *skb; - - if (!ieee80211_sdata_running(sdata)) - return; - - if (local->scanning) - return; - - if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) - return; - - /* - * ieee80211_queue_work() should have picked up most cases, - * here we'll pick the the rest. 
- */ - if (WARN(local->suspended, "STA MLME work scheduled while " - "going to suspend\n")) - return; - - ifmgd = &sdata->u.mgd; - - /* first process frames to avoid timing out while a frame is pending */ - while ((skb = skb_dequeue(&ifmgd->skb_queue))) - ieee80211_sta_rx_queued_mgmt(sdata, skb); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; /* then process the rest of the work */ mutex_lock(&ifmgd->mtx); @@ -1942,8 +1902,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work); /* and do all the other regular work too */ - ieee80211_queue_work(&sdata->local->hw, - &sdata->u.mgd.work); + ieee80211_queue_work(&sdata->local->hw, &sdata->work); } } @@ -1958,7 +1917,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) * time -- the code here is properly synchronised. */ - cancel_work_sync(&ifmgd->work); cancel_work_sync(&ifmgd->beacon_connection_loss_work); if (del_timer_sync(&ifmgd->timer)) set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); @@ -1990,7 +1948,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd; ifmgd = &sdata->u.mgd; - INIT_WORK(&ifmgd->work, ieee80211_sta_work); INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); INIT_WORK(&ifmgd->beacon_connection_loss_work, @@ -2003,7 +1960,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) (unsigned long) sdata); setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, (unsigned long) sdata); - skb_queue_head_init(&ifmgd->skb_queue); ifmgd->flags = 0; @@ -2081,6 +2037,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, auth_alg = WLAN_AUTH_OPEN; break; case NL80211_AUTHTYPE_SHARED_KEY: + if (IS_ERR(sdata->local->wep_tx_tfm)) + return -EOPNOTSUPP; auth_alg = WLAN_AUTH_SHARED_KEY; break; case NL80211_AUTHTYPE_FT: @@ -2134,6 +2092,8 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt; + struct ieee80211_rx_status *rx_status; + struct ieee802_11_elems elems; u16 status; if (!skb) { @@ -2141,6 +2101,19 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, return WORK_DONE_DESTROY; } + if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) { + mutex_lock(&wk->sdata->u.mgd.mtx); + rx_status = (void *) skb->cb; + ieee802_11_parse_elems(skb->data + 24 + 12, skb->len - 24 - 12, &elems); + ieee80211_rx_bss_info(wk->sdata, (void *)skb->data, skb->len, rx_status, + &elems, true); + mutex_unlock(&wk->sdata->u.mgd.mtx); + + wk->type = IEEE80211_WORK_ASSOC; + /* not really done yet */ + return WORK_DONE_REQUEUE; + } + mgmt = (void *)skb->data; status = le16_to_cpu(mgmt->u.assoc_resp.status_code); @@ -2153,6 +2126,7 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, wk->filter_ta); return WORK_DONE_DESTROY; } + mutex_unlock(&wk->sdata->u.mgd.mtx); } @@ -2253,10 +2227,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, if (req->prev_bssid) memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN); - wk->type = IEEE80211_WORK_ASSOC; wk->chan = req->bss->channel; wk->sdata = sdata; wk->done = ieee80211_assoc_done; + if (!bss->dtim_period && + sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) + wk->type = IEEE80211_WORK_ASSOC_BEACON_WAIT; + else + wk->type = IEEE80211_WORK_ASSOC; if (req->use_mfp) { ifmgd->mfp = IEEE80211_MFP_REQUIRED; @@ 
-2282,14 +2260,16 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_work *wk; - const u8 *bssid = req->bss->bssid; + u8 bssid[ETH_ALEN]; + bool assoc_bss = false; mutex_lock(&ifmgd->mtx); + memcpy(bssid, req->bss->bssid, ETH_ALEN); if (ifmgd->associated == req->bss) { - bssid = req->bss->bssid; - ieee80211_set_disassoc(sdata, true); + ieee80211_set_disassoc(sdata, false); mutex_unlock(&ifmgd->mtx); + assoc_bss = true; } else { bool not_auth_yet = false; @@ -2302,7 +2282,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, if (wk->type != IEEE80211_WORK_DIRECT_PROBE && wk->type != IEEE80211_WORK_AUTH && - wk->type != IEEE80211_WORK_ASSOC) + wk->type != IEEE80211_WORK_ASSOC && + wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) continue; if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) @@ -2335,6 +2316,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, cookie, !req->local_state_change); + if (assoc_bss) + sta_info_destroy_addr(sdata, bssid); ieee80211_recalc_idle(sdata->local); @@ -2379,41 +2362,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, return 0; } -int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type, - const u8 *buf, size_t len, u64 *cookie) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct sk_buff *skb; - - /* Check that we are on the requested channel for transmission */ - if ((chan != local->tmp_channel || - channel_type != local->tmp_channel_type) && - (chan != local->oper_channel || - channel_type != local->_oper_channel_type)) - return -EBUSY; - - skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); - if (!skb) - return -ENOMEM; - skb_reserve(skb, local->hw.extra_tx_headroom); - - memcpy(skb_put(skb, len), buf, len); - - if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) - IEEE80211_SKB_CB(skb)->flags |= - IEEE80211_TX_INTFL_DONT_ENCRYPT; - IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX | - IEEE80211_TX_CTL_REQ_TX_STATUS; - skb->dev = sdata->dev; - ieee80211_tx_skb(sdata, skb); - - *cookie = (unsigned long) skb; - return 0; -} - void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, gfp_t gfp) diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 75202b295a4e81ba42691c61b4356347f94678bf..d287fde0431d6b2e688da5400d0ef9f41d9cf1c2 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -40,22 +40,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) list_for_each_entry(sdata, &local->interfaces, list) ieee80211_disable_keys(sdata); - /* Tear down aggregation sessions */ - - rcu_read_lock(); - - if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { - list_for_each_entry_rcu(sta, &local->sta_list, list) { + /* tear down aggregation sessions and remove STAs */ + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { set_sta_flags(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta); } - } - rcu_read_unlock(); - - /* remove STAs */ - mutex_lock(&local->sta_mtx); - list_for_each_entry(sta, &local->sta_list, list) { if (sta->uploaded) { sdata = sta->sdata; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) @@ -72,6 
+64,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) /* remove all interfaces */ list_for_each_entry(sdata, &local->interfaces, list) { + cancel_work_sync(&sdata->work); + switch(sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_quiesce(sdata); diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 065a96190e32a28409dda91110d929f9690c2f64..168427b0ffdc97fca6147118518f4593b79770f6 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -147,5 +147,18 @@ static inline void rc80211_minstrel_exit(void) } #endif +#ifdef CONFIG_MAC80211_RC_MINSTREL_HT +extern int rc80211_minstrel_ht_init(void); +extern void rc80211_minstrel_ht_exit(void); +#else +static inline int rc80211_minstrel_ht_init(void) +{ + return 0; +} +static inline void rc80211_minstrel_ht_exit(void) +{ +} +#endif + #endif /* IEEE80211_RATE_H */ diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index f65ce6dcc8e25d3aec3afe2ea9af85bd1b5932de..778c604d7939e39e3894e3c51af6b97360ca8e3b 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -67,7 +67,6 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix) for (i = rix; i >= 0; i--) if (mi->r[i].rix == rix) break; - WARN_ON(i < 0); return i; } diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c new file mode 100644 index 0000000000000000000000000000000000000000..c5b465904e3bfcec2ac0199e64dda46072022330 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -0,0 +1,827 @@ +/* + * Copyright (C) 2010 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include "rate.h" +#include "rc80211_minstrel.h" +#include "rc80211_minstrel_ht.h" + +#define AVG_PKT_SIZE 1200 +#define SAMPLE_COLUMNS 10 +#define EWMA_LEVEL 75 + +/* Number of bits for an average sized packet */ +#define MCS_NBITS (AVG_PKT_SIZE << 3) + +/* Number of symbols for a packet with (bps) bits per symbol */ +#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) + +/* Transmission time for a packet containing (syms) symbols */ +#define MCS_SYMBOL_TIME(sgi, syms) \ + (sgi ? \ + ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \ + (syms) << 2 /* syms * 4 us */ \ + ) + +/* Transmit duration for the raw data part of an average sized packet */ +#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) + +/* MCS rate information for an MCS group */ +#define MCS_GROUP(_streams, _sgi, _ht40) { \ + .streams = _streams, \ + .flags = \ + (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ + (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ + .duration = { \ + MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \ + MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \ + } \ +} + +/* + * To enable sufficiently targeted rate sampling, MCS rates are divided into + * groups, based on the number of streams and flags (HT40, SGI) that they + * use. 
+ */ +const struct mcs_group minstrel_mcs_groups[] = { + MCS_GROUP(1, 0, 0), + MCS_GROUP(2, 0, 0), +#if MINSTREL_MAX_STREAMS >= 3 + MCS_GROUP(3, 0, 0), +#endif + + MCS_GROUP(1, 1, 0), + MCS_GROUP(2, 1, 0), +#if MINSTREL_MAX_STREAMS >= 3 + MCS_GROUP(3, 1, 0), +#endif + + MCS_GROUP(1, 0, 1), + MCS_GROUP(2, 0, 1), +#if MINSTREL_MAX_STREAMS >= 3 + MCS_GROUP(3, 0, 1), +#endif + + MCS_GROUP(1, 1, 1), + MCS_GROUP(2, 1, 1), +#if MINSTREL_MAX_STREAMS >= 3 + MCS_GROUP(3, 1, 1), +#endif +}; + +static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; + +/* + * Perform EWMA (Exponentially Weighted Moving Average) calculation + */ +static int +minstrel_ewma(int old, int new, int weight) +{ + return (new * (100 - weight) + old * weight) / 100; +} + +/* + * Look up an MCS group index based on mac80211 rate information + */ +static int +minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) +{ + int streams = (rate->idx / MCS_GROUP_RATES) + 1; + u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH; + int i; + + for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { + if (minstrel_mcs_groups[i].streams != streams) + continue; + if (minstrel_mcs_groups[i].flags != (rate->flags & flags)) + continue; + + return i; + } + + WARN_ON(1); + return 0; +} + +static inline struct minstrel_rate_stats * +minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) +{ + return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; +} + + +/* + * Recalculate success probabilities and counters for a rate using EWMA + */ +static void +minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr) +{ + if (unlikely(mr->attempts > 0)) { + mr->sample_skipped = 0; + mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts); + if (!mr->att_hist) + mr->probability = mr->cur_prob; + else + mr->probability = minstrel_ewma(mr->probability, + mr->cur_prob, EWMA_LEVEL); + mr->att_hist += mr->attempts; + mr->succ_hist += mr->success; + } else { + mr->sample_skipped++; + } + mr->last_success = mr->success; + mr->last_attempts = mr->attempts; + mr->success = 0; + mr->attempts = 0; +} + +/* + * Calculate throughput based on the average A-MPDU length, taking into account + * the expected number of retransmissions and their expected length + */ +static void +minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + int group, int rate) +{ + struct minstrel_rate_stats *mr; + unsigned int usecs; + + mr = &mi->groups[group].rates[rate]; + + if (mr->probability < MINSTREL_FRAC(1, 10)) { + mr->cur_tp = 0; + return; + } + + usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); + usecs += minstrel_mcs_groups[group].duration[rate]; + mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability); +} + +/* + * Update rate statistics and select new primary rates + * + * Rules for rate selection: + * - max_prob_rate must use only one stream, as a tradeoff between delivery + * probability and throughput during strong fluctuations + * - as long as the max prob rate has a probability of more than 3/4, pick + * higher throughput rates, even if the probablity is a bit lower + */ +static void +minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) +{ + struct minstrel_mcs_group_data *mg; + struct minstrel_rate_stats *mr; + int cur_prob, cur_prob_tp, cur_tp, cur_tp2; + int group, i, index; + + if (mi->ampdu_packets > 0) { + mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, + MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL); + mi->ampdu_len = 0; + 
mi->ampdu_packets = 0; + } + + mi->sample_slow = 0; + mi->sample_count = 0; + mi->max_tp_rate = 0; + mi->max_tp_rate2 = 0; + mi->max_prob_rate = 0; + + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { + cur_prob = 0; + cur_prob_tp = 0; + cur_tp = 0; + cur_tp2 = 0; + + mg = &mi->groups[group]; + if (!mg->supported) + continue; + + mg->max_tp_rate = 0; + mg->max_tp_rate2 = 0; + mg->max_prob_rate = 0; + mi->sample_count++; + + for (i = 0; i < MCS_GROUP_RATES; i++) { + if (!(mg->supported & BIT(i))) + continue; + + mr = &mg->rates[i]; + mr->retry_updated = false; + index = MCS_GROUP_RATES * group + i; + minstrel_calc_rate_ewma(mp, mr); + minstrel_ht_calc_tp(mp, mi, group, i); + + if (!mr->cur_tp) + continue; + + /* ignore the lowest rate of each single-stream group */ + if (!i && minstrel_mcs_groups[group].streams == 1) + continue; + + if ((mr->cur_tp > cur_prob_tp && mr->probability > + MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { + mg->max_prob_rate = index; + cur_prob = mr->probability; + cur_prob_tp = mr->cur_tp; + } + + if (mr->cur_tp > cur_tp) { + swap(index, mg->max_tp_rate); + cur_tp = mr->cur_tp; + mr = minstrel_get_ratestats(mi, index); + } + + if (index >= mg->max_tp_rate) + continue; + + if (mr->cur_tp > cur_tp2) { + mg->max_tp_rate2 = index; + cur_tp2 = mr->cur_tp; + } + } + } + + /* try to sample up to half of the availble rates during each interval */ + mi->sample_count *= 4; + + cur_prob = 0; + cur_prob_tp = 0; + cur_tp = 0; + cur_tp2 = 0; + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { + mg = &mi->groups[group]; + if (!mg->supported) + continue; + + mr = minstrel_get_ratestats(mi, mg->max_prob_rate); + if (cur_prob_tp < mr->cur_tp && + minstrel_mcs_groups[group].streams == 1) { + mi->max_prob_rate = mg->max_prob_rate; + cur_prob = mr->cur_prob; + cur_prob_tp = mr->cur_tp; + } + + mr = minstrel_get_ratestats(mi, mg->max_tp_rate); + if (cur_tp < mr->cur_tp) { + mi->max_tp_rate = mg->max_tp_rate; + cur_tp = mr->cur_tp; + } + + mr = minstrel_get_ratestats(mi, mg->max_tp_rate2); + if (cur_tp2 < mr->cur_tp) { + mi->max_tp_rate2 = mg->max_tp_rate2; + cur_tp2 = mr->cur_tp; + } + } + + mi->stats_update = jiffies; +} + +static bool +minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) +{ + if (!rate->count) + return false; + + if (rate->idx < 0) + return false; + + return !!(rate->flags & IEEE80211_TX_RC_MCS); +} + +static void +minstrel_next_sample_idx(struct minstrel_ht_sta *mi) +{ + struct minstrel_mcs_group_data *mg; + + for (;;) { + mi->sample_group++; + mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); + mg = &mi->groups[mi->sample_group]; + + if (!mg->supported) + continue; + + if (++mg->index >= MCS_GROUP_RATES) { + mg->index = 0; + if (++mg->column >= ARRAY_SIZE(sample_table)) + mg->column = 0; + } + break; + } +} + +static void +minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, + bool primary) +{ + int group, orig_group; + + orig_group = group = *idx / MCS_GROUP_RATES; + while (group > 0) { + group--; + + if (!mi->groups[group].supported) + continue; + + if (minstrel_mcs_groups[group].streams > + minstrel_mcs_groups[orig_group].streams) + continue; + + if (primary) + *idx = mi->groups[group].max_tp_rate; + else + *idx = mi->groups[group].max_tp_rate2; + break; + } +} + +static void +minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct sta_info *sta = container_of(pubsta, struct 
sta_info, sta); + u16 tid; + + if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) + return; + + if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) + return; + + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + if (likely(sta->ampdu_mlme.tid_tx[tid])) + return; + + ieee80211_start_tx_ba_session(pubsta, tid); +} + +static void +minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct minstrel_ht_sta_priv *msp = priv_sta; + struct minstrel_ht_sta *mi = &msp->ht; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *ar = info->status.rates; + struct minstrel_rate_stats *rate, *rate2; + struct minstrel_priv *mp = priv; + bool last = false; + int group; + int i = 0; + + if (!msp->is_ht) + return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + if (!info->status.ampdu_len) { + info->status.ampdu_ack_len = 1; + info->status.ampdu_len = 1; + } + + mi->ampdu_packets++; + mi->ampdu_len += info->status.ampdu_len; + + if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { + mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); + mi->sample_tries = 3; + mi->sample_count--; + } + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { + mi->sample_packets += info->status.ampdu_len; + minstrel_next_sample_idx(mi); + } + + for (i = 0; !last; i++) { + last = (i == IEEE80211_TX_MAX_RATES - 1) || + !minstrel_ht_txstat_valid(&ar[i + 1]); + + if (!minstrel_ht_txstat_valid(&ar[i])) + break; + + group = minstrel_ht_get_group_idx(&ar[i]); + rate = &mi->groups[group].rates[ar[i].idx % 8]; + + if (last && (info->flags & IEEE80211_TX_STAT_ACK)) + rate->success += info->status.ampdu_ack_len; + + rate->attempts += ar[i].count * info->status.ampdu_len; + } + + /* + * check for sudden death of spatial multiplexing, + * downgrade to a lower number of streams if necessary. 
+ */ + rate = minstrel_get_ratestats(mi, mi->max_tp_rate); + if (rate->attempts > 30 && + MINSTREL_FRAC(rate->success, rate->attempts) < + MINSTREL_FRAC(20, 100)) + minstrel_downgrade_rate(mi, &mi->max_tp_rate, true); + + rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2); + if (rate2->attempts > 30 && + MINSTREL_FRAC(rate2->success, rate2->attempts) < + MINSTREL_FRAC(20, 100)) + minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false); + + if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { + minstrel_ht_update_stats(mp, mi); + minstrel_aggr_check(mp, sta, skb); + } +} + +static void +minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + int index) +{ + struct minstrel_rate_stats *mr; + const struct mcs_group *group; + unsigned int tx_time, tx_time_rtscts, tx_time_data; + unsigned int cw = mp->cw_min; + unsigned int t_slot = 9; /* FIXME */ + unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); + + mr = minstrel_get_ratestats(mi, index); + if (mr->probability < MINSTREL_FRAC(1, 10)) { + mr->retry_count = 1; + mr->retry_count_rtscts = 1; + return; + } + + mr->retry_count = 2; + mr->retry_count_rtscts = 2; + mr->retry_updated = true; + + group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; + tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len; + tx_time = 2 * (t_slot + mi->overhead + tx_time_data); + tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data); + do { + cw = (cw << 1) | 1; + cw = min(cw, mp->cw_max); + tx_time += cw + t_slot + mi->overhead; + tx_time_rtscts += cw + t_slot + mi->overhead_rtscts; + if (tx_time_rtscts < mp->segment_size) + mr->retry_count_rtscts++; + } while ((tx_time < mp->segment_size) && + (++mr->retry_count < mp->max_retry)); +} + + +static void +minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, + struct ieee80211_tx_rate *rate, int index, + struct ieee80211_tx_rate_control *txrc, + bool sample, bool rtscts) +{ + const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; + struct minstrel_rate_stats *mr; + + mr = minstrel_get_ratestats(mi, index); + if (!mr->retry_updated) + minstrel_calc_retransmit(mp, mi, index); + + if (mr->probability < MINSTREL_FRAC(20, 100)) + rate->count = 2; + else if (rtscts) + rate->count = mr->retry_count_rtscts; + else + rate->count = mr->retry_count; + + rate->flags = IEEE80211_TX_RC_MCS | group->flags; + if (txrc->short_preamble) + rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; + if (txrc->rts || rtscts) + rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; + rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; +} + +static inline int +minstrel_get_duration(int index) +{ + const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; + return group->duration[index % MCS_GROUP_RATES]; +} + +static int +minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) +{ + struct minstrel_rate_stats *mr; + struct minstrel_mcs_group_data *mg; + int sample_idx = 0; + + if (mi->sample_wait > 0) { + mi->sample_wait--; + return -1; + } + + if (!mi->sample_tries) + return -1; + + mi->sample_tries--; + mg = &mi->groups[mi->sample_group]; + sample_idx = sample_table[mg->column][mg->index]; + mr = &mg->rates[sample_idx]; + sample_idx += mi->sample_group * MCS_GROUP_RATES; + + /* + * When not using MRR, do not sample if the probability is already + * higher than 95% to avoid wasting airtime + */ + if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 
100))) + goto next; + + /* + * Make sure that lower rates get sampled only occasionally, + * if the link is working perfectly. + */ + if (minstrel_get_duration(sample_idx) > + minstrel_get_duration(mi->max_tp_rate)) { + if (mr->sample_skipped < 10) + goto next; + + if (mi->sample_slow++ > 2) + goto next; + } + + return sample_idx; + +next: + minstrel_next_sample_idx(mi); + return -1; +} + +static void +minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); + struct ieee80211_tx_rate *ar = info->status.rates; + struct minstrel_ht_sta_priv *msp = priv_sta; + struct minstrel_ht_sta *mi = &msp->ht; + struct minstrel_priv *mp = priv; + int sample_idx; + + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + if (!msp->is_ht) + return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); + + info->flags |= mi->tx_flags; + sample_idx = minstrel_get_sample_rate(mp, mi); + if (sample_idx >= 0) { + minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, + txrc, true, false); + minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, + txrc, false, true); + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + } else { + minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, + txrc, false, false); + minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, + txrc, false, true); + } + minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true); + + ar[3].count = 0; + ar[3].idx = -1; + + mi->total_packets++; + + /* wraparound */ + if (mi->total_packets == ~0) { + mi->total_packets = 0; + mi->sample_packets = 0; + } +} + +static void +minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + enum nl80211_channel_type oper_chan_type) +{ + struct minstrel_priv *mp = priv; + struct minstrel_ht_sta_priv *msp = priv_sta; + struct minstrel_ht_sta *mi = &msp->ht; + struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; + struct ieee80211_local *local = hw_to_local(mp->hw); + u16 sta_cap = sta->ht_cap.cap; + int ack_dur; + int stbc; + int i; + + /* fall back to the old minstrel for legacy stations */ + if (!sta->ht_cap.ht_supported) { + msp->is_ht = false; + memset(&msp->legacy, 0, sizeof(msp->legacy)); + msp->legacy.r = msp->ratelist; + msp->legacy.sample_table = msp->sample_table; + return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); + } + + BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != + MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); + + msp->is_ht = true; + memset(mi, 0, sizeof(*mi)); + mi->stats_update = jiffies; + + ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); + mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; + mi->overhead_rtscts = mi->overhead + 2 * ack_dur; + + mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); + + /* When using MRR, sample more on the first attempt, without delay */ + if (mp->has_mrr) { + mi->sample_count = 16; + mi->sample_wait = 0; + } else { + mi->sample_count = 8; + mi->sample_wait = 8; + } + mi->sample_tries = 4; + + stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> + IEEE80211_HT_CAP_RX_STBC_SHIFT; + mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; + + if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) + mi->tx_flags |= IEEE80211_TX_CTL_LDPC; + + if (oper_chan_type != NL80211_CHAN_HT40MINUS && + oper_chan_type != NL80211_CHAN_HT40PLUS) + sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + + for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { + u16 req 
= 0; + + mi->groups[i].supported = 0; + if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { + if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + req |= IEEE80211_HT_CAP_SGI_40; + else + req |= IEEE80211_HT_CAP_SGI_20; + } + + if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + + if ((sta_cap & req) != req) + continue; + + mi->groups[i].supported = + mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; + } +} + +static void +minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct minstrel_priv *mp = priv; + + minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type); +} + +static void +minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed, enum nl80211_channel_type oper_chan_type) +{ + minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); +} + +static void * +minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct ieee80211_supported_band *sband; + struct minstrel_ht_sta_priv *msp; + struct minstrel_priv *mp = priv; + struct ieee80211_hw *hw = mp->hw; + int max_rates = 0; + int i; + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + sband = hw->wiphy->bands[i]; + if (sband && sband->n_bitrates > max_rates) + max_rates = sband->n_bitrates; + } + + msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp); + if (!msp) + return NULL; + + msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp); + if (!msp->ratelist) + goto error; + + msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp); + if (!msp->sample_table) + goto error1; + + return msp; + +error1: + kfree(msp->ratelist); +error: + kfree(msp); + return NULL; +} + +static void +minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) +{ + struct minstrel_ht_sta_priv *msp = priv_sta; + + kfree(msp->sample_table); + kfree(msp->ratelist); + kfree(msp); +} + +static void * +minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return mac80211_minstrel.alloc(hw, debugfsdir); +} + +static void +minstrel_ht_free(void *priv) +{ + mac80211_minstrel.free(priv); +} + +static struct rate_control_ops mac80211_minstrel_ht = { + .name = "minstrel_ht", + .tx_status = minstrel_ht_tx_status, + .get_rate = minstrel_ht_get_rate, + .rate_init = minstrel_ht_rate_init, + .rate_update = minstrel_ht_rate_update, + .alloc_sta = minstrel_ht_alloc_sta, + .free_sta = minstrel_ht_free_sta, + .alloc = minstrel_ht_alloc, + .free = minstrel_ht_free, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = minstrel_ht_add_sta_debugfs, + .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs, +#endif +}; + + +static void +init_sample_table(void) +{ + int col, i, new_idx; + u8 rnd[MCS_GROUP_RATES]; + + memset(sample_table, 0xff, sizeof(sample_table)); + for (col = 0; col < SAMPLE_COLUMNS; col++) { + for (i = 0; i < MCS_GROUP_RATES; i++) { + get_random_bytes(rnd, sizeof(rnd)); + new_idx = (i + rnd[i]) % MCS_GROUP_RATES; + + while (sample_table[col][new_idx] != 0xff) + new_idx = (new_idx + 1) % MCS_GROUP_RATES; + + sample_table[col][new_idx] = i; + } + } +} + +int __init +rc80211_minstrel_ht_init(void) +{ + init_sample_table(); + return ieee80211_rate_control_register(&mac80211_minstrel_ht); +} + +void +rc80211_minstrel_ht_exit(void) +{ + ieee80211_rate_control_unregister(&mac80211_minstrel_ht); +} diff --git 
a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h new file mode 100644 index 0000000000000000000000000000000000000000..462d2b227ed579682e7fe64fc2556377320263fa --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2010 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __RC_MINSTREL_HT_H +#define __RC_MINSTREL_HT_H + +/* + * The number of streams can be changed to 2 to reduce code + * size and memory footprint. + */ +#define MINSTREL_MAX_STREAMS 3 +#define MINSTREL_STREAM_GROUPS 4 + +/* scaled fraction values */ +#define MINSTREL_SCALE 16 +#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div) +#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE) + +#define MCS_GROUP_RATES 8 + +struct mcs_group { + u32 flags; + unsigned int streams; + unsigned int duration[MCS_GROUP_RATES]; +}; + +extern const struct mcs_group minstrel_mcs_groups[]; + +struct minstrel_rate_stats { + /* current / last sampling period attempts/success counters */ + unsigned int attempts, last_attempts; + unsigned int success, last_success; + + /* total attempts/success counters */ + u64 att_hist, succ_hist; + + /* current throughput */ + unsigned int cur_tp; + + /* packet delivery probabilities */ + unsigned int cur_prob, probability; + + /* maximum retry counts */ + unsigned int retry_count; + unsigned int retry_count_rtscts; + + bool retry_updated; + u8 sample_skipped; +}; + +struct minstrel_mcs_group_data { + u8 index; + u8 column; + + /* bitfield of supported MCS rates of this group */ + u8 supported; + + /* selected primary rates */ + unsigned int max_tp_rate; + unsigned int max_tp_rate2; + unsigned int max_prob_rate; + + /* MCS rate statistics */ + struct minstrel_rate_stats rates[MCS_GROUP_RATES]; +}; + +struct minstrel_ht_sta { + /* ampdu length (average, per sampling interval) */ + unsigned int ampdu_len; + unsigned int ampdu_packets; + + /* ampdu length (EWMA) */ + unsigned int avg_ampdu_len; + + /* best throughput rate */ + unsigned int max_tp_rate; + + /* second best throughput rate */ + unsigned int max_tp_rate2; + + /* best probability rate */ + unsigned int max_prob_rate; + + /* time of last status update */ + unsigned long stats_update; + + /* overhead time in usec for each frame */ + unsigned int overhead; + unsigned int overhead_rtscts; + + unsigned int total_packets; + unsigned int sample_packets; + + /* tx flags to add for frames for this sta */ + u32 tx_flags; + + u8 sample_wait; + u8 sample_tries; + u8 sample_count; + u8 sample_slow; + + /* current MCS group to be sampled */ + u8 sample_group; + + /* MCS rate group info and statistics */ + struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS]; +}; + +struct minstrel_ht_sta_priv { + union { + struct minstrel_ht_sta ht; + struct minstrel_sta_info legacy; + }; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *dbg_stats; +#endif + void *ratelist; + void *sample_table; + bool is_ht; +}; + +void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); +void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta); + +#endif diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..4a5a4b3e7799f1845421843e22fee503ef62ffb4 --- /dev/null +++ 
b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2010 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include "rc80211_minstrel.h" +#include "rc80211_minstrel_ht.h" + +static int +minstrel_ht_stats_open(struct inode *inode, struct file *file) +{ + struct minstrel_ht_sta_priv *msp = inode->i_private; + struct minstrel_ht_sta *mi = &msp->ht; + struct minstrel_debugfs_info *ms; + unsigned int i, j, tp, prob, eprob; + char *p; + int ret; + + if (!msp->is_ht) { + inode->i_private = &msp->legacy; + ret = minstrel_stats_open(inode, file); + inode->i_private = msp; + return ret; + } + + ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL); + if (!ms) + return -ENOMEM; + + file->private_data = ms; + p = ms->buf; + p += sprintf(p, "type rate throughput ewma prob this prob " + "this succ/attempt success attempts\n"); + for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) { + char htmode = '2'; + char gimode = 'L'; + + if (!mi->groups[i].supported) + continue; + + if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + htmode = '4'; + if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) + gimode = 'S'; + + for (j = 0; j < MCS_GROUP_RATES; j++) { + struct minstrel_rate_stats *mr = &mi->groups[i].rates[j]; + int idx = i * MCS_GROUP_RATES + j; + + if (!(mi->groups[i].supported & BIT(j))) + continue; + + p += sprintf(p, "HT%c0/%cGI ", htmode, gimode); + + *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' '; + *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' '; + *(p++) = (idx == mi->max_prob_rate) ? 
'P' : ' '; + p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) * + MCS_GROUP_RATES + j); + + tp = mr->cur_tp / 10; + prob = MINSTREL_TRUNC(mr->cur_prob * 1000); + eprob = MINSTREL_TRUNC(mr->probability * 1000); + + p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " + "%3u(%3u) %8llu %8llu\n", + tp / 10, tp % 10, + eprob / 10, eprob % 10, + prob / 10, prob % 10, + mr->last_success, + mr->last_attempts, + (unsigned long long)mr->succ_hist, + (unsigned long long)mr->att_hist); + } + } + p += sprintf(p, "\nTotal packet count:: ideal %d " + "lookaround %d\n", + max(0, (int) mi->total_packets - (int) mi->sample_packets), + mi->sample_packets); + p += sprintf(p, "Average A-MPDU length: %d.%d\n", + MINSTREL_TRUNC(mi->avg_ampdu_len), + MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); + ms->len = p - ms->buf; + + return 0; +} + +static const struct file_operations minstrel_ht_stat_fops = { + .owner = THIS_MODULE, + .open = minstrel_ht_stats_open, + .read = minstrel_stats_read, + .release = minstrel_stats_release, +}; + +void +minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) +{ + struct minstrel_ht_sta_priv *msp = priv_sta; + + msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp, + &minstrel_ht_stat_fops); +} + +void +minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta) +{ + struct minstrel_ht_sta_priv *msp = priv_sta; + + debugfs_remove(msp->dbg_stats); +} diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index be9abc2e6348943753aab8daae864b561adce82f..fa0f37e4afe4901226b0ccd668ae6a88c136eeb2 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -293,7 +293,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { skb2->dev = prev_dev; - netif_rx(skb2); + netif_receive_skb(skb2); } } @@ -304,7 +304,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, if (prev_dev) { skb->dev = prev_dev; - netif_rx(skb); + netif_receive_skb(skb); } else dev_kfree_skb(skb); @@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; - spin_lock(&sta->lock); - - if (!sta->ampdu_mlme.tid_active_rx[tid]) - goto dont_reorder_unlock; - - tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; + tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); + if (!tid_agg_rx) + goto dont_reorder; /* qos null data frames are excluded */ if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) - goto dont_reorder_unlock; + goto dont_reorder; /* new, potentially un-ordered, ampdu frame - process it */ @@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, /* if this mpdu is fragmented - terminate rx aggregation session */ sc = le16_to_cpu(hdr->seq_ctrl); if (sc & IEEE80211_SCTL_FRAG) { - spin_unlock(&sta->lock); - __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, - WLAN_REASON_QSTA_REQUIRE_SETUP); - dev_kfree_skb(skb); + skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; + skb_queue_tail(&rx->sdata->skb_queue, skb); + ieee80211_queue_work(&local->hw, &rx->sdata->work); return; } - if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) { - spin_unlock(&sta->lock); + /* + * No locking needed -- we will only ever process one + * RX packet at a time, and thus own tid_agg_rx. All + * other code manipulating it needs to (and does) make + * sure that we cannot get to it any more before doing + * anything with it. 
+ */ + if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) return; - } - dont_reorder_unlock: - spin_unlock(&sta->lock); dont_reorder: __skb_queue_tail(frames, skb); } @@ -825,6 +824,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) ieee80211_rx_result result = RX_DROP_UNUSABLE; struct ieee80211_key *stakey = NULL; int mmie_keyidx = -1; + __le16 fc; /* * Key selection 101 @@ -866,13 +866,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (rx->sta) stakey = rcu_dereference(rx->sta->key); - if (!ieee80211_has_protected(hdr->frame_control)) + fc = hdr->frame_control; + + if (!ieee80211_has_protected(fc)) mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); if (!is_multicast_ether_addr(hdr->addr1) && stakey) { rx->key = stakey; /* Skip decryption if the frame is not protected. */ - if (!ieee80211_has_protected(hdr->frame_control)) + if (!ieee80211_has_protected(fc)) return RX_CONTINUE; } else if (mmie_keyidx >= 0) { /* Broadcast/multicast robust management frame / BIP */ @@ -884,7 +886,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) return RX_DROP_MONITOR; /* unexpected BIP keyidx */ rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); - } else if (!ieee80211_has_protected(hdr->frame_control)) { + } else if (!ieee80211_has_protected(fc)) { /* * The frame was not protected, so skip decryption. However, we * need to set rx->key if there is a key that could have been @@ -892,7 +894,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) * have been expected. */ struct ieee80211_key *key = NULL; - if (ieee80211_is_mgmt(hdr->frame_control) && + if (ieee80211_is_mgmt(fc) && is_multicast_ether_addr(hdr->addr1) && (key = rcu_dereference(rx->sdata->default_mgmt_key))) rx->key = key; @@ -914,7 +916,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) (status->flag & RX_FLAG_IV_STRIPPED)) return RX_CONTINUE; - hdrlen = ieee80211_hdrlen(hdr->frame_control); + hdrlen = ieee80211_hdrlen(fc); if (rx->skb->len < 8 + hdrlen) return RX_DROP_UNUSABLE; /* TODO: count this? */ @@ -947,19 +949,17 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; - - hdr = (struct ieee80211_hdr *)rx->skb->data; - - /* Check for weak IVs if possible */ - if (rx->sta && rx->key->conf.alg == ALG_WEP && - ieee80211_is_data(hdr->frame_control) && - (!(status->flag & RX_FLAG_IV_STRIPPED) || - !(status->flag & RX_FLAG_DECRYPTED)) && - ieee80211_wep_is_weak_iv(rx->skb, rx->key)) - rx->sta->wep_weak_iv_count++; + /* the hdr variable is invalid now! */ switch (rx->key->conf.alg) { case ALG_WEP: + /* Check for weak IVs if possible */ + if (rx->sta && ieee80211_is_data(fc) && + (!(status->flag & RX_FLAG_IV_STRIPPED) || + !(status->flag & RX_FLAG_DECRYPTED)) && + ieee80211_wep_is_weak_iv(rx->skb, rx->key)) + rx->sta->wep_weak_iv_count++; + result = ieee80211_crypto_wep_decrypt(rx); break; case ALG_TKIP: @@ -1267,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) rx->queue, &(rx->skb)); if (rx->key && rx->key->conf.alg == ALG_CCMP && ieee80211_has_protected(fc)) { + int queue = ieee80211_is_mgmt(fc) ? + NUM_RX_DATA_QUEUES : rx->queue; /* Store CCMP PN so that we can verify that the next * fragment has a sequential PN value. 
*/ entry->ccmp = 1; memcpy(entry->last_pn, - rx->key->u.ccmp.rx_pn[rx->queue], + rx->key->u.ccmp.rx_pn[queue], CCMP_PN_LEN); } return RX_QUEUED; @@ -1291,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) if (entry->ccmp) { int i; u8 pn[CCMP_PN_LEN], *rpn; + int queue; if (!rx->key || rx->key->conf.alg != ALG_CCMP) return RX_DROP_UNUSABLE; memcpy(pn, entry->last_pn, CCMP_PN_LEN); @@ -1299,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) if (pn[i]) break; } - rpn = rx->key->u.ccmp.rx_pn[rx->queue]; + queue = ieee80211_is_mgmt(fc) ? + NUM_RX_DATA_QUEUES : rx->queue; + rpn = rx->key->u.ccmp.rx_pn[queue]; if (memcmp(pn, rpn, CCMP_PN_LEN)) return RX_DROP_UNUSABLE; memcpy(entry->last_pn, pn, CCMP_PN_LEN); @@ -1573,7 +1578,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) /* deliver to local stack */ skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx(skb); + netif_receive_skb(skb); } } @@ -1829,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) &bar_data, sizeof(bar_data))) return RX_DROP_MONITOR; - spin_lock(&rx->sta->lock); tid = le16_to_cpu(bar_data.control) >> 12; - if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { - spin_unlock(&rx->sta->lock); + + tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); + if (!tid_agg_rx) return RX_DROP_MONITOR; - } - tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; @@ -1848,11 +1851,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, frames); kfree_skb(skb); - spin_unlock(&rx->sta->lock); return RX_QUEUED; } - return RX_CONTINUE; + /* + * After this point, we only want management frames, + * so we can drop all remaining control frames to + * cooked monitor interfaces. 
+ */ + return RX_DROP_MONITOR; } static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, @@ -1944,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) if (len < IEEE80211_MIN_ACTION_SIZE + 1) break; - if (sdata->vif.type == NL80211_IFTYPE_STATION) - return ieee80211_sta_rx_mgmt(sdata, rx->skb); - switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.addba_req))) - return RX_DROP_MONITOR; - ieee80211_process_addba_request(local, rx->sta, mgmt, len); - goto handled; + goto invalid; + break; case WLAN_ACTION_ADDBA_RESP: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.addba_resp))) - break; - ieee80211_process_addba_resp(local, rx->sta, mgmt, len); - goto handled; + goto invalid; + break; case WLAN_ACTION_DELBA: if (len < (IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.delba))) - break; - ieee80211_process_delba(sdata, rx->sta, mgmt, len); - goto handled; + goto invalid; + break; + default: + goto invalid; } - break; + + goto queue; case WLAN_CATEGORY_SPECTRUM_MGMT: if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) break; @@ -1997,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) break; - return ieee80211_sta_rx_mgmt(sdata, rx->skb); + goto queue; } break; case WLAN_CATEGORY_SA_QUERY: @@ -2015,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) break; case WLAN_CATEGORY_MESH_PLINK: case WLAN_CATEGORY_MESH_PATH_SEL: - if (ieee80211_vif_is_mesh(&sdata->vif)) - return ieee80211_mesh_rx_mgmt(sdata, rx->skb); - break; + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + goto queue; } + invalid: /* * For AP mode, hostapd is responsible for handling any action * frames that we didn't handle, including returning unknown @@ -2039,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) */ status = IEEE80211_SKB_RXCB(rx->skb); - if (sdata->vif.type == NL80211_IFTYPE_STATION && - cfg80211_rx_action(rx->sdata->dev, status->freq, + if (cfg80211_rx_action(rx->sdata->dev, status->freq, rx->skb->data, rx->skb->len, GFP_ATOMIC)) goto handled; @@ -2052,11 +2056,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, GFP_ATOMIC); if (nskb) { - struct ieee80211_mgmt *mgmt = (void *)nskb->data; + struct ieee80211_mgmt *nmgmt = (void *)nskb->data; - mgmt->u.action.category |= 0x80; - memcpy(mgmt->da, mgmt->sa, ETH_ALEN); - memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN); + nmgmt->u.action.category |= 0x80; + memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); + memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); memset(nskb->cb, 0, sizeof(nskb->cb)); @@ -2068,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) rx->sta->rx_packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; + + queue: + rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; + skb_queue_tail(&sdata->skb_queue, rx->skb); + ieee80211_queue_work(&local->hw, &sdata->work); + if (rx->sta) + rx->sta->rx_packets++; + return RX_QUEUED; } static ieee80211_rx_result debug_noinline @@ -2075,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) { struct ieee80211_sub_if_data *sdata = rx->sdata; ieee80211_rx_result rxs; + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + __le16 stype; if (!(rx->flags & IEEE80211_RX_RA_MATCH)) return RX_DROP_MONITOR; + if (rx->skb->len < 24) + return RX_DROP_MONITOR; + if (ieee80211_drop_unencrypted_mgmt(rx)) 
return RX_DROP_UNUSABLE; @@ -2086,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) if (rxs != RX_CONTINUE) return rxs; - if (ieee80211_vif_is_mesh(&sdata->vif)) - return ieee80211_mesh_rx_mgmt(sdata, rx->skb); + stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); - if (sdata->vif.type == NL80211_IFTYPE_ADHOC) - return ieee80211_ibss_rx_mgmt(sdata, rx->skb); + if (!ieee80211_vif_is_mesh(&sdata->vif) && + sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_STATION) + return RX_DROP_MONITOR; + + switch (stype) { + case cpu_to_le16(IEEE80211_STYPE_BEACON): + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + /* process for all: mesh, mlme, ibss */ + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + /* process only for station */ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return RX_DROP_MONITOR; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + case cpu_to_le16(IEEE80211_STYPE_AUTH): + /* process only for ibss */ + if (sdata->vif.type != NL80211_IFTYPE_ADHOC) + return RX_DROP_MONITOR; + break; + default: + return RX_DROP_MONITOR; + } - if (sdata->vif.type == NL80211_IFTYPE_STATION) - return ieee80211_sta_rx_mgmt(sdata, rx->skb); + /* queue up frame and kick off work to process it */ + rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; + skb_queue_tail(&sdata->skb_queue, rx->skb); + ieee80211_queue_work(&rx->local->hw, &sdata->work); + if (rx->sta) + rx->sta->rx_packets++; - return RX_DROP_MONITOR; + return RX_QUEUED; } static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, @@ -2151,7 +2194,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, u8 rate_or_pad; __le16 chan_freq; __le16 chan_flags; - } __attribute__ ((packed)) *rthdr; + } __packed *rthdr; struct sk_buff *skb = rx->skb, *skb2; struct net_device *prev_dev = NULL; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); @@ -2201,7 +2244,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { skb2->dev = prev_dev; - netif_rx(skb2); + netif_receive_skb(skb2); } } @@ -2212,7 +2255,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, if (prev_dev) { skb->dev = prev_dev; - netif_rx(skb); + netif_receive_skb(skb); skb = NULL; } else goto out_free_skb; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index e1b0be7a57b9772ddb1c006793ca6a5929580aba..41f20fb7e67083fa8635ba50de021babe7778e8c 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -114,6 +114,10 @@ ieee80211_bss_info_update(struct ieee80211_local *local, bss->dtim_period = tim_ie->dtim_period; } + /* If the beacon had no TIM IE, or it was invalid, use 1 */ + if (beacon && !bss->dtim_period) + bss->dtim_period = 1; + /* replace old supported rates if we get new values */ srlen = 0; if (elems->supp_rates) { @@ -734,7 +738,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; int ret = -EBUSY; - enum nl80211_band band; + enum ieee80211_band band; mutex_lock(&local->scan_mtx); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ba9360a475b0c15f657017f5ca5b93997a70dd49..6d86f0c1ad0415acc4d96d0131d27bb2dc77f614 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -235,6 +235,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, spin_lock_init(&sta->lock); spin_lock_init(&sta->flaglock); 
INIT_WORK(&sta->drv_unblock_wk, sta_unblock); + INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); + mutex_init(&sta->ampdu_mlme.mtx); memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; @@ -246,14 +248,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, } for (i = 0; i < STA_TID_NUM; i++) { - /* timer_to_tid must be initialized with identity mapping to - * enable session_timer's data differentiation. refer to - * sta_rx_agg_session_timer_expired for useage */ + /* + * timer_to_tid must be initialized with identity mapping + * to enable session_timer's data differentiation. See + * sta_rx_agg_session_timer_expired for usage. + */ sta->timer_to_tid[i] = i; - /* tx */ - sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; - sta->ampdu_mlme.tid_tx[i] = NULL; - sta->ampdu_mlme.addba_req_num[i] = 0; } skb_queue_head_init(&sta->ps_tx_buf); skb_queue_head_init(&sta->tx_filtered); @@ -647,15 +647,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) return ret; if (sta->key) { - ieee80211_key_free(sta->key); - /* - * We have only unlinked the key, and actually destroying it - * may mean it is removed from hardware which requires that - * the key->sta pointer is still valid, so flush the key todo - * list here. - */ - ieee80211_key_todo(); - + ieee80211_key_free(local, sta->key); WARN_ON(sta->key); } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index df9d45544ca5344be3427383e8a6c0209441a856..54262e72376d0e662ebbb1b27b02a14e6a869cdb 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -42,9 +42,6 @@ * be in the queues * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping * station in power-save mode, reply when the driver unblocks. - * @WLAN_STA_DISASSOC: Disassociation in progress. - * This is used to reject TX BA session requests when disassociation - * is in progress. */ enum ieee80211_sta_info_flags { WLAN_STA_AUTH = 1<<0, @@ -60,38 +57,44 @@ enum ieee80211_sta_info_flags { WLAN_STA_BLOCK_BA = 1<<11, WLAN_STA_PS_DRIVER = 1<<12, WLAN_STA_PSPOLL = 1<<13, - WLAN_STA_DISASSOC = 1<<14, }; #define STA_TID_NUM 16 #define ADDBA_RESP_INTERVAL HZ -#define HT_AGG_MAX_RETRIES (0x3) +#define HT_AGG_MAX_RETRIES 0x3 -#define HT_AGG_STATE_INITIATOR_SHIFT (4) - -#define HT_ADDBA_REQUESTED_MSK BIT(0) -#define HT_ADDBA_DRV_READY_MSK BIT(1) -#define HT_ADDBA_RECEIVED_MSK BIT(2) -#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) -#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT) -#define HT_AGG_STATE_IDLE (0x0) -#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \ - HT_ADDBA_DRV_READY_MSK | \ - HT_ADDBA_RECEIVED_MSK) +#define HT_AGG_STATE_DRV_READY 0 +#define HT_AGG_STATE_RESPONSE_RECEIVED 1 +#define HT_AGG_STATE_OPERATIONAL 2 +#define HT_AGG_STATE_STOPPING 3 +#define HT_AGG_STATE_WANT_START 4 +#define HT_AGG_STATE_WANT_STOP 5 /** * struct tid_ampdu_tx - TID aggregation information (Tx). * + * @rcu_head: rcu head for freeing structure * @addba_resp_timer: timer for peer's response to addba request * @pending: pending frames queue -- use sta's spinlock to protect - * @ssn: Starting Sequence Number expected to be aggregated. * @dialog_token: dialog token for aggregation session + * @state: session state (see above) + * @stop_initiator: initiator of a session stop + * + * This structure is protected by RCU and the per-station + * spinlock. 
Assignments to the array holding it must hold + * the spinlock, only the TX path can access it under RCU + * lock-free if, and only if, the state has the flag + * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path + * must also acquire the spinlock and re-check the state, + * see comments in the tx code touching it. */ struct tid_ampdu_tx { + struct rcu_head rcu_head; struct timer_list addba_resp_timer; struct sk_buff_head pending; - u16 ssn; + unsigned long state; u8 dialog_token; + u8 stop_initiator; }; /** @@ -106,8 +109,18 @@ struct tid_ampdu_tx { * @buf_size: buffer size for incoming A-MPDUs * @timeout: reset timer value (in TUs). * @dialog_token: dialog token for aggregation session + * @rcu_head: RCU head used for freeing this struct + * + * This structure is protected by RCU and the per-station + * spinlock. Assignments to the array holding it must hold + * the spinlock, only the RX path can access it under RCU + * lock-free. The RX path, since it is single-threaded, + * can even modify the structure without locking since the + * only other modifications to it are done when the struct + * can not yet or no longer be found by the RX path. */ struct tid_ampdu_rx { + struct rcu_head rcu_head; struct sk_buff **reorder_buf; unsigned long *reorder_time; struct timer_list session_timer; @@ -119,6 +132,32 @@ struct tid_ampdu_rx { u8 dialog_token; }; +/** + * struct sta_ampdu_mlme - STA aggregation information. + * + * @tid_rx: aggregation info for Rx per TID -- RCU protected + * @tid_tx: aggregation info for Tx per TID + * @addba_req_num: number of times addBA request has been sent. + * @dialog_token_allocator: dialog token enumerator for each new session; + * @work: work struct for starting/stopping aggregation + * @tid_rx_timer_expired: bitmap indicating on which TIDs the + * RX timer expired until the work for it runs + * @mtx: mutex to protect all TX data (except non-NULL assignments + * to tid_tx[idx], which are protected by the sta spinlock) + */ +struct sta_ampdu_mlme { + struct mutex mtx; + /* rx */ + struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; + unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; + /* tx */ + struct work_struct work; + struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; + u8 addba_req_num[STA_TID_NUM]; + u8 dialog_token_allocator; +}; + + /** * enum plink_state - state of a mesh peer link finite state machine * @@ -142,28 +181,6 @@ enum plink_state { PLINK_BLOCKED }; -/** - * struct sta_ampdu_mlme - STA aggregation information. - * - * @tid_active_rx: TID's state in Rx session state machine. - * @tid_rx: aggregation info for Rx per TID - * @tid_state_tx: TID's state in Tx session state machine. - * @tid_tx: aggregation info for Tx per TID - * @addba_req_num: number of times addBA request has been sent. - * @dialog_token_allocator: dialog token enumerator for each new session; - */ -struct sta_ampdu_mlme { - /* rx */ - bool tid_active_rx[STA_TID_NUM]; - struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; - /* tx */ - u8 tid_state_tx[STA_TID_NUM]; - struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; - u8 addba_req_num[STA_TID_NUM]; - u8 dialog_token_allocator; -}; - - /** * struct sta_info - STA information * @@ -410,20 +427,20 @@ void for_each_sta_info_type_check(struct ieee80211_local *local, { } -#define for_each_sta_info(local, _addr, sta, nxt) \ +#define for_each_sta_info(local, _addr, _sta, nxt) \ for ( /* initialise loop */ \ - sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\ - nxt = sta ? 
rcu_dereference(sta->hnext) : NULL; \ + _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\ + nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \ /* typecheck */ \ - for_each_sta_info_type_check(local, (_addr), sta, nxt), \ + for_each_sta_info_type_check(local, (_addr), _sta, nxt),\ /* continue condition */ \ - sta; \ + _sta; \ /* advance loop */ \ - sta = nxt, \ - nxt = sta ? rcu_dereference(sta->hnext) : NULL \ + _sta = nxt, \ + nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ ) \ /* compare address and run code only if it matches */ \ - if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0) + if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0) /* * Get STA info by index, BROKEN! diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 94613af009f32d668f70e1522e5842d78eabf414..10caec5ea8fa7740d9617605adf1590eefa2a730 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -47,7 +47,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, /* * This skb 'survived' a round-trip through the driver, and * hopefully the driver didn't mangle it too badly. However, - * we can definitely not rely on the the control information + * we can definitely not rely on the control information * being correct. Clear it so we don't get junk there, and * indicate that it needs new processing, but must not be * modified/encrypted again. @@ -377,7 +377,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { skb2->dev = prev_dev; - netif_rx(skb2); + netif_receive_skb(skb2); } } @@ -386,7 +386,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } if (prev_dev) { skb->dev = prev_dev; - netif_rx(skb); + netif_receive_skb(skb); skb = NULL; } rcu_read_unlock(); diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 7ef491e9d66d0954aa6eba8891bcb22319f5b54b..e840c9cd46db94b122b56f2fb34bf53873585896 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c @@ -202,9 +202,9 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key); * @payload_len is the length of payload (_not_ including IV/ICV length). * @ta is the transmitter addresses. */ -void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, - struct ieee80211_key *key, - u8 *pos, size_t payload_len, u8 *ta) +int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, + struct ieee80211_key *key, + u8 *pos, size_t payload_len, u8 *ta) { u8 rc4key[16]; struct tkip_ctx *ctx = &key->u.tkip.tx; @@ -216,7 +216,7 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); - ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); + return ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); } /* Decrypt packet payload with TKIP using @key. 
@pos is a pointer to the diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h index d4714383f5fce5a3f5216f30669b669b8a11c1aa..7e83dee976fa0d4834ec6c09d6a0bb76b3e1123e 100644 --- a/net/mac80211/tkip.h +++ b/net/mac80211/tkip.h @@ -15,7 +15,7 @@ u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); -void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, +int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, struct ieee80211_key *key, u8 *pos, size_t payload_len, u8 *ta); enum { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 680bcb7093dbdc4f00cf5baf374812379ccc0040..c54db966926b455dc3d72ae0f71b7c71d3204861 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -575,17 +575,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) return TX_CONTINUE; } -static ieee80211_tx_result debug_noinline -ieee80211_tx_h_sta(struct ieee80211_tx_data *tx) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); - - if (tx->sta && tx->sta->uploaded) - info->control.sta = &tx->sta->sta; - - return TX_CONTINUE; -} - static ieee80211_tx_result debug_noinline ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) { @@ -1092,6 +1081,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, return true; } +static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, + struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct tid_ampdu_tx *tid_tx, + int tid) +{ + bool queued = false; + + if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { + info->flags |= IEEE80211_TX_CTL_AMPDU; + } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { + /* + * nothing -- this aggregation session is being started + * but that might still fail with the driver + */ + } else { + spin_lock(&tx->sta->lock); + /* + * Need to re-check now, because we may get here + * + * 1) in the window during which the setup is actually + * already done, but not marked yet because not all + * packets are spliced over to the driver pending + * queue yet -- if this happened we acquire the lock + * either before or after the splice happens, but + * need to recheck which of these cases happened. + * + * 2) during session teardown, if the OPERATIONAL bit + * was cleared due to the teardown but the pointer + * hasn't been assigned NULL yet (or we loaded it + * before it was assigned) -- in this case it may + * now be NULL which means we should just let the + * packet pass through because splicing the frames + * back is already done. 
+ */ + tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; + + if (!tid_tx) { + /* do nothing, let packet pass through */ + } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { + info->flags |= IEEE80211_TX_CTL_AMPDU; + } else { + queued = true; + info->control.vif = &tx->sdata->vif; + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + __skb_queue_tail(&tid_tx->pending, skb); + } + spin_unlock(&tx->sta->lock); + } + + return queued; +} + /* * initialises @tx */ @@ -1104,8 +1146,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, tid; - u8 *qc, *state; - bool queued = false; + u8 *qc; memset(tx, 0, sizeof(*tx)); tx->skb = skb; @@ -1157,35 +1198,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, qc = ieee80211_get_qos_ctl(hdr); tid = *qc & IEEE80211_QOS_CTL_TID_MASK; - spin_lock(&tx->sta->lock); - /* - * XXX: This spinlock could be fairly expensive, but see the - * comment in agg-tx.c:ieee80211_agg_tx_operational(). - * One way to solve this would be to do something RCU-like - * for managing the tid_tx struct and using atomic bitops - * for the actual state -- by introducing an actual - * 'operational' bit that would be possible. It would - * require changing ieee80211_agg_tx_operational() to - * set that bit, and changing the way tid_tx is managed - * everywhere, including races between that bit and - * tid_tx going away (tid_tx being added can be easily - * committed to memory before the 'operational' bit). - */ - tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; - state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; - if (*state == HT_AGG_STATE_OPERATIONAL) { - info->flags |= IEEE80211_TX_CTL_AMPDU; - } else if (*state != HT_AGG_STATE_IDLE) { - /* in progress */ - queued = true; - info->control.vif = &sdata->vif; - info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; - __skb_queue_tail(&tid_tx->pending, skb); - } - spin_unlock(&tx->sta->lock); + tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); + if (tid_tx) { + bool queued; - if (unlikely(queued)) - return TX_QUEUED; + queued = ieee80211_tx_prep_agg(tx, skb, info, + tid_tx, tid); + + if (unlikely(queued)) + return TX_QUEUED; + } } if (is_multicast_ether_addr(hdr->addr1)) { @@ -1274,6 +1296,11 @@ static int __ieee80211_tx(struct ieee80211_local *local, break; } + if (sta && sta->uploaded) + info->control.sta = &sta->sta; + else + info->control.sta = NULL; + ret = drv_tx(local, skb); if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { dev_kfree_skb(skb); @@ -1313,7 +1340,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) CALL_TXH(ieee80211_tx_h_check_assoc); CALL_TXH(ieee80211_tx_h_ps_buf); CALL_TXH(ieee80211_tx_h_select_key); - CALL_TXH(ieee80211_tx_h_sta); if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) CALL_TXH(ieee80211_tx_h_rate_ctrl); @@ -1909,11 +1935,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, h_pos += encaps_len; } +#ifdef CONFIG_MAC80211_MESH if (meshhdrlen > 0) { memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); nh_pos += meshhdrlen; h_pos += meshhdrlen; } +#endif if (ieee80211_is_data_qos(fc)) { __le16 *qos_control; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5b79d552780a03dd68641e8693835ec818aaf280..748387d45bc05f8fbab8a579066c7c0771a6cca8 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -803,8 +803,12 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) /* after reinitialize QoS TX queues setting to 
default, * disable QoS at all */ - local->hw.conf.flags &= ~IEEE80211_CONF_QOS; - drv_config(local, IEEE80211_CONF_CHANGE_QOS); + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { + sdata->vif.bss_conf.qos = + sdata->vif.type != NL80211_IFTYPE_STATION; + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS); + } } void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, @@ -1138,18 +1142,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) } mutex_unlock(&local->sta_mtx); - /* Clear Suspend state so that ADDBA requests can be processed */ - - rcu_read_lock(); - - if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { - list_for_each_entry_rcu(sta, &local->sta_list, list) { - clear_sta_flags(sta, WLAN_STA_BLOCK_BA); - } - } - - rcu_read_unlock(); - /* setup RTS threshold */ drv_set_rts_threshold(local, hw->wiphy->rts_threshold); @@ -1173,7 +1165,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT | BSS_CHANGED_BSSID | - BSS_CHANGED_CQM; + BSS_CHANGED_CQM | + BSS_CHANGED_QOS; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: @@ -1202,13 +1195,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) } } - rcu_read_lock(); + /* + * Clear the WLAN_STA_BLOCK_BA flag so new aggregation + * sessions can be established after a resume. + * + * Also tear down aggregation sessions since reconfiguring + * them in a hardware restart scenario is not easily done + * right now, and the hardware will have lost information + * about the sessions, but we and the AP still think they + * are active. This is really a workaround though. + */ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { - list_for_each_entry_rcu(sta, &local->sta_list, list) { + mutex_lock(&local->sta_mtx); + + list_for_each_entry(sta, &local->sta_list, list) { ieee80211_sta_tear_down_BA_sessions(sta); + clear_sta_flags(sta, WLAN_STA_BLOCK_BA); } + + mutex_unlock(&local->sta_mtx); } - rcu_read_unlock(); /* add back keys */ list_for_each_entry(sdata, &local->interfaces, list) diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 5f3a4113bda1682bb0d30f0eaebd6f153037e3b5..9ebc8d8a1f5b20d3e6872610042128848ec0a4a8 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -32,13 +32,16 @@ int ieee80211_wep_init(struct ieee80211_local *local) local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(local->wep_tx_tfm)) + if (IS_ERR(local->wep_tx_tfm)) { + local->wep_rx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_tx_tfm); + } local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_rx_tfm)) { crypto_free_blkcipher(local->wep_tx_tfm); + local->wep_tx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_rx_tfm); } @@ -47,8 +50,10 @@ int ieee80211_wep_init(struct ieee80211_local *local) void ieee80211_wep_free(struct ieee80211_local *local) { - crypto_free_blkcipher(local->wep_tx_tfm); - crypto_free_blkcipher(local->wep_rx_tfm); + if (!IS_ERR(local->wep_tx_tfm)) + crypto_free_blkcipher(local->wep_tx_tfm); + if (!IS_ERR(local->wep_rx_tfm)) + crypto_free_blkcipher(local->wep_rx_tfm); } static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) @@ -122,19 +127,24 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local, /* Perform WEP encryption using given key. data buffer must have tailroom * for 4-byte ICV. data_len must not include this ICV. Note: this function * does _not_ add IV. 
data = RC4(data | CRC32(data)) */ -void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, - size_t klen, u8 *data, size_t data_len) +int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, + size_t klen, u8 *data, size_t data_len) { struct blkcipher_desc desc = { .tfm = tfm }; struct scatterlist sg; __le32 icv; + if (IS_ERR(tfm)) + return -1; + icv = cpu_to_le32(~crc32_le(~0, data, data_len)); put_unaligned(icv, (__le32 *)(data + data_len)); crypto_blkcipher_setkey(tfm, rc4key, klen); sg_init_one(&sg, data, data_len + WEP_ICV_LEN); crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); + + return 0; } @@ -168,10 +178,8 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, /* Add room for ICV */ skb_put(skb, WEP_ICV_LEN); - ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, - iv + WEP_IV_LEN, len); - - return 0; + return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, + iv + WEP_IV_LEN, len); } @@ -185,6 +193,9 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, struct scatterlist sg; __le32 crc; + if (IS_ERR(tfm)) + return -1; + crypto_blkcipher_setkey(tfm, rc4key, klen); sg_init_one(&sg, data, data_len + WEP_ICV_LEN); crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h index fe29d7e5759fe3704658afb430b7e071921a21d0..58654ee335186d2a3d0f588221dd62d22cf4cf0d 100644 --- a/net/mac80211/wep.h +++ b/net/mac80211/wep.h @@ -18,7 +18,7 @@ int ieee80211_wep_init(struct ieee80211_local *local); void ieee80211_wep_free(struct ieee80211_local *local); -void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, +int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len); int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, diff --git a/net/mac80211/work.c b/net/mac80211/work.c index b025dc7bb0fd4e1decb356709cb3ac5d3163db58..81d4ad64184a2e76d35414586746fa6d03f8cf9d 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -560,6 +560,22 @@ ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk) return WORK_ACT_TIMEOUT; } +static enum work_action __must_check +ieee80211_assoc_beacon_wait(struct ieee80211_work *wk) +{ + if (wk->started) + return WORK_ACT_TIMEOUT; + + /* + * Wait up to one beacon interval ... + * should this be more if we miss one? 
+ */ + printk(KERN_DEBUG "%s: waiting for beacon from %pM\n", + wk->sdata->name, wk->filter_ta); + wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval); + return WORK_ACT_NONE; +} + static void ieee80211_auth_challenge(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len) @@ -709,6 +725,25 @@ ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk, return WORK_ACT_DONE; } +static enum work_action __must_check +ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_local *local = sdata->local; + + ASSERT_WORK_MTX(local); + + if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) + return WORK_ACT_MISMATCH; + + if (len < 24 + 12) + return WORK_ACT_NONE; + + printk(KERN_DEBUG "%s: beacon received\n", sdata->name); + return WORK_ACT_DONE; +} + static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, struct sk_buff *skb) { @@ -731,6 +766,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, case IEEE80211_WORK_DIRECT_PROBE: case IEEE80211_WORK_AUTH: case IEEE80211_WORK_ASSOC: + case IEEE80211_WORK_ASSOC_BEACON_WAIT: bssid = wk->filter_ta; break; default: @@ -745,6 +781,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, continue; switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_BEACON: + rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len); + break; case IEEE80211_STYPE_PROBE_RESP: rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len, rx_status); @@ -840,7 +879,7 @@ static void ieee80211_work_work(struct work_struct *work) /* * ieee80211_queue_work() should have picked up most cases, - * here we'll pick the the rest. + * here we'll pick the rest. */ if (WARN(local->suspended, "work scheduled while going to suspend\n")) return; @@ -916,6 +955,9 @@ static void ieee80211_work_work(struct work_struct *work) case IEEE80211_WORK_REMAIN_ON_CHANNEL: rma = ieee80211_remain_on_channel_timeout(wk); break; + case IEEE80211_WORK_ASSOC_BEACON_WAIT: + rma = ieee80211_assoc_beacon_wait(wk); + break; } wk->started = started; @@ -1065,6 +1107,7 @@ ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata, case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: + case IEEE80211_STYPE_BEACON: skb_queue_tail(&local->work_skb_queue, skb); ieee80211_queue_work(&local->hw, &local->work_work); return RX_QUEUED; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 0adbcc941ac917952d9eb7ed9308e23c4644c720..8d59d27d887e3a0b96d9d995e3138595f0a13c79 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -183,9 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) skb_put(skb, TKIP_ICV_LEN); hdr = (struct ieee80211_hdr *) skb->data; - ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, - key, pos, len, hdr->addr2); - return 0; + return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, + key, pos, len, hdr->addr2); } @@ -436,6 +435,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[CCMP_PN_LEN]; int data_len; + int queue; hdrlen = ieee80211_hdrlen(hdr->frame_control); @@ -453,7 +453,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) ccmp_hdr2pn(pn, skb->data + hdrlen); - if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { + queue = ieee80211_is_mgmt(hdr->frame_control) ? 
+ NUM_RX_DATA_QUEUES : rx->queue; + + if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } @@ -470,7 +473,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; } - memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); + memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); /* Remove CCMP header and MIC */ skb_trim(skb, skb->len - CCMP_MIC_LEN); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 8593a77cfea906ac0257405fb8aa9ae0e0bef597..43288259f4a145d31374a2c0d629ba3b0d16db0e 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -40,27 +40,6 @@ config NF_CONNTRACK if NF_CONNTRACK -config NF_CT_ACCT - bool "Connection tracking flow accounting" - depends on NETFILTER_ADVANCED - help - If this option is enabled, the connection tracking code will - keep per-flow packet and byte counters. - - Those counters can be used for flow-based accounting or the - `connbytes' match. - - Please note that currently this option only sets a default state. - You may change it at boot time with nf_conntrack.acct=0/1 kernel - parameter or by loading the nf_conntrack module with acct=0/1. - - You may also disable/enable it on a running system with: - sysctl net.netfilter.nf_conntrack_acct=0/1 - - This option will be removed in 2.6.29. - - If unsure, say `N'. - config NF_CONNTRACK_MARK bool 'Connection mark tracking support' depends on NETFILTER_ADVANCED @@ -347,6 +326,22 @@ config NETFILTER_XT_CONNMARK comment "Xtables targets" +config NETFILTER_XT_TARGET_CHECKSUM + tristate "CHECKSUM target support" + depends on IP_NF_MANGLE || IP6_NF_MANGLE + depends on NETFILTER_ADVANCED + ---help--- + This option adds a `CHECKSUM' target, which can be used in the iptables mangle + table. + + You can use this target to compute and fill in the checksum in + a packet that lacks a checksum. This is particularly useful, + if you need to work around old applications such as dhcp clients, + that do not work well with checksum offloads, but don't want to disable + checksum offload in your device. + + To compile it as a module, choose M here. If unsure, say N. + config NETFILTER_XT_TARGET_CLASSIFY tristate '"CLASSIFY" target support' depends on NETFILTER_ADVANCED @@ -424,6 +419,18 @@ config NETFILTER_XT_TARGET_HL since you can easily create immortal packets that loop forever on the network. +config NETFILTER_XT_TARGET_IDLETIMER + tristate "IDLETIMER target support" + depends on NETFILTER_ADVANCED + help + + This option adds the `IDLETIMER' target. Each matching packet + resets the timer associated with label specified when the rule is + added. When the timer expires, it triggers a sysfs notification. + The remaining time for expiration can be read via sysfs. + + To compile it as a module, choose M here. If unsure, say N. + config NETFILTER_XT_TARGET_LED tristate '"LED" target support' depends on LEDS_CLASS && LEDS_TRIGGERS @@ -503,7 +510,7 @@ config NETFILTER_XT_TARGET_RATEEST To compile it as a module, choose M here. If unsure, say N. 
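
The CHECKSUM target added earlier in this hunk exists to fill in a checksum that checksum offloading left empty (typically UDP for old DHCP clients). For background, here is a minimal userspace sketch of the 16-bit ones'-complement (RFC 1071) checksum involved; the real target operates on the skb with the kernel's csum helpers, so this is illustrative only.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* treat the buffer as big-endian 16-bit words, pad an odd byte with 0 */
	static uint16_t inet_checksum(const uint8_t *p, size_t len)
	{
		uint32_t sum = 0;

		while (len > 1) {
			sum += (uint32_t)p[0] << 8 | p[1];
			p += 2;
			len -= 2;
		}
		if (len)
			sum += (uint32_t)p[0] << 8;
		while (sum >> 16)		/* fold carries */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* toy header with its checksum field (bytes 6-7) zeroed */
		uint8_t hdr[8] = { 0x45, 0x00, 0x00, 0x08, 0x12, 0x34, 0x00, 0x00 };
		uint16_t csum = inet_checksum(hdr, sizeof(hdr));

		hdr[6] = csum >> 8;
		hdr[7] = csum & 0xff;
		/* with the checksum filled in, re-summing verifies to 0 */
		printf("checksum=0x%04x verify=0x%04x\n",
		       csum, inet_checksum(hdr, sizeof(hdr)));
		return 0;
	}
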
config NETFILTER_XT_TARGET_TEE - tristate '"TEE" - packet cloning to alternate destiantion' + tristate '"TEE" - packet cloning to alternate destination' depends on NETFILTER_ADVANCED depends on (IPV6 || IPV6=n) depends on !NF_CONNTRACK || NF_CONNTRACK @@ -618,7 +625,6 @@ config NETFILTER_XT_MATCH_CONNBYTES tristate '"connbytes" per-connection counter match support' depends on NF_CONNTRACK depends on NETFILTER_ADVANCED - select NF_CT_ACCT help This option adds a `connbytes' match, which allows you to match the number of bytes and/or packets for each direction within a connection. @@ -657,6 +663,15 @@ config NETFILTER_XT_MATCH_CONNTRACK To compile it as a module, choose M here. If unsure, say N. +config NETFILTER_XT_MATCH_CPU + tristate '"cpu" match support' + depends on NETFILTER_ADVANCED + help + CPU matching allows you to match packets based on the CPU + currently handling the packet. + + To compile it as a module, choose M here. If unsure, say N. + config NETFILTER_XT_MATCH_DCCP tristate '"dccp" protocol match support' depends on NETFILTER_ADVANCED @@ -736,6 +751,16 @@ config NETFILTER_XT_MATCH_IPRANGE If unsure, say M. +config NETFILTER_XT_MATCH_IPVS + tristate '"ipvs" match support' + depends on IP_VS + depends on NETFILTER_ADVANCED + depends on NF_CONNTRACK + help + This option allows you to match against IPVS properties of a packet. + + If unsure, say N. + config NETFILTER_XT_MATCH_LENGTH tristate '"length" match support' depends on NETFILTER_ADVANCED diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 14e3a8fd81803fd260f3af6580bf4e2567b30506..441050f31111ae92ddda5db747d99afe8dad6293 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o # targets +obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o @@ -61,6 +62,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o +obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o # matches obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o @@ -68,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o +obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o @@ -75,6 +78,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o +obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index 712ccad133447953fdc86e8e07127f582c9a98dc..46a77d5c3887dc7c874b327a77afb539c8d92777 100644 --- a/net/netfilter/ipvs/Kconfig +++ 
b/net/netfilter/ipvs/Kconfig @@ -3,7 +3,7 @@ # menuconfig IP_VS tristate "IP virtual server support" - depends on NET && INET && NETFILTER + depends on NET && INET && NETFILTER && NF_CONNTRACK ---help--- IP Virtual Server support will let you build a high-performance virtual server based on cluster of two or more real servers. This @@ -26,7 +26,7 @@ if IP_VS config IP_VS_IPV6 bool "IPv6 support for IPVS" - depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6) + depends on IPV6 = y || IP_VS = IPV6 ---help--- Add IPv6 support to IPVS. This is incomplete and might be dangerous. @@ -87,19 +87,16 @@ config IP_VS_PROTO_UDP protocol. Say Y if unsure. config IP_VS_PROTO_AH_ESP - bool - depends on UNDEFINED + def_bool IP_VS_PROTO_ESP || IP_VS_PROTO_AH config IP_VS_PROTO_ESP bool "ESP load balancing support" - select IP_VS_PROTO_AH_ESP ---help--- This option enables support for load balancing ESP (Encapsulation Security Payload) transport protocol. Say Y if unsure. config IP_VS_PROTO_AH bool "AH load balancing support" - select IP_VS_PROTO_AH_ESP ---help--- This option enables support for load balancing AH (Authentication Header) transport protocol. Say Y if unsure. @@ -238,7 +235,7 @@ comment 'IPVS application helper' config IP_VS_FTP tristate "FTP protocol helper" - depends on IP_VS_PROTO_TCP + depends on IP_VS_PROTO_TCP && NF_NAT ---help--- FTP is a protocol that transfers IP address and/or port number in the payload. In the virtual server via Network Address Translation, diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 1cb0e834f8ff36c784e735eab7c3aae36ea7329f..e76f87f4aca80fdc4e9f0557c0b491dcdeff7556 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c @@ -569,49 +569,6 @@ static const struct file_operations ip_vs_app_fops = { }; #endif - -/* - * Replace a segment of data with a new segment - */ -int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, - char *o_buf, int o_len, char *n_buf, int n_len) -{ - int diff; - int o_offset; - int o_left; - - EnterFunction(9); - - diff = n_len - o_len; - o_offset = o_buf - (char *)skb->data; - /* The length of left data after o_buf+o_len in the skb data */ - o_left = skb->len - (o_offset + o_len); - - if (diff <= 0) { - memmove(o_buf + n_len, o_buf + o_len, o_left); - memcpy(o_buf, n_buf, n_len); - skb_trim(skb, skb->len + diff); - } else if (diff <= skb_tailroom(skb)) { - skb_put(skb, diff); - memmove(o_buf + n_len, o_buf + o_len, o_left); - memcpy(o_buf, n_buf, n_len); - } else { - if (pskb_expand_head(skb, skb_headroom(skb), diff, pri)) - return -ENOMEM; - skb_put(skb, diff); - memmove(skb->data + o_offset + n_len, - skb->data + o_offset + o_len, o_left); - skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len); - } - - /* must update the iph total length here */ - ip_hdr(skb)->tot_len = htons(skb->len); - - LeaveFunction(9); - return 0; -} - - int __init ip_vs_app_init(void) { /* we will replace it with proc_net_ipvs_create() soon */ diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index ff04e9edbed6636198b294796654c5aab64d9fdd..b71c69a2db138ac30aadb13e02a697322d54ac90 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -158,6 +158,9 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) unsigned hash; int ret; + if (cp->flags & IP_VS_CONN_F_ONE_PACKET) + return 0; + /* Hash by protocol, client address and port */ hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); @@ -268,6 +271,29 @@ struct 
ip_vs_conn *ip_vs_conn_in_get return cp; } +struct ip_vs_conn * +ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, + const struct ip_vs_iphdr *iph, + unsigned int proto_off, int inverse) +{ + __be16 _ports[2], *pptr; + + pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); + if (pptr == NULL) + return NULL; + + if (likely(!inverse)) + return ip_vs_conn_in_get(af, iph->protocol, + &iph->saddr, pptr[0], + &iph->daddr, pptr[1]); + else + return ip_vs_conn_in_get(af, iph->protocol, + &iph->daddr, pptr[1], + &iph->saddr, pptr[0]); +} +EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto); + /* Get reference to connection template */ struct ip_vs_conn *ip_vs_ct_in_get (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, @@ -353,14 +379,37 @@ struct ip_vs_conn *ip_vs_conn_out_get return ret; } +struct ip_vs_conn * +ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, + struct ip_vs_protocol *pp, + const struct ip_vs_iphdr *iph, + unsigned int proto_off, int inverse) +{ + __be16 _ports[2], *pptr; + + pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); + if (pptr == NULL) + return NULL; + + if (likely(!inverse)) + return ip_vs_conn_out_get(af, iph->protocol, + &iph->saddr, pptr[0], + &iph->daddr, pptr[1]); + else + return ip_vs_conn_out_get(af, iph->protocol, + &iph->daddr, pptr[1], + &iph->saddr, pptr[0]); +} +EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto); /* * Put back the conn and restart its timer with its timeout */ void ip_vs_conn_put(struct ip_vs_conn *cp) { - /* reset it expire in its timeout */ - mod_timer(&cp->timer, jiffies+cp->timeout); + unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ? + 0 : cp->timeout; + mod_timer(&cp->timer, jiffies+t); __ip_vs_conn_put(cp); } @@ -653,7 +702,7 @@ static void ip_vs_conn_expire(unsigned long data) /* * unhash it if it is hashed in the conn table */ - if (!ip_vs_conn_unhash(cp)) + if (!ip_vs_conn_unhash(cp) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) goto expire_later; /* diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 1cd6e3fd058b71b6f49a51589a52451944e177e0..4f8ddba480110167674fa36f6854e53f673e8d68 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -54,7 +54,6 @@ EXPORT_SYMBOL(register_ip_vs_scheduler); EXPORT_SYMBOL(unregister_ip_vs_scheduler); -EXPORT_SYMBOL(ip_vs_skb_replace); EXPORT_SYMBOL(ip_vs_proto_name); EXPORT_SYMBOL(ip_vs_conn_new); EXPORT_SYMBOL(ip_vs_conn_in_get); @@ -194,6 +193,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, struct ip_vs_dest *dest; struct ip_vs_conn *ct; __be16 dport; /* destination port to forward */ + __be16 flags; union nf_inet_addr snet; /* source network of the client, after masking */ @@ -340,6 +340,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc, dport = ports[1]; } + flags = (svc->flags & IP_VS_SVC_F_ONEPACKET + && iph.protocol == IPPROTO_UDP)? 
+ IP_VS_CONN_F_ONE_PACKET : 0; + /* * Create a new connection according to the template */ @@ -347,7 +351,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, &iph.saddr, ports[0], &iph.daddr, ports[1], &dest->addr, dport, - 0, + flags, dest); if (cp == NULL) { ip_vs_conn_put(ct); @@ -377,7 +381,7 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) struct ip_vs_conn *cp = NULL; struct ip_vs_iphdr iph; struct ip_vs_dest *dest; - __be16 _ports[2], *pptr; + __be16 _ports[2], *pptr, flags; ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); @@ -407,6 +411,10 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) return NULL; } + flags = (svc->flags & IP_VS_SVC_F_ONEPACKET + && iph.protocol == IPPROTO_UDP)? + IP_VS_CONN_F_ONE_PACKET : 0; + /* * Create a connection entry. */ @@ -414,7 +422,7 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) &iph.saddr, pptr[0], &iph.daddr, pptr[1], &dest->addr, dest->port ? dest->port : pptr[1], - 0, + flags, dest); if (cp == NULL) return NULL; @@ -464,6 +472,9 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) { int ret, cs; struct ip_vs_conn *cp; + __u16 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && + iph.protocol == IPPROTO_UDP)? + IP_VS_CONN_F_ONE_PACKET : 0; union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; ip_vs_service_put(svc); @@ -474,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, &iph.saddr, pptr[0], &iph.daddr, pptr[1], &daddr, 0, - IP_VS_CONN_F_BYPASS, + IP_VS_CONN_F_BYPASS | flags, NULL); if (cp == NULL) return NF_DROP; @@ -524,26 +535,6 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, return NF_DROP; } - -/* - * It is hooked before NF_IP_PRI_NAT_SRC at the NF_INET_POST_ROUTING - * chain, and is used for VS/NAT. - * It detects packets for VS/NAT connections and sends the packets - * immediately. This can avoid that iptable_nat mangles the packets - * for VS/NAT. 
- */ -static unsigned int ip_vs_post_routing(unsigned int hooknum, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - if (!skb->ipvs_property) - return NF_ACCEPT; - /* The packet was sent from IPVS, exit this chain */ - return NF_STOP; -} - __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) { return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); @@ -1487,14 +1478,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { .hooknum = NF_INET_FORWARD, .priority = 99, }, - /* Before the netfilter connection tracking, exit from POST_ROUTING */ - { - .hook = ip_vs_post_routing, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP_PRI_NAT_SRC-1, - }, #ifdef CONFIG_IP_VS_IPV6 /* After packet filtering, forward packet through VS/DR, VS/TUN, * or VS/NAT(change destination), so that filtering rules can be @@ -1523,14 +1506,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { .hooknum = NF_INET_FORWARD, .priority = 99, }, - /* Before the netfilter connection tracking, exit from POST_ROUTING */ - { - .hook = ip_vs_post_routing, - .owner = THIS_MODULE, - .pf = PF_INET6, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP6_PRI_NAT_SRC-1, - }, #endif }; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 36dc1d88c2fa56cff3f29566965de8c80e9e9d99..0f0c079c422a03f87616a1c1df76c0e387245a9e 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1864,14 +1864,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) svc->scheduler->name); else #endif - seq_printf(seq, "%s %08X:%04X %s ", + seq_printf(seq, "%s %08X:%04X %s %s ", ip_vs_proto_name(svc->protocol), ntohl(svc->addr.ip), ntohs(svc->port), - svc->scheduler->name); + svc->scheduler->name, + (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); } else { - seq_printf(seq, "FWM %08X %s ", - svc->fwmark, svc->scheduler->name); + seq_printf(seq, "FWM %08X %s %s", + svc->fwmark, svc->scheduler->name, + (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); } if (svc->flags & IP_VS_SVC_F_PERSISTENT) diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 2ae747a376a597296652be7b1051924aa5598bd8..f228a17ec6499b1440cae8a9dd9a1d8e312c4a00 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -20,6 +20,17 @@ * * Author: Wouter Gadeyne * + * + * Code for ip_vs_expect_related and ip_vs_expect_callback is taken from + * http://www.ssi.bg/~ja/nfct/: + * + * ip_vs_nfct.c: Netfilter connection tracking support for IPVS + * + * Portions Copyright (C) 2001-2002 + * Antefacto Ltd, 181 Parnell St, Dublin 1, Ireland. 
+ * + * Portions Copyright (C) 2003-2008 + * Julian Anastasov */ #define KMSG_COMPONENT "IPVS" @@ -32,6 +43,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -43,6 +57,16 @@ #define SERVER_STRING "227 Entering Passive Mode (" #define CLIENT_STRING "PORT " +#define FMT_TUPLE "%pI4:%u->%pI4:%u/%u" +#define ARG_TUPLE(T) &(T)->src.u3.ip, ntohs((T)->src.u.all), \ + &(T)->dst.u3.ip, ntohs((T)->dst.u.all), \ + (T)->dst.protonum + +#define FMT_CONN "%pI4:%u->%pI4:%u->%pI4:%u/%u:%u" +#define ARG_CONN(C) &((C)->caddr.ip), ntohs((C)->cport), \ + &((C)->vaddr.ip), ntohs((C)->vport), \ + &((C)->daddr.ip), ntohs((C)->dport), \ + (C)->protocol, (C)->state /* * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper @@ -123,6 +147,119 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit, return 1; } +/* + * Called from init_conntrack() as expectfn handler. + */ +static void +ip_vs_expect_callback(struct nf_conn *ct, + struct nf_conntrack_expect *exp) +{ + struct nf_conntrack_tuple *orig, new_reply; + struct ip_vs_conn *cp; + + if (exp->tuple.src.l3num != PF_INET) + return; + + /* + * We assume that no NF locks are held before this callback. + * ip_vs_conn_out_get and ip_vs_conn_in_get should match their + * expectations even if they use wildcard values, now we provide the + * actual values from the newly created original conntrack direction. + * The conntrack is confirmed when packet reaches IPVS hooks. + */ + + /* RS->CLIENT */ + orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; + cp = ip_vs_conn_out_get(exp->tuple.src.l3num, orig->dst.protonum, + &orig->src.u3, orig->src.u.tcp.port, + &orig->dst.u3, orig->dst.u.tcp.port); + if (cp) { + /* Change reply CLIENT->RS to CLIENT->VS */ + new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple; + IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", " + FMT_TUPLE ", found inout cp=" FMT_CONN "\n", + __func__, ct, ct->status, + ARG_TUPLE(orig), ARG_TUPLE(&new_reply), + ARG_CONN(cp)); + new_reply.dst.u3 = cp->vaddr; + new_reply.dst.u.tcp.port = cp->vport; + IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", " FMT_TUPLE + ", inout cp=" FMT_CONN "\n", + __func__, ct, + ARG_TUPLE(orig), ARG_TUPLE(&new_reply), + ARG_CONN(cp)); + goto alter; + } + + /* CLIENT->VS */ + cp = ip_vs_conn_in_get(exp->tuple.src.l3num, orig->dst.protonum, + &orig->src.u3, orig->src.u.tcp.port, + &orig->dst.u3, orig->dst.u.tcp.port); + if (cp) { + /* Change reply VS->CLIENT to RS->CLIENT */ + new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple; + IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", " + FMT_TUPLE ", found outin cp=" FMT_CONN "\n", + __func__, ct, ct->status, + ARG_TUPLE(orig), ARG_TUPLE(&new_reply), + ARG_CONN(cp)); + new_reply.src.u3 = cp->daddr; + new_reply.src.u.tcp.port = cp->dport; + IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", " + FMT_TUPLE ", outin cp=" FMT_CONN "\n", + __func__, ct, + ARG_TUPLE(orig), ARG_TUPLE(&new_reply), + ARG_CONN(cp)); + goto alter; + } + + IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuple=" FMT_TUPLE + " - unknown expect\n", + __func__, ct, ct->status, ARG_TUPLE(orig)); + return; + +alter: + /* Never alter conntrack for non-NAT conns */ + if (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ) + nf_conntrack_alter_reply(ct, &new_reply); + ip_vs_conn_put(cp); + return; +} + +/* + * Create NF conntrack expectation with wildcard (optional) source port. + * Then the default callback function will alter the reply and will confirm + * the conntrack entry when the first packet comes. 
+ */ +static void +ip_vs_expect_related(struct sk_buff *skb, struct nf_conn *ct, + struct ip_vs_conn *cp, u_int8_t proto, + const __be16 *port, int from_rs) +{ + struct nf_conntrack_expect *exp; + + BUG_ON(!ct || ct == &nf_conntrack_untracked); + + exp = nf_ct_expect_alloc(ct); + if (!exp) + return; + + if (from_rs) + nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, + nf_ct_l3num(ct), &cp->daddr, &cp->caddr, + proto, port, &cp->cport); + else + nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, + nf_ct_l3num(ct), &cp->caddr, &cp->vaddr, + proto, port, &cp->vport); + + exp->expectfn = ip_vs_expect_callback; + + IP_VS_DBG(7, "%s(): ct=%p, expect tuple=" FMT_TUPLE "\n", + __func__, ct, ARG_TUPLE(&exp->tuple)); + nf_ct_expect_related(exp); + nf_ct_expect_put(exp); +} /* * Look at outgoing ftp packets to catch the response to a PASV command @@ -149,7 +286,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, struct ip_vs_conn *n_cp; char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ unsigned buf_len; - int ret; + int ret = 0; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, @@ -219,19 +358,26 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, buf_len = strlen(buf); + ct = nf_ct_get(skb, &ctinfo); + if (ct && !nf_ct_is_untracked(ct)) { + /* If mangling fails this function will return 0 + * which will cause the packet to be dropped. + * Mangling can only fail under memory pressure, + * hopefully it will succeed on the retransmitted + * packet. + */ + ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, + start-data, end-start, + buf, buf_len); + if (ret) + ip_vs_expect_related(skb, ct, n_cp, + IPPROTO_TCP, NULL, 0); + } + /* - * Calculate required delta-offset to keep TCP happy + * Not setting 'diff' is intentional, otherwise the sequence + * would be adjusted twice. 
*/ - *diff = buf_len - (end-start); - - if (*diff == 0) { - /* simply replace it with new passive address */ - memcpy(start, buf, buf_len); - ret = 1; - } else { - ret = !ip_vs_skb_replace(skb, GFP_ATOMIC, start, - end-start, buf, buf_len); - } cp->app_data = NULL; ip_vs_tcp_conn_listen(n_cp); @@ -263,6 +409,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; + struct nf_conn *ct; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, @@ -349,6 +496,11 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, ip_vs_control_add(n_cp, cp); } + ct = (struct nf_conn *)skb->nfct; + if (ct && ct != &nf_conntrack_untracked) + ip_vs_expect_related(skb, ct, n_cp, + IPPROTO_TCP, &n_cp->dport, 1); + /* * Move tunnel to listen state */ diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 2d3d5e4b35f8f42f59c0cc0a838fab3903446a28..027f654799feb969fe5a64de8b262e37287e8f48 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -98,6 +98,7 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) return NULL; } +EXPORT_SYMBOL(ip_vs_proto_get); /* diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index c9a3f7a21d53b2de0ff597d18b0a37061e57f8a3..4c0855cb006ee93c721d53ff0b95cef5ec1853bf 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -8,55 +8,6 @@ #include #include - -static struct ip_vs_conn * -sctp_conn_in_get(int af, - const struct sk_buff *skb, - struct ip_vs_protocol *pp, - const struct ip_vs_iphdr *iph, - unsigned int proto_off, - int inverse) -{ - __be16 _ports[2], *pptr; - - pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); - if (pptr == NULL) - return NULL; - - if (likely(!inverse)) - return ip_vs_conn_in_get(af, iph->protocol, - &iph->saddr, pptr[0], - &iph->daddr, pptr[1]); - else - return ip_vs_conn_in_get(af, iph->protocol, - &iph->daddr, pptr[1], - &iph->saddr, pptr[0]); -} - -static struct ip_vs_conn * -sctp_conn_out_get(int af, - const struct sk_buff *skb, - struct ip_vs_protocol *pp, - const struct ip_vs_iphdr *iph, - unsigned int proto_off, - int inverse) -{ - __be16 _ports[2], *pptr; - - pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); - if (pptr == NULL) - return NULL; - - if (likely(!inverse)) - return ip_vs_conn_out_get(af, iph->protocol, - &iph->saddr, pptr[0], - &iph->daddr, pptr[1]); - else - return ip_vs_conn_out_get(af, iph->protocol, - &iph->daddr, pptr[1], - &iph->saddr, pptr[0]); -} - static int sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) @@ -173,7 +124,7 @@ sctp_dnat_handler(struct sk_buff *skb, return 0; /* Call application helper if needed */ - if (!ip_vs_app_pkt_out(cp, skb)) + if (!ip_vs_app_pkt_in(cp, skb)) return 0; } @@ -1169,8 +1120,8 @@ struct ip_vs_protocol ip_vs_protocol_sctp = { .register_app = sctp_register_app, .unregister_app = sctp_unregister_app, .conn_schedule = sctp_conn_schedule, - .conn_in_get = sctp_conn_in_get, - .conn_out_get = sctp_conn_out_get, + .conn_in_get = ip_vs_conn_in_get_proto, + .conn_out_get = ip_vs_conn_out_get_proto, .snat_handler = sctp_snat_handler, .dnat_handler = sctp_dnat_handler, .csum_check = sctp_csum_check, diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 
91d28e073742db770bb5df2ee8ee123fb7afbc0a..282d24de8592e659466657533b10ad6eadf4bd5c 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -27,52 +27,6 @@ #include - -static struct ip_vs_conn * -tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, - const struct ip_vs_iphdr *iph, unsigned int proto_off, - int inverse) -{ - __be16 _ports[2], *pptr; - - pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); - if (pptr == NULL) - return NULL; - - if (likely(!inverse)) { - return ip_vs_conn_in_get(af, iph->protocol, - &iph->saddr, pptr[0], - &iph->daddr, pptr[1]); - } else { - return ip_vs_conn_in_get(af, iph->protocol, - &iph->daddr, pptr[1], - &iph->saddr, pptr[0]); - } -} - -static struct ip_vs_conn * -tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, - const struct ip_vs_iphdr *iph, unsigned int proto_off, - int inverse) -{ - __be16 _ports[2], *pptr; - - pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); - if (pptr == NULL) - return NULL; - - if (likely(!inverse)) { - return ip_vs_conn_out_get(af, iph->protocol, - &iph->saddr, pptr[0], - &iph->daddr, pptr[1]); - } else { - return ip_vs_conn_out_get(af, iph->protocol, - &iph->daddr, pptr[1], - &iph->saddr, pptr[0]); - } -} - - static int tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) @@ -721,8 +675,8 @@ struct ip_vs_protocol ip_vs_protocol_tcp = { .register_app = tcp_register_app, .unregister_app = tcp_unregister_app, .conn_schedule = tcp_conn_schedule, - .conn_in_get = tcp_conn_in_get, - .conn_out_get = tcp_conn_out_get, + .conn_in_get = ip_vs_conn_in_get_proto, + .conn_out_get = ip_vs_conn_out_get_proto, .snat_handler = tcp_snat_handler, .dnat_handler = tcp_dnat_handler, .csum_check = tcp_csum_check, diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index e7a6885e0167cc2a55d1fd4b71f2144c2ba46e54..8553231b5d412ca557f8699ee998e05351152213 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -27,58 +27,6 @@ #include #include -static struct ip_vs_conn * -udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, - const struct ip_vs_iphdr *iph, unsigned int proto_off, - int inverse) -{ - struct ip_vs_conn *cp; - __be16 _ports[2], *pptr; - - pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); - if (pptr == NULL) - return NULL; - - if (likely(!inverse)) { - cp = ip_vs_conn_in_get(af, iph->protocol, - &iph->saddr, pptr[0], - &iph->daddr, pptr[1]); - } else { - cp = ip_vs_conn_in_get(af, iph->protocol, - &iph->daddr, pptr[1], - &iph->saddr, pptr[0]); - } - - return cp; -} - - -static struct ip_vs_conn * -udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, - const struct ip_vs_iphdr *iph, unsigned int proto_off, - int inverse) -{ - struct ip_vs_conn *cp; - __be16 _ports[2], *pptr; - - pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); - if (pptr == NULL) - return NULL; - - if (likely(!inverse)) { - cp = ip_vs_conn_out_get(af, iph->protocol, - &iph->saddr, pptr[0], - &iph->daddr, pptr[1]); - } else { - cp = ip_vs_conn_out_get(af, iph->protocol, - &iph->daddr, pptr[1], - &iph->saddr, pptr[0]); - } - - return cp; -} - - static int udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) @@ -520,8 +468,8 @@ struct ip_vs_protocol ip_vs_protocol_udp = { 
.init = udp_init, .exit = udp_exit, .conn_schedule = udp_conn_schedule, - .conn_in_get = udp_conn_in_get, - .conn_out_get = udp_conn_out_get, + .conn_in_get = ip_vs_conn_in_get_proto, + .conn_out_get = ip_vs_conn_out_get_proto, .snat_handler = udp_snat_handler, .dnat_handler = udp_dnat_handler, .csum_check = udp_csum_check, diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 93c15a107b2ccde25d41e99e42e1a3aa28d86022..21e1a5e9b9d3cd354d74808e44094ffc67f671da 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -90,10 +91,10 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) &dest->addr.ip); return NULL; } - __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); + __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst)); IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n", &dest->addr.ip, - atomic_read(&rt->u.dst.__refcnt), rtos); + atomic_read(&rt->dst.__refcnt), rtos); } spin_unlock(&dest->dst_lock); } else { @@ -148,10 +149,10 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp) &dest->addr.in6); return NULL; } - __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst)); + __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst)); IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n", &dest->addr.in6, - atomic_read(&rt->u.dst.__refcnt)); + atomic_read(&rt->dst.__refcnt)); } spin_unlock(&dest->dst_lock); } else { @@ -198,7 +199,7 @@ do { \ (skb)->ipvs_property = 1; \ skb_forward_csum(skb); \ NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \ - (rt)->u.dst.dev, dst_output); \ + (rt)->dst.dev, dst_output); \ } while (0) @@ -245,7 +246,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, } /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { ip_rt_put(rt); icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); @@ -265,7 +266,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* Another hack: avoid icmp_send in ip_fragment */ skb->local_df = 1; @@ -309,9 +310,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, } /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if (skb->len > mtu) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP_VS_DBG_RL("%s(): frag needed\n", __func__); goto tx_error; @@ -323,13 +324,13 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, */ skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(skb == NULL)) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); return NF_STOLEN; } /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* Another hack: avoid icmp_send in ip_fragment */ skb->local_df = 1; @@ -348,6 +349,30 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, } #endif +static void +ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) +{ + struct nf_conn *ct = (struct nf_conn *)skb->nfct; + struct nf_conntrack_tuple new_tuple; + + if (ct == NULL || nf_ct_is_untracked(ct) || nf_ct_is_confirmed(ct)) + return; + + /* + * The connection is not yet in the hashtable, so we update it. + * CIP->VIP will remain the same, so leave the tuple in + * IP_CT_DIR_ORIGINAL untouched. When the reply comes back from the + * real-server we will see RIP->DIP. 
+ */ + new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; + new_tuple.src.u3 = cp->daddr; + /* + * This will also take care of UDP and other protocols. + */ + new_tuple.src.u.tcp.port = cp->dport; + nf_conntrack_alter_reply(ct, &new_tuple); +} + /* * NAT transmitter (only for outside-to-inside nat forwarding) * Not used for related ICMP @@ -376,7 +401,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error_icmp; /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { ip_rt_put(rt); icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); @@ -388,12 +413,12 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, if (!skb_make_writable(skb, sizeof(struct iphdr))) goto tx_error_put; - if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) + if (skb_cow(skb, rt->dst.dev->hard_header_len)) goto tx_error_put; /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* mangle the packet */ if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) @@ -403,6 +428,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); + ip_vs_update_conntrack(skb, cp); + /* FIXME: when application helper enlarges the packet and the length is larger than the MTU of outgoing device, there will be still MTU problem. */ @@ -452,9 +479,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error_icmp; /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if (skb->len > mtu) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit_v6(): frag needed for"); @@ -465,12 +492,12 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) goto tx_error_put; - if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) + if (skb_cow(skb, rt->dst.dev->hard_header_len)) goto tx_error_put; /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* mangle the packet */ if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) @@ -479,6 +506,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); + ip_vs_update_conntrack(skb, cp); + /* FIXME: when application helper enlarges the packet and the length is larger than the MTU of outgoing device, there will be still MTU problem. */ @@ -498,7 +527,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, kfree_skb(skb); return NF_STOLEN; tx_error_put: - dst_release(&rt->u.dst); + dst_release(&rt->dst); goto tx_error; } #endif @@ -549,9 +578,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos)))) goto tx_error_icmp; - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; - mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); + mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); if (mtu < 68) { ip_rt_put(rt); IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); @@ -601,7 +630,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. 
@@ -615,7 +644,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; iph->ttl = old_iph->ttl; - ip_select_ident(iph, &rt->u.dst, NULL); + ip_select_ident(iph, &rt->dst, NULL); /* Another hack: avoid icmp_send in ip_fragment */ skb->local_df = 1; @@ -660,12 +689,12 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (!rt) goto tx_error_icmp; - tdev = rt->u.dst.dev; + tdev = rt->dst.dev; - mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr); + mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr); /* TODO IPv6: do we need this check in IPv6? */ if (mtu < 1280) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__); goto tx_error; } @@ -674,7 +703,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); - dst_release(&rt->u.dst); + dst_release(&rt->dst); IP_VS_DBG_RL("%s(): frag needed\n", __func__); goto tx_error; } @@ -689,7 +718,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); kfree_skb(skb); IP_VS_ERR_RL("%s(): no memory\n", __func__); return NF_STOLEN; @@ -707,7 +736,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* * Push down and install the IPIP header. @@ -760,7 +789,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error_icmp; /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); ip_rt_put(rt); @@ -780,7 +809,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* Another hack: avoid icmp_send in ip_fragment */ skb->local_df = 1; @@ -813,10 +842,10 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error_icmp; /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if (skb->len > mtu) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); - dst_release(&rt->u.dst); + dst_release(&rt->dst); IP_VS_DBG_RL("%s(): frag needed\n", __func__); goto tx_error; } @@ -827,13 +856,13 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, */ skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(skb == NULL)) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); return NF_STOLEN; } /* drop old route */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); /* Another hack: avoid icmp_send in ip_fragment */ skb->local_df = 1; @@ -888,7 +917,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error_icmp; /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { ip_rt_put(rt); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); @@ -900,12 +929,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, if (!skb_make_writable(skb, offset)) goto tx_error_put; - if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) + if (skb_cow(skb, rt->dst.dev->hard_header_len)) goto tx_error_put; /* drop the old route when skb is not shared */ 
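
A note on the conntrack update introduced above: ip_vs_update_conntrack() can only rewrite the entry because it runs on the DNAT path before the conntrack entry is confirmed, so nf_conntrack_alter_reply() is still allowed to change the reply tuple. The sketch below illustrates the effect with hypothetical addresses (CIP/VIP/RIP are placeholders, not values taken from this patch):

    /*
     * Hypothetical addresses, for illustration only:
     *   client  CIP = 192.0.2.10
     *   virtual VIP = 198.51.100.1
     *   real    RIP = 10.0.0.5
     *
     * Tuples of the still unconfirmed conntrack entry before the update:
     *   IP_CT_DIR_ORIGINAL:  CIP -> VIP   (left untouched)
     *   IP_CT_DIR_REPLY:     VIP -> CIP
     *
     * After new_tuple.src.u3 = cp->daddr and nf_conntrack_alter_reply():
     *   IP_CT_DIR_REPLY:     RIP -> CIP
     *
     * Traffic returning from the real server now matches the existing
     * entry instead of spawning a second, unrelated conntrack.
     */
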
skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); ip_vs_nat_icmp(skb, pp, cp, 0); @@ -963,9 +992,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, goto tx_error_icmp; /* MTU checking */ - mtu = dst_mtu(&rt->u.dst); + mtu = dst_mtu(&rt->dst); if (skb->len > mtu) { - dst_release(&rt->u.dst); + dst_release(&rt->dst); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP_VS_DBG_RL("%s(): frag needed\n", __func__); goto tx_error; @@ -975,12 +1004,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (!skb_make_writable(skb, offset)) goto tx_error_put; - if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) + if (skb_cow(skb, rt->dst.dev->hard_header_len)) goto tx_error_put; /* drop the old route when skb is not shared */ skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); + skb_dst_set(skb, &rt->dst); ip_vs_nat_icmp_v6(skb, pp, cp, 0); @@ -1001,7 +1030,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, LeaveFunction(10); return rc; tx_error_put: - dst_release(&rt->u.dst); + dst_release(&rt->dst); goto tx_error; } #endif diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index ab81b380eae6814934de4318c67e6cd4d886ca1f..5178c691ecbf448a0ebd7d6e2f656b2118c6efe5 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c @@ -17,13 +17,7 @@ #include #include -#ifdef CONFIG_NF_CT_ACCT -#define NF_CT_ACCT_DEFAULT 1 -#else -#define NF_CT_ACCT_DEFAULT 0 -#endif - -static int nf_ct_acct __read_mostly = NF_CT_ACCT_DEFAULT; +static int nf_ct_acct __read_mostly; module_param_named(acct, nf_ct_acct, bool, 0644); MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting."); @@ -114,12 +108,6 @@ int nf_conntrack_acct_init(struct net *net) net->ct.sysctl_acct = nf_ct_acct; if (net_eq(net, &init_net)) { -#ifdef CONFIG_NF_CT_ACCT - printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. 
Please use\n"); - printk(KERN_WARNING "nf_conntrack.acct=1 kernel parameter, acct=1 nf_conntrack module option or\n"); - printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n"); -#endif - ret = nf_ct_extend_register(&acct_extend); if (ret < 0) { printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n"); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index eeeb8bc73982275655e37342c214f1fa2669efb2..df3eedb142ff809b9ce8f9dd49c41782f91aba42 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -62,8 +62,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); -struct nf_conn nf_conntrack_untracked __read_mostly; -EXPORT_SYMBOL_GPL(nf_conntrack_untracked); +DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); +EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); static int nf_conntrack_hash_rnd_initted; static unsigned int nf_conntrack_hash_rnd; @@ -619,9 +619,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; /* Don't set timer yet: wait for confirmation */ setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); -#ifdef CONFIG_NET_NS - ct->ct_net = net; -#endif + write_pnet(&ct->ct_net, net); #ifdef CONFIG_NF_CONNTRACK_ZONES if (zone) { struct nf_conntrack_zone *nf_ct_zone; @@ -968,8 +966,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, if (acct) { spin_lock_bh(&ct->lock); acct[CTINFO2DIR(ctinfo)].packets++; - acct[CTINFO2DIR(ctinfo)].bytes += - skb->len - skb_network_offset(skb); + acct[CTINFO2DIR(ctinfo)].bytes += skb->len; spin_unlock_bh(&ct->lock); } } @@ -1183,10 +1180,21 @@ static void nf_ct_release_dying_list(struct net *net) spin_unlock_bh(&nf_conntrack_lock); } +static int untrack_refs(void) +{ + int cnt = 0, cpu; + + for_each_possible_cpu(cpu) { + struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); + + cnt += atomic_read(&ct->ct_general.use) - 1; + } + return cnt; +} + static void nf_conntrack_cleanup_init_net(void) { - /* wait until all references to nf_conntrack_untracked are dropped */ - while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) + while (untrack_refs() > 0) schedule(); nf_conntrack_helper_fini(); @@ -1321,10 +1329,19 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, &nf_conntrack_htable_size, 0600); +void nf_ct_untracked_status_or(unsigned long bits) +{ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(nf_conntrack_untracked, cpu).status |= bits; +} +EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); + static int nf_conntrack_init_init_net(void) { int max_factor = 8; - int ret; + int ret, cpu; /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB * machine has 512 buckets. >= 1GB machines have 16384 buckets. 
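
Since nf_conntrack_untracked is now a per-CPU variable, callers can no longer compare a conntrack pointer against one global object; the rest of this patch goes through helper accessors instead. Those helpers live in the conntrack headers and are not part of this hunk; the sketch below shows roughly what they are expected to look like, inferred from how they are used here (illustrative, not a copy of the header):

    DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);

    /* Fetch this CPU's fake "untracked" entry, e.g. for NOTRACK or TEE. */
    static inline struct nf_conn *nf_ct_untracked_get(void)
    {
            return &__raw_get_cpu_var(nf_conntrack_untracked);
    }

    /* Test the IPS_UNTRACKED status bit instead of comparing pointers. */
    static inline bool nf_ct_is_untracked(const struct nf_conn *ct)
    {
            return test_bit(IPS_UNTRACKED_BIT, &ct->status);
    }
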
*/ @@ -1363,13 +1380,13 @@ static int nf_conntrack_init_init_net(void) goto err_extend; #endif /* Set up fake conntrack: to never be deleted, not in any hashes */ -#ifdef CONFIG_NET_NS - nf_conntrack_untracked.ct_net = &init_net; -#endif - atomic_set(&nf_conntrack_untracked.ct_general.use, 1); + for_each_possible_cpu(cpu) { + struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); + write_pnet(&ct->ct_net, &init_net); + atomic_set(&ct->ct_general.use, 1); + } /* - and look it like as a confirmed connection */ - set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); - + nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); return 0; #ifdef CONFIG_NF_CONNTRACK_ZONES diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index fdc8fb4ae10f0346424b112752581068ccdbc340..7dcf7a404190e6aa3fa06e642f54279e2f30fba9 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -23,9 +23,10 @@ void __nf_ct_ext_destroy(struct nf_conn *ct) { unsigned int i; struct nf_ct_ext_type *t; + struct nf_ct_ext *ext = ct->ext; for (i = 0; i < NF_CT_EXT_NUM; i++) { - if (!nf_ct_ext_exist(ct, i)) + if (!__nf_ct_ext_exist(ext, i)) continue; rcu_read_lock(); @@ -73,44 +74,45 @@ static void __nf_ct_ext_free_rcu(struct rcu_head *head) void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) { - struct nf_ct_ext *new; + struct nf_ct_ext *old, *new; int i, newlen, newoff; struct nf_ct_ext_type *t; /* Conntrack must not be confirmed to avoid races on reallocation. */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); - if (!ct->ext) + old = ct->ext; + if (!old) return nf_ct_ext_create(&ct->ext, id, gfp); - if (nf_ct_ext_exist(ct, id)) + if (__nf_ct_ext_exist(old, id)) return NULL; rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); BUG_ON(t == NULL); - newoff = ALIGN(ct->ext->len, t->align); + newoff = ALIGN(old->len, t->align); newlen = newoff + t->len; rcu_read_unlock(); - new = __krealloc(ct->ext, newlen, gfp); + new = __krealloc(old, newlen, gfp); if (!new) return NULL; - if (new != ct->ext) { + if (new != old) { for (i = 0; i < NF_CT_EXT_NUM; i++) { - if (!nf_ct_ext_exist(ct, i)) + if (!__nf_ct_ext_exist(old, i)) continue; rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[i]); if (t && t->move) t->move((void *)new + new->offset[i], - (void *)ct->ext + ct->ext->offset[i]); + (void *)old + old->offset[i]); rcu_read_unlock(); } - call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu); + call_rcu(&old->rcu, __nf_ct_ext_free_rcu); ct->ext = new; } diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 6eaee7c8a3375843eb2631ac1d79d155c2d12c1c..b969025cf82fe35437c20933cf860525eeda3d7d 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -734,11 +734,11 @@ static int callforward_do_filter(const union nf_inet_addr *src, if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) { if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { if (rt1->rt_gateway == rt2->rt_gateway && - rt1->u.dst.dev == rt2->u.dst.dev) + rt1->dst.dev == rt2->dst.dev) ret = 1; - dst_release(&rt2->u.dst); + dst_release(&rt2->dst); } - dst_release(&rt1->u.dst); + dst_release(&rt1->dst); } break; } @@ -753,11 +753,11 @@ static int callforward_do_filter(const union nf_inet_addr *src, if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, sizeof(rt1->rt6i_gateway)) && - rt1->u.dst.dev == rt2->u.dst.dev) + rt1->dst.dev == rt2->dst.dev) ret = 
1; - dst_release(&rt2->u.dst); + dst_release(&rt2->dst); } - dst_release(&rt1->u.dst); + dst_release(&rt1->dst); } break; } diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 497b2224536fe6fe29ec904dabda14b7a55afc89..aadde018a0720bd6e6a35dea4d639eb3d1eab293 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c @@ -61,7 +61,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, goto out; rcu_read_lock(); - in_dev = __in_dev_get_rcu(rt->u.dst.dev); + in_dev = __in_dev_get_rcu(rt->dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { if (ifa->ifa_broadcast == iph->daddr) { diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c42ff6aa441d657c03e2eb0207350e50b5adc239..5bae1cd15eea93ee3f74cb51dab972c10c96d33c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -480,7 +480,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) int err; /* ignore our fake conntrack entry */ - if (ct == &nf_conntrack_untracked) + if (nf_ct_is_untracked(ct)) return 0; if (events & (1 << IPCT_DESTROY)) { diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 9dd8cd4fb6e64bf1df3c24850c51ac6955d655c8..c4c885dca3bd358a320f1861d8ea9e52bd8166d3 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -585,8 +585,16 @@ static bool tcp_in_window(const struct nf_conn *ct, * Let's try to use the data from the packet. */ sender->td_end = end; + win <<= sender->td_scale; sender->td_maxwin = (win == 0 ? 1 : win); sender->td_maxend = end + sender->td_maxwin; + /* + * We haven't seen traffic in the other direction yet + * but we have to tweak window tracking to pass III + * and IV until that happens. + */ + if (receiver->td_maxwin == 0) + receiver->td_end = receiver->td_maxend = sack; } } else if (((state->state == TCP_CONNTRACK_SYN_SENT && dir == IP_CT_DIR_ORIGINAL) @@ -680,7 +688,7 @@ static bool tcp_in_window(const struct nf_conn *ct, /* * Update receiver data. */ - if (after(end, sender->td_maxend)) + if (receiver->td_maxwin != 0 && after(end, sender->td_maxend)) receiver->td_maxwin += end - sender->td_maxend; if (after(sack + win, receiver->td_maxend - 1)) { receiver->td_maxend = sack + win; @@ -736,27 +744,19 @@ static bool tcp_in_window(const struct nf_conn *ct, return res; } -#define TH_FIN 0x01 -#define TH_SYN 0x02 -#define TH_RST 0x04 -#define TH_PUSH 0x08 -#define TH_ACK 0x10 -#define TH_URG 0x20 -#define TH_ECE 0x40 -#define TH_CWR 0x80 - /* table of valid flag combinations - PUSH, ECE and CWR are always valid */ -static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] = +static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK| + TCPHDR_URG) + 1] = { - [TH_SYN] = 1, - [TH_SYN|TH_URG] = 1, - [TH_SYN|TH_ACK] = 1, - [TH_RST] = 1, - [TH_RST|TH_ACK] = 1, - [TH_FIN|TH_ACK] = 1, - [TH_FIN|TH_ACK|TH_URG] = 1, - [TH_ACK] = 1, - [TH_ACK|TH_URG] = 1, + [TCPHDR_SYN] = 1, + [TCPHDR_SYN|TCPHDR_URG] = 1, + [TCPHDR_SYN|TCPHDR_ACK] = 1, + [TCPHDR_RST] = 1, + [TCPHDR_RST|TCPHDR_ACK] = 1, + [TCPHDR_FIN|TCPHDR_ACK] = 1, + [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1, + [TCPHDR_ACK] = 1, + [TCPHDR_ACK|TCPHDR_URG] = 1, }; /* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ @@ -803,7 +803,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl, } /* Check TCP flags. 
*/ - tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH)); + tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH)); if (!tcp_valid_flags[tcpflags]) { if (LOG_INVALID(net, IPPROTO_TCP)) nf_log_packet(pf, 0, skb, NULL, NULL, NULL, diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index fc9a211e629e499bbc15eed8f593157a284cc1b7..6a1572b0ab416a65425abc135cca9d93a4769911 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -66,9 +66,10 @@ struct nfulnl_instance { u_int16_t group_num; /* number of this queue */ u_int16_t flags; u_int8_t copy_mode; + struct rcu_head rcu; }; -static DEFINE_RWLOCK(instances_lock); +static DEFINE_SPINLOCK(instances_lock); static atomic_t global_seq; #define INSTANCE_BUCKETS 16 @@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num) struct nfulnl_instance *inst; head = &instance_table[instance_hashfn(group_num)]; - hlist_for_each_entry(inst, pos, head, hlist) { + hlist_for_each_entry_rcu(inst, pos, head, hlist) { if (inst->group_num == group_num) return inst; } @@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num) { struct nfulnl_instance *inst; - read_lock_bh(&instances_lock); + rcu_read_lock_bh(); inst = __instance_lookup(group_num); - if (inst) - instance_get(inst); - read_unlock_bh(&instances_lock); + if (inst && !atomic_inc_not_zero(&inst->use)) + inst = NULL; + rcu_read_unlock_bh(); return inst; } +static void nfulnl_instance_free_rcu(struct rcu_head *head) +{ + kfree(container_of(head, struct nfulnl_instance, rcu)); + module_put(THIS_MODULE); +} + static void instance_put(struct nfulnl_instance *inst) { - if (inst && atomic_dec_and_test(&inst->use)) { - kfree(inst); - module_put(THIS_MODULE); - } + if (inst && atomic_dec_and_test(&inst->use)) + call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu); } static void nfulnl_timer(unsigned long data); @@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid) struct nfulnl_instance *inst; int err; - write_lock_bh(&instances_lock); + spin_lock_bh(&instances_lock); if (__instance_lookup(group_num)) { err = -EEXIST; goto out_unlock; @@ -166,32 +171,37 @@ instance_create(u_int16_t group_num, int pid) inst->copy_mode = NFULNL_COPY_PACKET; inst->copy_range = NFULNL_COPY_RANGE_MAX; - hlist_add_head(&inst->hlist, + hlist_add_head_rcu(&inst->hlist, &instance_table[instance_hashfn(group_num)]); - write_unlock_bh(&instances_lock); + spin_unlock_bh(&instances_lock); return inst; out_unlock: - write_unlock_bh(&instances_lock); + spin_unlock_bh(&instances_lock); return ERR_PTR(err); } static void __nfulnl_flush(struct nfulnl_instance *inst); +/* called with BH disabled */ static void __instance_destroy(struct nfulnl_instance *inst) { /* first pull it out of the global list */ - hlist_del(&inst->hlist); + hlist_del_rcu(&inst->hlist); /* then flush all pending packets from skb */ - spin_lock_bh(&inst->lock); + spin_lock(&inst->lock); + + /* lockless readers wont be able to use us */ + inst->copy_mode = NFULNL_COPY_DISABLED; + if (inst->skb) __nfulnl_flush(inst); - spin_unlock_bh(&inst->lock); + spin_unlock(&inst->lock); /* and finally put the refcount */ instance_put(inst); @@ -200,9 +210,9 @@ __instance_destroy(struct nfulnl_instance *inst) static inline void instance_destroy(struct nfulnl_instance *inst) { - write_lock_bh(&instances_lock); + spin_lock_bh(&instances_lock); __instance_destroy(inst); - write_unlock_bh(&instances_lock); + spin_unlock_bh(&instances_lock); } static int @@ -403,8 +413,9 @@ __build_packet_message(struct 
nfulnl_instance *inst, NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, htonl(indev->ifindex)); /* this is the bridge group "brX" */ + /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(indev->br_port->br->dev->ifindex)); + htonl(br_port_get_rcu(indev)->br->dev->ifindex)); } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ @@ -430,8 +441,9 @@ __build_packet_message(struct nfulnl_instance *inst, NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, htonl(outdev->ifindex)); /* this is the bridge group "brX" */ + /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(outdev->br_port->br->dev->ifindex)); + htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); } else { /* Case 2: indev is a bridge group, we need to look * for physical device (when called from ipv4) */ @@ -619,6 +631,7 @@ nfulnl_log_packet(u_int8_t pf, size += nla_total_size(data_len); break; + case NFULNL_COPY_DISABLED: default: goto unlock_and_release; } @@ -672,7 +685,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this, int i; /* destroy all instances for this pid */ - write_lock_bh(&instances_lock); + spin_lock_bh(&instances_lock); for (i = 0; i < INSTANCE_BUCKETS; i++) { struct hlist_node *tmp, *t2; struct nfulnl_instance *inst; @@ -684,7 +697,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this, __instance_destroy(inst); } } - write_unlock_bh(&instances_lock); + spin_unlock_bh(&instances_lock); } return NOTIFY_DONE; } @@ -861,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st) for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { if (!hlist_empty(&instance_table[st->bucket])) - return instance_table[st->bucket].first; + return rcu_dereference_bh(instance_table[st->bucket].first); } return NULL; } static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h) { - h = h->next; + h = rcu_dereference_bh(h->next); while (!h) { if (++st->bucket >= INSTANCE_BUCKETS) return NULL; - h = instance_table[st->bucket].first; + h = rcu_dereference_bh(instance_table[st->bucket].first); } return h; } @@ -890,9 +903,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos) } static void *seq_start(struct seq_file *seq, loff_t *pos) - __acquires(instances_lock) + __acquires(rcu_bh) { - read_lock_bh(&instances_lock); + rcu_read_lock_bh(); return get_idx(seq->private, *pos); } @@ -903,9 +916,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos) } static void seq_stop(struct seq_file *s, void *v) - __releases(instances_lock) + __releases(rcu_bh) { - read_unlock_bh(&instances_lock); + rcu_read_unlock_bh(); } static int seq_show(struct seq_file *s, void *v) diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 12e1ab37fcd8f400151f8bced1b13a873b328e77..68e67d19724d83f844c25a5aa0f603156421f57e 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -46,17 +46,19 @@ struct nfqnl_instance { int peer_pid; unsigned int queue_maxlen; unsigned int copy_range; - unsigned int queue_total; unsigned int queue_dropped; unsigned int queue_user_dropped; - unsigned int id_sequence; /* 'sequence' of pkt ids */ u_int16_t queue_num; /* number of this queue */ u_int8_t copy_mode; - - spinlock_t lock; - +/* + * Following fields are dirtied for each queued packet, + * keep them in same cache line if possible. 
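
The nfnetlink_log conversion above replaces the instances_lock rwlock with RCU for readers: lookups run under rcu_read_lock_bh(), take a reference with atomic_inc_not_zero() so an instance whose refcount already hit zero is skipped, and the final put frees the object through call_rcu_bh(). A condensed sketch of that lookup/free idiom follows, using a made-up struct obj rather than struct nfulnl_instance (writers are assumed to serialize on a spinlock that is not shown):

    struct obj {
            struct hlist_node hlist;
            u16               id;
            atomic_t          use;
            struct rcu_head   rcu;
    };

    static HLIST_HEAD(obj_table);        /* insertions/removals under a spinlock */

    static struct obj *__obj_lookup(u16 id)
    {
            struct obj *o;
            struct hlist_node *pos;

            hlist_for_each_entry_rcu(o, pos, &obj_table, hlist)
                    if (o->id == id)
                            return o;
            return NULL;
    }

    static struct obj *obj_lookup_get(u16 id)
    {
            struct obj *o;

            rcu_read_lock_bh();
            o = __obj_lookup(id);
            if (o && !atomic_inc_not_zero(&o->use))
                    o = NULL;            /* lost the race against the last put */
            rcu_read_unlock_bh();
            return o;
    }

    static void obj_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct obj, rcu));
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->use))
                    call_rcu_bh(&o->rcu, obj_free_rcu);
    }
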
+ */ + spinlock_t lock; + unsigned int queue_total; + atomic_t id_sequence; /* 'sequence' of pkt ids */ struct list_head queue_list; /* packets in queue */ }; @@ -238,32 +240,24 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, outdev = entry->outdev; - spin_lock_bh(&queue->lock); - - switch ((enum nfqnl_config_mode)queue->copy_mode) { + switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { case NFQNL_COPY_META: case NFQNL_COPY_NONE: break; case NFQNL_COPY_PACKET: if (entskb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_help(entskb)) { - spin_unlock_bh(&queue->lock); + skb_checksum_help(entskb)) return NULL; - } - if (queue->copy_range == 0 - || queue->copy_range > entskb->len) + + data_len = ACCESS_ONCE(queue->copy_range); + if (data_len == 0 || data_len > entskb->len) data_len = entskb->len; - else - data_len = queue->copy_range; size += nla_total_size(data_len); break; } - entry->id = queue->id_sequence++; - - spin_unlock_bh(&queue->lock); skb = alloc_skb(size, GFP_ATOMIC); if (!skb) @@ -278,6 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(queue->queue_num); + entry->id = atomic_inc_return(&queue->id_sequence); pmsg.packet_id = htonl(entry->id); pmsg.hw_protocol = entskb->protocol; pmsg.hook = entry->hook; @@ -296,8 +291,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(indev->ifindex)); /* this is the bridge group "brX" */ + /* rcu_read_lock()ed by __nf_queue */ NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, - htonl(indev->br_port->br->dev->ifindex)); + htonl(br_port_get_rcu(indev)->br->dev->ifindex)); } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ @@ -321,8 +317,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(outdev->ifindex)); /* this is the bridge group "brX" */ + /* rcu_read_lock()ed by __nf_queue */ NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, - htonl(outdev->br_port->br->dev->ifindex)); + htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); } else { /* Case 2: outdev is bridge group, we need to look for * physical output device (when called from ipv4) */ @@ -866,7 +863,7 @@ static int seq_show(struct seq_file *s, void *v) inst->peer_pid, inst->queue_total, inst->copy_mode, inst->copy_range, inst->queue_dropped, inst->queue_user_dropped, - inst->id_sequence, 1); + atomic_read(&inst->id_sequence), 1); } static const struct seq_operations nfqnl_seq_ops = { diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c new file mode 100644 index 0000000000000000000000000000000000000000..0f642ef8cd2669e737dd0cdb5958abcced8be7d2 --- /dev/null +++ b/net/netfilter/xt_CHECKSUM.c @@ -0,0 +1,70 @@ +/* iptables module for the packet checksum mangling + * + * (C) 2002 by Harald Welte + * (C) 2010 Red Hat, Inc. + * + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include + +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michael S. 
Tsirkin "); +MODULE_DESCRIPTION("Xtables: checksum modification"); +MODULE_ALIAS("ipt_CHECKSUM"); +MODULE_ALIAS("ip6t_CHECKSUM"); + +static unsigned int +checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) + skb_checksum_help(skb); + + return XT_CONTINUE; +} + +static int checksum_tg_check(const struct xt_tgchk_param *par) +{ + const struct xt_CHECKSUM_info *einfo = par->targinfo; + + if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { + pr_info("unsupported CHECKSUM operation %x\n", einfo->operation); + return -EINVAL; + } + if (!einfo->operation) { + pr_info("no CHECKSUM operation enabled\n"); + return -EINVAL; + } + return 0; +} + +static struct xt_target checksum_tg_reg __read_mostly = { + .name = "CHECKSUM", + .family = NFPROTO_UNSPEC, + .target = checksum_tg, + .targetsize = sizeof(struct xt_CHECKSUM_info), + .table = "mangle", + .checkentry = checksum_tg_check, + .me = THIS_MODULE, +}; + +static int __init checksum_tg_init(void) +{ + return xt_register_target(&checksum_tg_reg); +} + +static void __exit checksum_tg_exit(void) +{ + xt_unregister_target(&checksum_tg_reg); +} + +module_init(checksum_tg_init); +module_exit(checksum_tg_exit); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 562bf3266e043d421621ee7472aad72b50b0da51..0cb6053f02fdf04723254bfe90d8ca9bff29edfc 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -67,7 +67,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par) return -EINVAL; if (info->flags & XT_CT_NOTRACK) { - ct = &nf_conntrack_untracked; + ct = nf_ct_untracked_get(); atomic_inc(&ct->ct_general.use); goto out; } @@ -132,7 +132,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) struct nf_conn *ct = info->ct; struct nf_conn_help *help; - if (ct != &nf_conntrack_untracked) { + if (!nf_ct_is_untracked(ct)) { help = nfct_help(ct); if (help) module_put(help->helper->me); diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c new file mode 100644 index 0000000000000000000000000000000000000000..be1f22e13545055601a7feca73a6085361216d82 --- /dev/null +++ b/net/netfilter/xt_IDLETIMER.c @@ -0,0 +1,315 @@ +/* + * linux/net/netfilter/xt_IDLETIMER.c + * + * Netfilter module to trigger a timer when packet matches. + * After timer expires a kevent will be sent. + * + * Copyright (C) 2004, 2010 Nokia Corporation + * Written by Timo Teras + * + * Converted to x_tables and reworked for upstream inclusion + * by Luciano Coelho + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct idletimer_tg_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); +}; + +struct idletimer_tg { + struct list_head entry; + struct timer_list timer; + struct work_struct work; + + struct kobject *kobj; + struct idletimer_tg_attr attr; + + unsigned int refcnt; +}; + +static LIST_HEAD(idletimer_tg_list); +static DEFINE_MUTEX(list_mutex); + +static struct kobject *idletimer_tg_kobj; + +static +struct idletimer_tg *__idletimer_tg_find_by_label(const char *label) +{ + struct idletimer_tg *entry; + + BUG_ON(!label); + + list_for_each_entry(entry, &idletimer_tg_list, entry) { + if (!strcmp(label, entry->attr.attr.name)) + return entry; + } + + return NULL; +} + +static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct idletimer_tg *timer; + unsigned long expires = 0; + + mutex_lock(&list_mutex); + + timer = __idletimer_tg_find_by_label(attr->name); + if (timer) + expires = timer->timer.expires; + + mutex_unlock(&list_mutex); + + if (time_after(expires, jiffies)) + return sprintf(buf, "%u\n", + jiffies_to_msecs(expires - jiffies) / 1000); + + return sprintf(buf, "0\n"); +} + +static void idletimer_tg_work(struct work_struct *work) +{ + struct idletimer_tg *timer = container_of(work, struct idletimer_tg, + work); + + sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name); +} + +static void idletimer_tg_expired(unsigned long data) +{ + struct idletimer_tg *timer = (struct idletimer_tg *) data; + + pr_debug("timer %s expired\n", timer->attr.attr.name); + + schedule_work(&timer->work); +} + +static int idletimer_tg_create(struct idletimer_tg_info *info) +{ + int ret; + + info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL); + if (!info->timer) { + pr_debug("couldn't alloc timer\n"); + ret = -ENOMEM; + goto out; + } + + info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); + if (!info->timer->attr.attr.name) { + pr_debug("couldn't alloc attribute name\n"); + ret = -ENOMEM; + goto out_free_timer; + } + info->timer->attr.attr.mode = S_IRUGO; + info->timer->attr.show = idletimer_tg_show; + + ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr); + if (ret < 0) { + pr_debug("couldn't add file to sysfs"); + goto out_free_attr; + } + + list_add(&info->timer->entry, &idletimer_tg_list); + + setup_timer(&info->timer->timer, idletimer_tg_expired, + (unsigned long) info->timer); + info->timer->refcnt = 1; + + mod_timer(&info->timer->timer, + msecs_to_jiffies(info->timeout * 1000) + jiffies); + + INIT_WORK(&info->timer->work, idletimer_tg_work); + + return 0; + +out_free_attr: + kfree(info->timer->attr.attr.name); +out_free_timer: + kfree(info->timer); +out: + return ret; +} + +/* + * The actual xt_tables plugin. 
+ */ +static unsigned int idletimer_tg_target(struct sk_buff *skb, + const struct xt_action_param *par) +{ + const struct idletimer_tg_info *info = par->targinfo; + + pr_debug("resetting timer %s, timeout period %u\n", + info->label, info->timeout); + + BUG_ON(!info->timer); + + mod_timer(&info->timer->timer, + msecs_to_jiffies(info->timeout * 1000) + jiffies); + + return XT_CONTINUE; +} + +static int idletimer_tg_checkentry(const struct xt_tgchk_param *par) +{ + struct idletimer_tg_info *info = par->targinfo; + int ret; + + pr_debug("checkentry targinfo%s\n", info->label); + + if (info->timeout == 0) { + pr_debug("timeout value is zero\n"); + return -EINVAL; + } + + if (info->label[0] == '\0' || + strnlen(info->label, + MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { + pr_debug("label is empty or not nul-terminated\n"); + return -EINVAL; + } + + mutex_lock(&list_mutex); + + info->timer = __idletimer_tg_find_by_label(info->label); + if (info->timer) { + info->timer->refcnt++; + mod_timer(&info->timer->timer, + msecs_to_jiffies(info->timeout * 1000) + jiffies); + + pr_debug("increased refcnt of timer %s to %u\n", + info->label, info->timer->refcnt); + } else { + ret = idletimer_tg_create(info); + if (ret < 0) { + pr_debug("failed to create timer\n"); + mutex_unlock(&list_mutex); + return ret; + } + } + + mutex_unlock(&list_mutex); + return 0; +} + +static void idletimer_tg_destroy(const struct xt_tgdtor_param *par) +{ + const struct idletimer_tg_info *info = par->targinfo; + + pr_debug("destroy targinfo %s\n", info->label); + + mutex_lock(&list_mutex); + + if (--info->timer->refcnt == 0) { + pr_debug("deleting timer %s\n", info->label); + + list_del(&info->timer->entry); + del_timer_sync(&info->timer->timer); + sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr); + kfree(info->timer->attr.attr.name); + kfree(info->timer); + } else { + pr_debug("decreased refcnt of timer %s to %u\n", + info->label, info->timer->refcnt); + } + + mutex_unlock(&list_mutex); +} + +static struct xt_target idletimer_tg __read_mostly = { + .name = "IDLETIMER", + .family = NFPROTO_UNSPEC, + .target = idletimer_tg_target, + .targetsize = sizeof(struct idletimer_tg_info), + .checkentry = idletimer_tg_checkentry, + .destroy = idletimer_tg_destroy, + .me = THIS_MODULE, +}; + +static struct class *idletimer_tg_class; + +static struct device *idletimer_tg_device; + +static int __init idletimer_tg_init(void) +{ + int err; + + idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer"); + err = PTR_ERR(idletimer_tg_class); + if (IS_ERR(idletimer_tg_class)) { + pr_debug("couldn't register device class\n"); + goto out; + } + + idletimer_tg_device = device_create(idletimer_tg_class, NULL, + MKDEV(0, 0), NULL, "timers"); + err = PTR_ERR(idletimer_tg_device); + if (IS_ERR(idletimer_tg_device)) { + pr_debug("couldn't register system device\n"); + goto out_class; + } + + idletimer_tg_kobj = &idletimer_tg_device->kobj; + + err = xt_register_target(&idletimer_tg); + if (err < 0) { + pr_debug("couldn't register xt target\n"); + goto out_dev; + } + + return 0; +out_dev: + device_destroy(idletimer_tg_class, MKDEV(0, 0)); +out_class: + class_destroy(idletimer_tg_class); +out: + return err; +} + +static void __exit idletimer_tg_exit(void) +{ + xt_unregister_target(&idletimer_tg); + + device_destroy(idletimer_tg_class, MKDEV(0, 0)); + class_destroy(idletimer_tg_class); +} + +module_init(idletimer_tg_init); +module_exit(idletimer_tg_exit); + +MODULE_AUTHOR("Timo Teras "); +MODULE_AUTHOR("Luciano Coelho "); 
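
Each IDLETIMER label registered above becomes a sysfs attribute on the shared "timers" device; reading it yields the remaining idle time in whole seconds, or "0" once the timer has expired, and sysfs_notify() lets userspace poll for the expiry event. A small userspace sketch follows; the sysfs path is inferred from class_create("xt_idletimer") plus device_create(..., "timers"), and the "wlan_idle" label is purely an example of whatever name the rule was configured with:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/xt_idletimer/timers/wlan_idle", "r");
            unsigned int secs_left;

            if (!f)
                    return 1;
            if (fscanf(f, "%u", &secs_left) == 1)
                    printf("%u seconds until the idle timer expires\n", secs_left);
            fclose(f);
            return 0;
    }
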
+MODULE_DESCRIPTION("Xtables: idle time monitor"); +MODULE_LICENSE("GPL v2"); diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c index 512b9123252f0a0caa625a0a43cead965865864c..9d782181b6c8993236cec126c131949cbd3ee0a2 100644 --- a/net/netfilter/xt_NOTRACK.c +++ b/net/netfilter/xt_NOTRACK.c @@ -23,7 +23,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) If there is a real ct entry correspondig to this packet, it'll hang aroun till timing out. We don't deal with it for performance reasons. JK */ - skb->nfct = &nf_conntrack_untracked.ct_general; + skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 69c01e10f8afe00ebcdefd1becdec4875fa84a06..de079abd5bc873ae270abdfaa8d9c28af7df11ba 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -60,13 +60,22 @@ struct xt_rateest *xt_rateest_lookup(const char *name) } EXPORT_SYMBOL_GPL(xt_rateest_lookup); +static void xt_rateest_free_rcu(struct rcu_head *head) +{ + kfree(container_of(head, struct xt_rateest, rcu)); +} + void xt_rateest_put(struct xt_rateest *est) { mutex_lock(&xt_rateest_mutex); if (--est->refcnt == 0) { hlist_del(&est->list); gen_kill_estimator(&est->bstats, &est->rstats); - kfree(est); + /* + * gen_estimator est_timer() might access est->lock or bstats, + * wait a RCU grace period before freeing 'est' + */ + call_rcu(&est->rcu, xt_rateest_free_rcu); } mutex_unlock(&xt_rateest_mutex); } @@ -179,6 +188,7 @@ static int __init xt_rateest_tg_init(void) static void __exit xt_rateest_tg_fini(void) { xt_unregister_target(&xt_rateest_tg_reg); + rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */ } diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 62ec021fbd50ddd278da5f9f53756587d83fb786..eb81c380da1ba11d2f212499f43636ef09f9a327 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -165,8 +165,8 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, rcu_read_unlock(); if (rt != NULL) { - mtu = dst_mtu(&rt->u.dst); - dst_release(&rt->u.dst); + mtu = dst_mtu(&rt->dst); + dst_release(&rt->dst); } return mtu; } @@ -220,15 +220,13 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) } #endif -#define TH_SYN 0x02 - /* Must specify -p tcp --syn */ static inline bool find_syn_match(const struct xt_entry_match *m) { const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data; if (strcmp(m->u.kernel.match->name, "tcp") == 0 && - tcpinfo->flg_cmp & TH_SYN && + tcpinfo->flg_cmp & TCPHDR_SYN && !(tcpinfo->invflags & XT_TCP_INV_FLAGS)) return true; diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index 859d9fd429c812a560cc3676b1a62c6105026fe7..22a2d421e7ebc5172761d51caea4298d47d7ebfb 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c @@ -77,8 +77,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) return false; skb_dst_drop(skb); - skb_dst_set(skb, &rt->u.dst); - skb->dev = rt->u.dst.dev; + skb_dst_set(skb, &rt->dst); + skb->dev = rt->dst.dev; skb->protocol = htons(ETH_P_IP); return true; } @@ -104,7 +104,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par) #ifdef WITH_CONNTRACK /* Avoid counting cloned packets towards the original connection. 
*/ nf_conntrack_put(skb->nfct); - skb->nfct = &nf_conntrack_untracked.ct_general; + skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); #endif @@ -177,7 +177,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) #ifdef WITH_CONNTRACK nf_conntrack_put(skb->nfct); - skb->nfct = &nf_conntrack_untracked.ct_general; + skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); #endif diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index e1a0dedac2580b1aeb5fe1d52d6e5f60f082005b..c61294d85fdafbcb4696b704a6754f9f1c9476c1 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -37,8 +37,10 @@ tproxy_tg(struct sk_buff *skb, const struct xt_action_param *par) return NF_DROP; sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, - iph->saddr, tgi->laddr ? tgi->laddr : iph->daddr, - hp->source, tgi->lport ? tgi->lport : hp->dest, + iph->saddr, + tgi->laddr ? tgi->laddr : iph->daddr, + hp->source, + tgi->lport ? tgi->lport : hp->dest, par->in, true); /* NOTE: assign_sock consumes our sk reference */ diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 30b95a1c1c892da3018bbf9084e4fe3a8d59b4b1..f4af1bfafb1c61642ddb56ac490f7e161df5d39d 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c @@ -120,7 +120,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) if (ct == NULL) return false; - if (ct == &nf_conntrack_untracked) + if (nf_ct_is_untracked(ct)) return false; if (ct->master) diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index 73517835303d4fa68b2b34247d3907e2a7f4c3ca..5b138506690ec578105911ffb64309b5297d6458 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c @@ -112,6 +112,16 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par) if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); + + /* + * This filter cannot function correctly unless connection tracking + * accounting is enabled, so complain in the hope that someone notices. 
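
The connbytes check that continues just below relies on two per-namespace helpers, nf_ct_acct_enabled() and nf_ct_set_acct(), which are not part of this hunk. Judging from the way nf_conntrack_acct.c stores the module parameter in net->ct.sysctl_acct, they are presumably thin accessors along these lines (a guess for illustration, not the actual header contents):

    static inline bool nf_ct_acct_enabled(struct net *net)
    {
            return net->ct.sysctl_acct != 0;
    }

    static inline void nf_ct_set_acct(struct net *net, bool enable)
    {
            net->ct.sysctl_acct = enable;
    }
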
+ */ + if (!nf_ct_acct_enabled(par->net)) { + pr_warning("Forcing CT accounting to be enabled\n"); + nf_ct_set_acct(par->net, true); + } + return ret; } diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 39681f10291c13a8a72efbfebad857ca10a7d750..e536710ad916246a4f558eaad02092d4c81168df 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c @@ -123,11 +123,12 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, ct = nf_ct_get(skb, &ctinfo); - if (ct == &nf_conntrack_untracked) - statebit = XT_CONNTRACK_STATE_UNTRACKED; - else if (ct != NULL) - statebit = XT_CONNTRACK_STATE_BIT(ctinfo); - else + if (ct) { + if (nf_ct_is_untracked(ct)) + statebit = XT_CONNTRACK_STATE_UNTRACKED; + else + statebit = XT_CONNTRACK_STATE_BIT(ctinfo); + } else statebit = XT_CONNTRACK_STATE_INVALID; if (info->match_flags & XT_CONNTRACK_STATE) { diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..b39db8a5cbae8bb36618880effef34aaff1dc606 --- /dev/null +++ b/net/netfilter/xt_cpu.c @@ -0,0 +1,63 @@ +/* Kernel module to match running CPU */ + +/* + * Might be used to distribute connections on several daemons, if + * RPS (Remote Packet Steering) is enabled or NIC is multiqueue capable, + * each RX queue IRQ affined to one CPU (1:1 mapping) + * + */ + +/* (C) 2010 Eric Dumazet + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Dumazet "); +MODULE_DESCRIPTION("Xtables: CPU match"); + +static int cpu_mt_check(const struct xt_mtchk_param *par) +{ + const struct xt_cpu_info *info = par->matchinfo; + + if (info->invert & ~1) + return -EINVAL; + return 0; +} + +static bool cpu_mt(const struct sk_buff *skb, struct xt_action_param *par) +{ + const struct xt_cpu_info *info = par->matchinfo; + + return (info->cpu == smp_processor_id()) ^ info->invert; +} + +static struct xt_match cpu_mt_reg __read_mostly = { + .name = "cpu", + .revision = 0, + .family = NFPROTO_UNSPEC, + .checkentry = cpu_mt_check, + .match = cpu_mt, + .matchsize = sizeof(struct xt_cpu_info), + .me = THIS_MODULE, +}; + +static int __init cpu_mt_init(void) +{ + return xt_register_match(&cpu_mt_reg); +} + +static void __exit cpu_mt_exit(void) +{ + xt_unregister_match(&cpu_mt_reg); +} + +module_init(cpu_mt_init); +module_exit(cpu_mt_exit); diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c new file mode 100644 index 0000000000000000000000000000000000000000..7a4d66db95aed4bade12679308045c942c155ba3 --- /dev/null +++ b/net/netfilter/xt_ipvs.c @@ -0,0 +1,189 @@ +/* + * xt_ipvs - kernel module to match IPVS connection properties + * + * Author: Hannes Eder + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#ifdef CONFIG_IP_VS_IPV6 +#include +#endif +#include +#include +#include +#include +#include +#include + +#include + +MODULE_AUTHOR("Hannes Eder "); +MODULE_DESCRIPTION("Xtables: match IPVS connection properties"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("ipt_ipvs"); +MODULE_ALIAS("ip6t_ipvs"); + +/* borrowed from xt_conntrack */ +static bool ipvs_mt_addrcmp(const union nf_inet_addr *kaddr, + const union nf_inet_addr *uaddr, + const union nf_inet_addr *umask, + unsigned int l3proto) +{ + if (l3proto == NFPROTO_IPV4) + return ((kaddr->ip ^ 
uaddr->ip) & umask->ip) == 0; +#ifdef CONFIG_IP_VS_IPV6 + else if (l3proto == NFPROTO_IPV6) + return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6, + &uaddr->in6) == 0; +#endif + else + return false; +} + +static bool +ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par) +{ + const struct xt_ipvs_mtinfo *data = par->matchinfo; + /* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */ + const u_int8_t family = par->family; + struct ip_vs_iphdr iph; + struct ip_vs_protocol *pp; + struct ip_vs_conn *cp; + bool match = true; + + if (data->bitmask == XT_IPVS_IPVS_PROPERTY) { + match = skb->ipvs_property ^ + !!(data->invert & XT_IPVS_IPVS_PROPERTY); + goto out; + } + + /* other flags than XT_IPVS_IPVS_PROPERTY are set */ + if (!skb->ipvs_property) { + match = false; + goto out; + } + + ip_vs_fill_iphdr(family, skb_network_header(skb), &iph); + + if (data->bitmask & XT_IPVS_PROTO) + if ((iph.protocol == data->l4proto) ^ + !(data->invert & XT_IPVS_PROTO)) { + match = false; + goto out; + } + + pp = ip_vs_proto_get(iph.protocol); + if (unlikely(!pp)) { + match = false; + goto out; + } + + /* + * Check if the packet belongs to an existing entry + */ + cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */); + if (unlikely(cp == NULL)) { + match = false; + goto out; + } + + /* + * We found a connection, i.e. ct != 0, make sure to call + * __ip_vs_conn_put before returning. In our case jump to out_put_con. + */ + + if (data->bitmask & XT_IPVS_VPORT) + if ((cp->vport == data->vport) ^ + !(data->invert & XT_IPVS_VPORT)) { + match = false; + goto out_put_cp; + } + + if (data->bitmask & XT_IPVS_VPORTCTL) + if ((cp->control != NULL && + cp->control->vport == data->vportctl) ^ + !(data->invert & XT_IPVS_VPORTCTL)) { + match = false; + goto out_put_cp; + } + + if (data->bitmask & XT_IPVS_DIR) { + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + + if (ct == NULL || nf_ct_is_untracked(ct)) { + match = false; + goto out_put_cp; + } + + if ((ctinfo >= IP_CT_IS_REPLY) ^ + !!(data->invert & XT_IPVS_DIR)) { + match = false; + goto out_put_cp; + } + } + + if (data->bitmask & XT_IPVS_METHOD) + if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^ + !(data->invert & XT_IPVS_METHOD)) { + match = false; + goto out_put_cp; + } + + if (data->bitmask & XT_IPVS_VADDR) { + if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr, + &data->vmask, family) ^ + !(data->invert & XT_IPVS_VADDR)) { + match = false; + goto out_put_cp; + } + } + +out_put_cp: + __ip_vs_conn_put(cp); +out: + pr_debug("match=%d\n", match); + return match; +} + +static int ipvs_mt_check(const struct xt_mtchk_param *par) +{ + if (par->family != NFPROTO_IPV4 +#ifdef CONFIG_IP_VS_IPV6 + && par->family != NFPROTO_IPV6 +#endif + ) { + pr_info("protocol family %u not supported\n", par->family); + return -EINVAL; + } + + return 0; +} + +static struct xt_match xt_ipvs_mt_reg __read_mostly = { + .name = "ipvs", + .revision = 0, + .family = NFPROTO_UNSPEC, + .match = ipvs_mt, + .checkentry = ipvs_mt_check, + .matchsize = XT_ALIGN(sizeof(struct xt_ipvs_mtinfo)), + .me = THIS_MODULE, +}; + +static int __init ipvs_mt_init(void) +{ + return xt_register_match(&xt_ipvs_mt_reg); +} + +static void __exit ipvs_mt_exit(void) +{ + xt_unregister_match(&xt_ipvs_mt_reg); +} + +module_init(ipvs_mt_init); +module_exit(ipvs_mt_exit); diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index b4f7dfea59805f3c705859163bfcf57fcb6b4da8..70eb2b4984ddb277e052458f325599910e57d875 100644 --- 
a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c @@ -11,7 +11,8 @@ #include struct xt_quota_priv { - uint64_t quota; + spinlock_t lock; + uint64_t quota; }; MODULE_LICENSE("GPL"); @@ -20,8 +21,6 @@ MODULE_DESCRIPTION("Xtables: countdown quota match"); MODULE_ALIAS("ipt_quota"); MODULE_ALIAS("ip6t_quota"); -static DEFINE_SPINLOCK(quota_lock); - static bool quota_mt(const struct sk_buff *skb, struct xt_action_param *par) { @@ -29,7 +28,7 @@ quota_mt(const struct sk_buff *skb, struct xt_action_param *par) struct xt_quota_priv *priv = q->master; bool ret = q->flags & XT_QUOTA_INVERT; - spin_lock_bh("a_lock); + spin_lock_bh(&priv->lock); if (priv->quota >= skb->len) { priv->quota -= skb->len; ret = !ret; @@ -37,9 +36,7 @@ quota_mt(const struct sk_buff *skb, struct xt_action_param *par) /* we do not allow even small packets from now on */ priv->quota = 0; } - /* Copy quota back to matchinfo so that iptables can display it */ - q->quota = priv->quota; - spin_unlock_bh("a_lock); + spin_unlock_bh(&priv->lock); return ret; } @@ -55,6 +52,7 @@ static int quota_mt_check(const struct xt_mtchk_param *par) if (q->master == NULL) return -ENOMEM; + spin_lock_init(&q->master->lock); q->master->quota = q->quota; return 0; } diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index c04fcf385c591875ec45f2f50812b9c564657869..ef36a56a02c6881c58296b2bf45c4b99d3836456 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -67,7 +68,7 @@ match_packet(const struct sk_buff *skb, ++i, offset, sch->type, htons(sch->length), sch->flags); #endif - offset += (ntohs(sch->length) + 3) & ~3; + offset += WORD_ROUND(ntohs(sch->length)); pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 3d54c236a1ba0643ba0077eee8caf4f93078155f..1ca89908cbad84d2dc45093aa474cca056f239e3 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -127,7 +127,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, * reply packet of an established SNAT-ted connection. 
*/ ct = nf_ct_get(skb, &ctinfo); - if (ct && (ct != &nf_conntrack_untracked) && + if (ct && !nf_ct_is_untracked(ct) && ((iph->protocol != IPPROTO_ICMP && ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) || (iph->protocol == IPPROTO_ICMP && diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index e12e053d3782ba8f9252a5b5b8a2d3085b2345c3..a507922d80cdc2854a141acde78e5410bf918a48 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c @@ -26,14 +26,16 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par) const struct xt_state_info *sinfo = par->matchinfo; enum ip_conntrack_info ctinfo; unsigned int statebit; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (nf_ct_is_untracked(skb)) - statebit = XT_STATE_UNTRACKED; - else if (!nf_ct_get(skb, &ctinfo)) + if (!ct) statebit = XT_STATE_INVALID; - else - statebit = XT_STATE_BIT(ctinfo); - + else { + if (nf_ct_is_untracked(ct)) + statebit = XT_STATE_UNTRACKED; + else + statebit = XT_STATE_BIT(ctinfo); + } return (sinfo->statemask & statebit); } diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c index 96e62b8fd6b10d063f7bdc4cc2ee0ba28ed9991a..42ecb71d445fe6d009ac252d1b030cd86befcfb1 100644 --- a/net/netfilter/xt_statistic.c +++ b/net/netfilter/xt_statistic.c @@ -18,8 +18,8 @@ #include struct xt_statistic_priv { - uint32_t count; -}; + atomic_t count; +} ____cacheline_aligned_in_smp; MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); @@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)"); MODULE_ALIAS("ipt_statistic"); MODULE_ALIAS("ip6t_statistic"); -static DEFINE_SPINLOCK(nth_lock); - static bool statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_statistic_info *info = par->matchinfo; bool ret = info->flags & XT_STATISTIC_INVERT; + int nval, oval; switch (info->mode) { case XT_STATISTIC_MODE_RANDOM: @@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) ret = !ret; break; case XT_STATISTIC_MODE_NTH: - spin_lock_bh(&nth_lock); - if (info->master->count++ == info->u.nth.every) { - info->master->count = 0; + do { + oval = atomic_read(&info->master->count); + nval = (oval == info->u.nth.every) ? 
0 : oval + 1; + } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval); + if (nval == 0) ret = !ret; - } - spin_unlock_bh(&nth_lock); break; } @@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par) info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); if (info->master == NULL) return -ENOMEM; - info->master->count = info->u.nth.count; + atomic_set(&info->master->count, info->u.nth.count); return 0; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a2eb965207d339682728b5dd5a52104a8d4f848b..2cbf380377d5e009fa972d85af18ded9971a5248 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1076,14 +1076,15 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) do_one_broadcast(sk, &info); - kfree_skb(skb); + consume_skb(skb); netlink_unlock_table(); - kfree_skb(info.skb2); - - if (info.delivery_failure) + if (info.delivery_failure) { + kfree_skb(info.skb2); return -ENOBUFS; + } else + consume_skb(info.skb2); if (info.delivered) { if (info.congested && (allocation & __GFP_WAIT)) @@ -1323,19 +1324,23 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; - if (NULL == siocb->scm) + if (NULL == siocb->scm) { siocb->scm = &scm; + memset(&scm, 0, sizeof(scm)); + } err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; if (msg->msg_namelen) { + err = -EINVAL; if (addr->nl_family != AF_NETLINK) - return -EINVAL; + goto out; dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); + err = -EPERM; if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) - return -EPERM; + goto out; } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; @@ -1387,6 +1392,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: + scm_destroy(siocb->scm); return err; } @@ -1400,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, struct netlink_sock *nlk = nlk_sk(sk); int noblock = flags&MSG_DONTWAIT; size_t copied; - struct sk_buff *skb, *frag __maybe_unused = NULL; + struct sk_buff *skb; int err; if (flags&MSG_OOB) @@ -1435,7 +1441,21 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, kfree_skb(skb); skb = compskb; } else { - frag = skb_shinfo(skb)->frag_list; + /* + * Before setting frag_list to NULL, we must get a + * private copy of skb if shared (because of MSG_PEEK) + */ + if (skb_shared(skb)) { + struct sk_buff *nskb; + + nskb = pskb_copy(skb, GFP_KERNEL); + kfree_skb(skb); + skb = nskb; + err = -ENOMEM; + if (!skb) + goto out; + } + kfree_skb(skb_shinfo(skb)->frag_list); skb_shinfo(skb)->frag_list = NULL; } } @@ -1472,10 +1492,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, if (flags & MSG_TRUNC) copied = skb->len; -#ifdef CONFIG_COMPAT_NETLINK_MESSAGES - skb_shinfo(skb)->frag_list = frag; -#endif - skb_free_datagram(sk, skb); if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index aa4308afcc7f07168d9317235e795218d878acdd..26ed3e8587c20f13a22c7aa2e2abf91dedf3d62a 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -303,6 +303,7 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops) errout: return err; } +EXPORT_SYMBOL(genl_register_ops); /** * genl_unregister_ops - unregister 
generic netlink operations @@ -337,6 +338,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops) return -ENOENT; } +EXPORT_SYMBOL(genl_unregister_ops); /** * genl_register_family - register a generic netlink family @@ -405,6 +407,7 @@ int genl_register_family(struct genl_family *family) errout: return err; } +EXPORT_SYMBOL(genl_register_family); /** * genl_register_family_with_ops - register a generic netlink family @@ -485,6 +488,7 @@ int genl_unregister_family(struct genl_family *family) return -ENOENT; } +EXPORT_SYMBOL(genl_unregister_family); static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { @@ -873,11 +877,7 @@ static int __init genl_init(void) for (i = 0; i < GENL_FAM_TAB_SIZE; i++) INIT_LIST_HEAD(&family_ht[i]); - err = genl_register_family(&genl_ctrl); - if (err < 0) - goto problem; - - err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops); + err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1); if (err < 0) goto problem; @@ -899,11 +899,6 @@ static int __init genl_init(void) subsys_initcall(genl_init); -EXPORT_SYMBOL(genl_register_ops); -EXPORT_SYMBOL(genl_unregister_ops); -EXPORT_SYMBOL(genl_register_family); -EXPORT_SYMBOL(genl_unregister_family); - static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group, gfp_t flags) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2078a277e06b53f4e89181f8f4bb6bca21f7264a..9a17f28b1253cc2c96fc57a1b94cac4fa1621238 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -83,6 +83,7 @@ #include #include #include +#include #ifdef CONFIG_INET #include @@ -202,6 +203,7 @@ struct packet_sock { unsigned int tp_hdrlen; unsigned int tp_reserve; unsigned int tp_loss:1; + unsigned int tp_tstamp; struct packet_type prot_hook ____cacheline_aligned_in_smp; }; @@ -656,6 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct sk_buff *copy_skb = NULL; struct timeval tv; struct timespec ts; + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; @@ -737,7 +740,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; - if (skb->tstamp.tv64) + if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) + && shhwtstamps->syststamp.tv64) + tv = ktime_to_timeval(shhwtstamps->syststamp); + else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) + && shhwtstamps->hwtstamp.tv64) + tv = ktime_to_timeval(shhwtstamps->hwtstamp); + else if (skb->tstamp.tv64) tv = ktime_to_timeval(skb->tstamp); else do_gettimeofday(&tv); @@ -750,7 +759,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; - if (skb->tstamp.tv64) + if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) + && shhwtstamps->syststamp.tv64) + ts = ktime_to_timespec(shhwtstamps->syststamp); + else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) + && shhwtstamps->hwtstamp.tv64) + ts = ktime_to_timespec(shhwtstamps->hwtstamp); + else if (skb->tstamp.tv64) ts = ktime_to_timespec(skb->tstamp); else getnstimeofday(&ts); @@ -2027,6 +2042,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv po->has_vnet_hdr = !!val; return 0; } + case PACKET_TIMESTAMP: + { + int val; + + if (optlen != sizeof(val)) + return -EINVAL; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; + + po->tp_tstamp = val; + return 0; + } 
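
The new PACKET_TIMESTAMP socket option above selects which timestamp tpacket_rcv() copies into the ring slot: a SOF_TIMESTAMPING_* value requesting a hardware (or hardware-derived system) timestamp, or 0 to keep the old software-timestamp behaviour. A hedged userspace sketch; it assumes a PACKET_RX_RING socket has already been created elsewhere:

    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <linux/net_tstamp.h>

    /* Ask for raw hardware timestamps in the RX ring; tpacket_rcv() falls
     * back to the software timestamp when the skb carries none. */
    static int packet_request_hw_tstamps(int fd)
    {
            int val = SOF_TIMESTAMPING_RAW_HARDWARE;

            return setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
                              &val, sizeof(val));
    }
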
default: return -ENOPROTOOPT; } @@ -2119,6 +2146,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, val = po->tp_loss; data = &val; break; + case PACKET_TIMESTAMP: + if (len > sizeof(int)) + len = sizeof(int); + val = po->tp_tstamp; + data = &val; + break; default: return -ENOPROTOOPT; } diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index c33da65769420555a4a3cb60c0d5585519111f11..b18e48fae9759cf20d735031bc1f18a7e8857a11 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -162,6 +162,14 @@ int phonet_address_add(struct net_device *dev, u8 addr) return err; } +static void phonet_device_rcu_free(struct rcu_head *head) +{ + struct phonet_device *pnd; + + pnd = container_of(head, struct phonet_device, rcu); + kfree(pnd); +} + int phonet_address_del(struct net_device *dev, u8 addr) { struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); @@ -179,10 +187,9 @@ int phonet_address_del(struct net_device *dev, u8 addr) pnd = NULL; mutex_unlock(&pndevs->lock); - if (pnd) { - synchronize_rcu(); - kfree(pnd); - } + if (pnd) + call_rcu(&pnd->rcu, phonet_device_rcu_free); + return err; } diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index cbc244a128bd1611829f6e943ae7a669cc81f2d3..b4fdaac233f77ec3dbe4bd86b0f8d5bc178bb9f7 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -109,7 +109,9 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route, init_timer(&rose_neigh->t0timer); if (rose_route->ndigis != 0) { - if ((rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) { + rose_neigh->digipeat = + kmalloc(sizeof(ax25_digi), GFP_ATOMIC); + if (rose_neigh->digipeat == NULL) { kfree(rose_neigh); res = -ENOMEM; goto out; diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c index f0f85b0123f7b58f3d9e34048af2bcf49fcf4fa5..9f1729bd60de35a66171b0d7853555cfc9516bcf 100644 --- a/net/rxrpc/ar-peer.c +++ b/net/rxrpc/ar-peer.c @@ -64,8 +64,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) return; } - peer->if_mtu = dst_mtu(&rt->u.dst); - dst_release(&rt->u.dst); + peer->if_mtu = dst_mtu(&rt->dst); + dst_release(&rt->dst); _leave(" [if_mtu %u]", peer->if_mtu); } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 972378f47f3cfd2ef03b33ac3a3a5ea7801e7afe..23b25f89e7e00d7c9219a3ec2e0a0b33e49fc552 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -26,6 +26,11 @@ #include #include +static void tcf_common_free_rcu(struct rcu_head *head) +{ + kfree(container_of(head, struct tcf_common, tcfc_rcu)); +} + void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) { unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); @@ -38,7 +43,11 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) write_unlock_bh(hinfo->lock); gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est); - kfree(p); + /* + * gen_estimator est_timer() might access p->tcfc_lock + * or bstats, wait a RCU grace period before freeing p + */ + call_rcu(&p->tcfc_rcu, tcf_common_free_rcu); return; } } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 1980b71c283ffbe9e50ac4f6326f91d41a40a6cb..11f195af2da0732aaf362380928e298f7f35a199 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -165,6 +165,8 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, spin_lock(&m->tcf_lock); m->tcf_tm.lastuse = jiffies; + m->tcf_bstats.bytes += qdisc_pkt_len(skb); + m->tcf_bstats.packets++; dev = m->tcfm_dev; if (!dev) { @@ -179,13 +181,11 @@ 
static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, goto out; } - skb2 = skb_act_clone(skb, GFP_ATOMIC); + at = G_TC_AT(skb->tc_verd); + skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action); if (skb2 == NULL) goto out; - m->tcf_bstats.bytes += qdisc_pkt_len(skb2); - m->tcf_bstats.packets++; - at = G_TC_AT(skb->tc_verd); if (!(at & AT_EGRESS)) { if (m->tcfm_ok_push) skb_push(skb2, skb2->dev->hard_header_len); @@ -195,16 +195,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, if (m->tcfm_eaction != TCA_EGRESS_MIRROR) skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); - skb2->dev = dev; skb2->skb_iif = skb->dev->ifindex; + skb2->dev = dev; dev_queue_xmit(skb2); err = 0; out: if (err) { m->tcf_qstats.overlimits++; - m->tcf_bstats.bytes += qdisc_pkt_len(skb); - m->tcf_bstats.packets++; /* should we be asking for packet to be dropped? * may make sense for redirect case only */ diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 724553e8ed7bc9d8ecd668c71ab9373936a3d2d3..d0386a413e8dc9406d470e66af196efc40f4e226 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -218,6 +218,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) goto drop; + icmph = (void *)(skb_network_header(skb) + ihl); iph = (void *)(icmph + 1); if (egress) addr = iph->daddr; @@ -246,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, iph->saddr = new_addr; inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, - 1); + 0); break; } default: @@ -268,40 +269,29 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_nat *p = a->priv; - struct tc_nat *opt; + struct tc_nat opt; struct tcf_t t; - int s; - - s = sizeof(*opt); - /* netlink spinlocks held above us - must use ATOMIC */ - opt = kzalloc(s, GFP_ATOMIC); - if (unlikely(!opt)) - return -ENOBUFS; + opt.old_addr = p->old_addr; + opt.new_addr = p->new_addr; + opt.mask = p->mask; + opt.flags = p->flags; - opt->old_addr = p->old_addr; - opt->new_addr = p->new_addr; - opt->mask = p->mask; - opt->flags = p->flags; + opt.index = p->tcf_index; + opt.action = p->tcf_action; + opt.refcnt = p->tcf_refcnt - ref; + opt.bindcnt = p->tcf_bindcnt - bind; - opt->index = p->tcf_index; - opt->action = p->tcf_action; - opt->refcnt = p->tcf_refcnt - ref; - opt->bindcnt = p->tcf_bindcnt - bind; - - NLA_PUT(skb, TCA_NAT_PARMS, s, opt); + NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); - kfree(opt); - return skb->len; nla_put_failure: nlmsg_trim(skb, b); - kfree(opt); return -1; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 50e3d945e1f48a12b8a499aeb07d50f3d286017a..a0593c9640db1def9adbd1ce1483973dfb300c4c 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -127,8 +127,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, int i, munged = 0; unsigned int off; - if (!(skb->tc_verd & TC_OK2MUNGE)) { - /* should we set skb->cloned? 
*/ + if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { return p->tcf_action; } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 654f73dff7c1c8d7f9d5fb6541174299c4b32f05..537a48732e9e0b8513dd324801c342ec00cd3820 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -97,6 +97,11 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c goto done; } +static void tcf_police_free_rcu(struct rcu_head *head) +{ + kfree(container_of(head, struct tcf_police, tcf_rcu)); +} + static void tcf_police_destroy(struct tcf_police *p) { unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); @@ -113,7 +118,11 @@ static void tcf_police_destroy(struct tcf_police *p) qdisc_put_rtab(p->tcfp_R_tab); if (p->tcfp_P_tab) qdisc_put_rtab(p->tcfp_P_tab); - kfree(p); + /* + * gen_estimator est_timer() might access p->tcf_lock + * or bstats, wait a RCU grace period before freeing p + */ + call_rcu(&p->tcf_rcu, tcf_police_free_rcu); return; } } @@ -397,6 +406,7 @@ static void __exit police_cleanup_module(void) { tcf_unregister_action(&act_police_ops); + rcu_barrier(); /* Wait for completion of call_rcu()'s (tcf_police_free_rcu) */ } module_init(police_init_module); diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 1b4bc691d7d1d0b5d716ddc681f319e23c19c98c..4a1d640b0cf16d842a26e3de807df2d937752cc8 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -73,10 +73,10 @@ static int tcf_simp_release(struct tcf_defact *d, int bind) static int alloc_defdata(struct tcf_defact *d, char *defdata) { - d->tcfd_defdata = kstrndup(defdata, SIMP_MAX_DATA, GFP_KERNEL); + d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL); if (unlikely(!d->tcfd_defdata)) return -ENOMEM; - + strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); return 0; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4f522143811e467fe70e7e4b13db47f979b11b8a..7416a5c73b2a993550991ac66eca7cc254c6f2e6 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -134,10 +134,12 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re #endif for (i = n->sel.nkeys; i>0; i--, key++) { - unsigned int toff; + int toff = off + key->off + (off2 & key->offmask); __be32 *data, _data; - toff = off + key->off + (off2 & key->offmask); + if (skb_headroom(skb) + toff < 0) + goto out; + data = skb_header_pointer(skb, toff, 4, &_data); if (!data) goto out; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index fcbb86a486a21ab3c9a51a63bd248f809817dbd0..e114f23d5eaeb189428fbf4d307669da80e4f255 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -52,7 +52,7 @@ struct atm_flow_data { int ref; /* reference count */ struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; - struct atm_flow_data *next; + struct list_head list; struct atm_flow_data *excess; /* flow for excess traffic; NULL to set CLP instead */ int hdr_len; @@ -61,34 +61,23 @@ struct atm_flow_data { struct atm_qdisc_data { struct atm_flow_data link; /* unclassified skbs go here */ - struct atm_flow_data *flows; /* NB: "link" is also on this + struct list_head flows; /* NB: "link" is also on this list */ struct tasklet_struct task; /* dequeue tasklet */ }; /* ------------------------- Class/flow operations ------------------------- */ -static int find_flow(struct atm_qdisc_data *qdisc, struct atm_flow_data *flow) -{ - struct atm_flow_data *walk; - - pr_debug("find_flow(qdisc %p,flow %p)\n", qdisc, flow); - for (walk = qdisc->flows; walk; walk = walk->next) - 
if (walk == flow) - return 1; - pr_debug("find_flow: not found\n"); - return 0; -} - static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; - for (flow = p->flows; flow; flow = flow->next) + list_for_each_entry(flow, &p->flows, list) { if (flow->classid == classid) - break; - return flow; + return flow; + } + return NULL; } static int atm_tc_graft(struct Qdisc *sch, unsigned long arg, @@ -99,7 +88,7 @@ static int atm_tc_graft(struct Qdisc *sch, unsigned long arg, pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n", sch, p, flow, new, old); - if (!find_flow(p, flow)) + if (list_empty(&flow->list)) return -EINVAL; if (!new) new = &noop_qdisc; @@ -146,20 +135,12 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; - struct atm_flow_data **prev; pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); if (--flow->ref) return; pr_debug("atm_tc_put: destroying\n"); - for (prev = &p->flows; *prev; prev = &(*prev)->next) - if (*prev == flow) - break; - if (!*prev) { - printk(KERN_CRIT "atm_tc_put: class %p not found\n", flow); - return; - } - *prev = flow->next; + list_del_init(&flow->list); pr_debug("atm_tc_put: qdisc %p\n", flow->q); qdisc_destroy(flow->q); tcf_destroy_chain(&flow->filter_list); @@ -274,7 +255,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, error = -EINVAL; goto err_out; } - if (find_flow(p, flow)) { + if (!list_empty(&flow->list)) { error = -EEXIST; goto err_out; } @@ -313,8 +294,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, flow->classid = classid; flow->ref = 1; flow->excess = excess; - flow->next = p->link.next; - p->link.next = flow; + list_add(&flow->list, &p->link.list); flow->hdr_len = hdr_len; if (hdr) memcpy(flow->hdr, hdr, hdr_len); @@ -335,7 +315,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) struct atm_flow_data *flow = (struct atm_flow_data *)arg; pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); - if (!find_flow(qdisc_priv(sch), flow)) + if (list_empty(&flow->list)) return -EINVAL; if (flow->filter_list || flow == &p->link) return -EBUSY; @@ -361,12 +341,12 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker); if (walker->stop) return; - for (flow = p->flows; flow; flow = flow->next) { - if (walker->count >= walker->skip) - if (walker->fn(sch, (unsigned long)flow, walker) < 0) { - walker->stop = 1; - break; - } + list_for_each_entry(flow, &p->flows, list) { + if (walker->count >= walker->skip && + walker->fn(sch, (unsigned long)flow, walker) < 0) { + walker->stop = 1; + break; + } walker->count++; } } @@ -385,16 +365,17 @@ static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl) static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); - struct atm_flow_data *flow = NULL; /* @@@ */ + struct atm_flow_data *flow; struct tcf_result res; int result; int ret = NET_XMIT_POLICED; pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p); result = TC_POLICE_OK; /* be nice to gcc */ + flow = NULL; if (TC_H_MAJ(skb->priority) != sch->handle || - !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) - for (flow = p->flows; flow; flow = flow->next) + !(flow = 
(struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { + list_for_each_entry(flow, &p->flows, list) { if (flow->filter_list) { result = tc_classify_compat(skb, flow->filter_list, @@ -404,8 +385,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow = (struct atm_flow_data *)res.class; if (!flow) flow = lookup_flow(sch, res.classid); - break; + goto done; } + } + flow = NULL; + done: + ; + } if (!flow) flow = &p->link; else { @@ -477,7 +463,9 @@ static void sch_atm_dequeue(unsigned long data) struct sk_buff *skb; pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p); - for (flow = p->link.next; flow; flow = flow->next) + list_for_each_entry(flow, &p->flows, list) { + if (flow == &p->link) + continue; /* * If traffic is properly shaped, this won't generate nasty * little bursts. Otherwise, it may ... (but that's okay) @@ -512,6 +500,7 @@ static void sch_atm_dequeue(unsigned long data) /* atm.atm_options are already set by atm_tc_enqueue */ flow->vcc->send(flow->vcc, skb); } + } } static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) @@ -543,9 +532,10 @@ static unsigned int atm_tc_drop(struct Qdisc *sch) unsigned int len; pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p); - for (flow = p->flows; flow; flow = flow->next) + list_for_each_entry(flow, &p->flows, list) { if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q))) return len; + } return 0; } @@ -554,7 +544,9 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) struct atm_qdisc_data *p = qdisc_priv(sch); pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); - p->flows = &p->link; + INIT_LIST_HEAD(&p->flows); + INIT_LIST_HEAD(&p->link.list); + list_add(&p->link.list, &p->flows); p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, &pfifo_qdisc_ops, sch->handle); if (!p->link.q) @@ -565,7 +557,6 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) p->link.sock = NULL; p->link.classid = sch->handle; p->link.ref = 1; - p->link.next = NULL; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; } @@ -576,7 +567,7 @@ static void atm_tc_reset(struct Qdisc *sch) struct atm_flow_data *flow; pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); - for (flow = p->flows; flow; flow = flow->next) + list_for_each_entry(flow, &p->flows, list) qdisc_reset(flow->q); sch->q.qlen = 0; } @@ -584,24 +575,17 @@ static void atm_tc_reset(struct Qdisc *sch) static void atm_tc_destroy(struct Qdisc *sch) { struct atm_qdisc_data *p = qdisc_priv(sch); - struct atm_flow_data *flow; + struct atm_flow_data *flow, *tmp; pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p); - for (flow = p->flows; flow; flow = flow->next) + list_for_each_entry(flow, &p->flows, list) tcf_destroy_chain(&flow->filter_list); - /* races ? 
*/ - while ((flow = p->flows)) { + list_for_each_entry_safe(flow, tmp, &p->flows, list) { if (flow->ref > 1) printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, flow->ref); atm_tc_put(sch, (unsigned long)flow); - if (p->flows == flow) { - printk(KERN_ERR "atm_destroy: putting flow %p didn't " - "kill it\n", flow); - p->flows = flow->next; /* brute force */ - break; - } } tasklet_kill(&p->task); } @@ -615,7 +599,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", sch, p, flow, skb, tcm); - if (!find_flow(p, flow)) + if (list_empty(&flow->list)) return -EINVAL; tcm->tcm_handle = flow->classid; tcm->tcm_info = flow->q->handle; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a63029ef3eddc9e1bc19cbc552f327e6fdafa8aa..2aeb3a4386a10f499f020c961bada4803653985b 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -96,7 +96,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, * Another cpu is holding lock, requeue & delay xmits for * some time. */ - __get_cpu_var(softnet_data).cpu_collision++; + __this_cpu_inc(softnet_data.cpu_collision); ret = dev_requeue_skb(skb, q); } @@ -205,7 +205,7 @@ void __qdisc_run(struct Qdisc *q) } } - clear_bit(__QDISC_STATE_RUNNING, &q->state); + qdisc_run_end(q); } unsigned long dev_trans_start(struct net_device *dev) @@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev) } EXPORT_SYMBOL(netif_carrier_off); +/** + * netif_notify_peers - notify network peers about existence of @dev + * @dev: network device + * + * Generate traffic such that interested network peers are aware of + * @dev, such as by generating a gratuitous ARP. This may be used when + * a device wants to inform the rest of the network about some sort of + * reconfiguration such as a failover event or virtual machine + * migration. + */ +void netif_notify_peers(struct net_device *dev) +{ + rtnl_lock(); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); + rtnl_unlock(); +} +EXPORT_SYMBOL(netif_notify_peers); + /* "NOOP" scheduler: the best scheduler, recommended for all interfaces under all circumstances. It is difficult to invent anything faster or cheaper. 
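A minimal sketch, under assumed driver context, of how the netif_notify_peers() helper added in the sch_generic.c hunk above might be used, e.g. after a failover or virtual machine migration; the function name and call site are hypothetical, and because netif_notify_peers() takes the rtnl lock itself it must be called from process context without rtnl held:

	#include <linux/netdevice.h>

	/* Hypothetical link-recovery path of a virtual NIC driver
	 * (illustration only, not from this patch). */
	static void example_nic_link_recovered(struct net_device *dev)
	{
		netif_carrier_on(dev);		/* link usable again */
		netif_notify_peers(dev);	/* fires NETDEV_NOTIFY_PEERS,
						 * e.g. gratuitous ARP */
	}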
@@ -543,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, INIT_LIST_HEAD(&sch->list); skb_queue_head_init(&sch->q); + spin_lock_init(&sch->busylock); sch->ops = ops; sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; @@ -779,7 +798,7 @@ static bool some_qdisc_is_busy(struct net_device *dev) spin_lock_bh(root_lock); - val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || + val = (qdisc_is_running(q) || test_bit(__QDISC_STATE_SCHED, &q->state)); spin_unlock_bh(root_lock); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 0b52b8de562c40db1bd4541a08389885af621cb2..4be8d04b262d18963ac6617abaaeefc5151e16f5 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1550,7 +1550,6 @@ static const struct Qdisc_class_ops htb_class_ops = { }; static struct Qdisc_ops htb_qdisc_ops __read_mostly = { - .next = NULL, .cl_ops = &htb_class_ops, .id = "htb", .priv_size = sizeof(struct htb_sched), @@ -1561,7 +1560,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = { .init = htb_init, .reset = htb_reset, .destroy = htb_destroy, - .change = NULL /* htb_change */, .dump = htb_dump, .owner = THIS_MODULE, }; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 182749867c72a4c2b629d8675e1ad2de6340a536..c0e162aeb0bd722abe745a6bd041bb470c8df854 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -490,7 +490,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, __func__, &fl.fl4_dst, &fl.fl4_src); if (!ip_route_output_key(&init_net, &rt, &fl)) { - dst = &rt->u.dst; + dst = &rt->dst; } /* If there is no association or if a source address is passed, no @@ -534,7 +534,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, fl.fl4_src = laddr->a.v4.sin_addr.s_addr; fl.fl_ip_sport = laddr->a.v4.sin_port; if (!ip_route_output_key(&init_net, &rt, &fl)) { - dst = &rt->u.dst; + dst = &rt->dst; goto out_unlock; } } @@ -1002,7 +1002,8 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) static inline int init_sctp_mibs(void) { return snmp_mib_init((void __percpu **)sctp_statistics, - sizeof(struct sctp_mib)); + sizeof(struct sctp_mib), + __alignof__(struct sctp_mib)); } static inline void cleanup_sctp_mibs(void) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index bd2a50b482ace5089f4857b6aa56a6f066e0f683..246f92924658b7d5dca18ae4d4a212d590b3e4d8 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1817,7 +1817,7 @@ struct sctp_association *sctp_unpack_cookie( struct __sctp_missing { __be32 num_missing; __be16 type; -} __attribute__((packed)); +} __packed; /* * Report a missing mandatory parameter. diff --git a/net/socket.c b/net/socket.c index 367d5477d00fee1eb9fcd3eba5e6f31b1383e312..2270b941bcc76ec0e52cc0d1321d8572164299e1 100644 --- a/net/socket.c +++ b/net/socket.c @@ -124,7 +124,7 @@ static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, + struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* @@ -162,7 +162,7 @@ static const struct net_proto_family *net_families[NPROTO] __read_mostly; * Statistics counters of the socket lists */ -static DEFINE_PER_CPU(int, sockets_in_use) = 0; +static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. 
@@ -170,15 +170,6 @@ static DEFINE_PER_CPU(int, sockets_in_use) = 0; * divide and look after the messy bits. */ -#define MAX_SOCK_ADDR 128 /* 108 for Unix domain - - 16 for IP, 16 for IPX, - 24 for IPv6, - about 80 for AX.25 - must be at least one bigger than - the AF_UNIX size (see net/unix/af_unix.c - :unix_mkname()). - */ - /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space @@ -309,9 +300,9 @@ static int init_inodecache(void) } static const struct super_operations sockfs_ops = { - .alloc_inode = sock_alloc_inode, - .destroy_inode =sock_destroy_inode, - .statfs = simple_statfs, + .alloc_inode = sock_alloc_inode, + .destroy_inode = sock_destroy_inode, + .statfs = simple_statfs, }; static int sockfs_get_sb(struct file_system_type *fs_type, @@ -411,6 +402,7 @@ int sock_map_fd(struct socket *sock, int flags) return fd; } +EXPORT_SYMBOL(sock_map_fd); static struct socket *sock_from_file(struct file *file, int *err) { @@ -422,7 +414,7 @@ static struct socket *sock_from_file(struct file *file, int *err) } /** - * sockfd_lookup - Go from a file number to its socket slot + * sockfd_lookup - Go from a file number to its socket slot * @fd: file handle * @err: pointer to an error code return * @@ -450,6 +442,7 @@ struct socket *sockfd_lookup(int fd, int *err) fput(file); return sock; } +EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { @@ -540,6 +533,7 @@ void sock_release(struct socket *sock) } sock->file = NULL; } +EXPORT_SYMBOL(sock_release); int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, union skb_shared_tx *shtx) @@ -586,6 +580,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) ret = wait_on_sync_kiocb(&iocb); return ret; } +EXPORT_SYMBOL(sock_sendmsg); int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) @@ -604,6 +599,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, set_fs(oldfs); return result; } +EXPORT_SYMBOL(kernel_sendmsg); static int ktime2ts(ktime_t kt, struct timespec *ts) { @@ -664,7 +660,6 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } - EXPORT_SYMBOL_GPL(__sock_recv_timestamp); inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) @@ -720,6 +715,7 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, ret = wait_on_sync_kiocb(&iocb); return ret; } +EXPORT_SYMBOL(sock_recvmsg); static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) @@ -752,6 +748,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, set_fs(oldfs); return result; } +EXPORT_SYMBOL(kernel_recvmsg); static void sock_aio_dtor(struct kiocb *iocb) { @@ -774,7 +771,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page, } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, + struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; @@ -887,7 +884,7 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, */ static DEFINE_MUTEX(br_ioctl_mutex); -static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL; +static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) { @@ -895,7 +892,6 @@ 
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) br_ioctl_hook = hook; mutex_unlock(&br_ioctl_mutex); } - EXPORT_SYMBOL(brioctl_set); static DEFINE_MUTEX(vlan_ioctl_mutex); @@ -907,7 +903,6 @@ void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); } - EXPORT_SYMBOL(vlan_ioctl_set); static DEFINE_MUTEX(dlci_ioctl_mutex); @@ -919,7 +914,6 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) dlci_ioctl_hook = hook; mutex_unlock(&dlci_ioctl_mutex); } - EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, @@ -1047,6 +1041,7 @@ int sock_create_lite(int family, int type, int protocol, struct socket **res) sock = NULL; goto out; } +EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) @@ -1147,6 +1142,7 @@ int sock_wake_async(struct socket *sock, int how, int band) rcu_read_unlock(); return 0; } +EXPORT_SYMBOL(sock_wake_async); static int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) @@ -1265,11 +1261,13 @@ int sock_create(int family, int type, int protocol, struct socket **res) { return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); } +EXPORT_SYMBOL(sock_create); int sock_create_kern(int family, int type, int protocol, struct socket **res) { return __sock_create(&init_net, family, type, protocol, res, 1); } +EXPORT_SYMBOL(sock_create_kern); SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) { @@ -1474,7 +1472,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, goto out; err = -ENFILE; - if (!(newsock = sock_alloc())) + newsock = sock_alloc(); + if (!newsock) goto out_put; newsock->type = sock->type; @@ -1861,8 +1860,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(&msg_sys, msg_compat)) return -EFAULT; - } - else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) + } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; sock = sockfd_lookup_light(fd, &err, &fput_needed); @@ -1964,8 +1962,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; - } - else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) + } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; err = -EMSGSIZE; @@ -2191,10 +2188,10 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[20] = { - AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), - AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), - AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), - AL(4),AL(5) + AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), + AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), + AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), + AL(4), AL(5) }; #undef AL @@ -2340,6 +2337,7 @@ int sock_register(const struct net_proto_family *ops) printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); return err; } +EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler @@ -2366,6 +2364,7 @@ void sock_unregister(int family) printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); } +EXPORT_SYMBOL(sock_unregister); static int __init 
sock_init(void) { @@ -2395,6 +2394,10 @@ static int __init sock_init(void) netfilter_init(); #endif +#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING + skb_timestamping_init(); +#endif + return 0; } @@ -2490,13 +2493,13 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { - size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) * - sizeof (struct ifreq); + size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * + sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); - for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) { + for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; @@ -2516,9 +2519,9 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; - i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; - i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) { - if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq))) + i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; + i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { + if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; @@ -2567,7 +2570,7 @@ static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32 compat_uptr_t uptr32; struct ifreq __user *uifr; - uifr = compat_alloc_user_space(sizeof (*uifr)); + uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; @@ -2601,9 +2604,9 @@ static int bond_ioctl(struct net *net, unsigned int cmd, return -EFAULT; old_fs = get_fs(); - set_fs (KERNEL_DS); + set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, &kifr); - set_fs (old_fs); + set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: @@ -2710,9 +2713,9 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, return -EFAULT; old_fs = get_fs(); - set_fs (KERNEL_DS); + set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user *)&ifr); - set_fs (old_fs); + set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); @@ -2734,7 +2737,7 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif compat_uptr_t uptr32; struct ifreq __user *uifr; - uifr = compat_alloc_user_space(sizeof (*uifr)); + uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; @@ -2750,20 +2753,20 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif } struct rtentry32 { - u32 rt_pad1; + u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ - unsigned short rt_flags; - short rt_pad2; - u32 rt_pad3; - unsigned char rt_tos; - unsigned char rt_class; - short rt_pad4; - short rt_metric; /* +1 for binary compatibility! */ + unsigned short rt_flags; + short rt_pad2; + u32 rt_pad3; + unsigned char rt_tos; + unsigned char rt_class; + short rt_pad4; + short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ - u32 rt_mtu; /* per route MTU/Window */ - u32 rt_window; /* Window clamping */ + u32 rt_mtu; /* per route MTU/Window */ + u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; @@ -2793,29 +2796,29 @@ static int routing_ioctl(struct net *net, struct socket *sock, if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; - ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst), + ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); - ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type)); - ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); - ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); - ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric)); - ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info)); - ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags)); - ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); + ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); + ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); + ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); + ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); + ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); + ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); + ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; - ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst), + ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); - ret |= __get_user (r4.rt_flags, &(ur4->rt_flags)); - ret |= __get_user (r4.rt_metric, &(ur4->rt_metric)); - ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu)); - ret |= __get_user (r4.rt_window, &(ur4->rt_window)); - ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt)); - ret |= __get_user (rtdev, &(ur4->rt_dev)); + ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); + ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); + ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); + ret |= __get_user(r4.rt_window, &(ur4->rt_window)); + ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); + ret |= __get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { - ret |= copy_from_user (devname, compat_ptr(rtdev), 15); + ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = devname; devname[15] = 0; } else r4.rt_dev = NULL; @@ -2828,9 +2831,9 @@ static int routing_ioctl(struct net *net, struct socket *sock, goto out; } - set_fs (KERNEL_DS); + set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); - set_fs (old_fs); + set_fs(old_fs); out: return ret; @@ -2993,11 +2996,13 @@ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } +EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } +EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { @@ -3022,24 +3027,28 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags) done: return err; } +EXPORT_SYMBOL(kernel_accept); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags) { return sock->ops->connect(sock, addr, addrlen, flags); } +EXPORT_SYMBOL(kernel_connect); int kernel_getsockname(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 0); } 
+EXPORT_SYMBOL(kernel_getsockname); int kernel_getpeername(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 1); } +EXPORT_SYMBOL(kernel_getpeername); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen) @@ -3056,6 +3065,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, set_fs(oldfs); return err; } +EXPORT_SYMBOL(kernel_getsockopt); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen) @@ -3072,6 +3082,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, set_fs(oldfs); return err; } +EXPORT_SYMBOL(kernel_setsockopt); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) @@ -3083,6 +3094,7 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, return sock_no_sendpage(sock, page, offset, size, flags); } +EXPORT_SYMBOL(kernel_sendpage); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { @@ -3095,33 +3107,10 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) return err; } +EXPORT_SYMBOL(kernel_sock_ioctl); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) { return sock->ops->shutdown(sock, how); } - -EXPORT_SYMBOL(sock_create); -EXPORT_SYMBOL(sock_create_kern); -EXPORT_SYMBOL(sock_create_lite); -EXPORT_SYMBOL(sock_map_fd); -EXPORT_SYMBOL(sock_recvmsg); -EXPORT_SYMBOL(sock_register); -EXPORT_SYMBOL(sock_release); -EXPORT_SYMBOL(sock_sendmsg); -EXPORT_SYMBOL(sock_unregister); -EXPORT_SYMBOL(sock_wake_async); -EXPORT_SYMBOL(sockfd_lookup); -EXPORT_SYMBOL(kernel_sendmsg); -EXPORT_SYMBOL(kernel_recvmsg); -EXPORT_SYMBOL(kernel_bind); -EXPORT_SYMBOL(kernel_listen); -EXPORT_SYMBOL(kernel_accept); -EXPORT_SYMBOL(kernel_connect); -EXPORT_SYMBOL(kernel_getsockname); -EXPORT_SYMBOL(kernel_getpeername); -EXPORT_SYMBOL(kernel_getsockopt); -EXPORT_SYMBOL(kernel_setsockopt); -EXPORT_SYMBOL(kernel_sendpage); -EXPORT_SYMBOL(kernel_sock_ioctl); EXPORT_SYMBOL(kernel_sock_shutdown); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fef2cc5e9d2bf48a8a26e32bcb1796a6c0165284..4414a18c63b49601357789fb5d6df78326819ba7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -282,7 +282,7 @@ static inline struct sock *unix_find_socket_byname(struct net *net, return s; } -static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i) +static struct sock *unix_find_socket_byinode(struct inode *i) { struct sock *s; struct hlist_node *node; @@ -292,9 +292,6 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i) &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { struct dentry *dentry = unix_sk(s)->dentry; - if (!net_eq(sock_net(s), net)) - continue; - if (dentry && dentry->d_inode == i) { sock_hold(s); goto found; @@ -450,11 +447,31 @@ static int unix_release_sock(struct sock *sk, int embrion) return 0; } +static void init_peercred(struct sock *sk) +{ + put_pid(sk->sk_peer_pid); + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); + sk->sk_peer_pid = get_pid(task_tgid(current)); + sk->sk_peer_cred = get_current_cred(); +} + +static void copy_peercred(struct sock *sk, struct sock *peersk) +{ + put_pid(sk->sk_peer_pid); + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); + sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); + sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); +} + static int unix_listen(struct socket *sock, int backlog) { int err; struct 
sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); + struct pid *old_pid = NULL; + const struct cred *old_cred = NULL; err = -EOPNOTSUPP; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) @@ -470,12 +487,14 @@ static int unix_listen(struct socket *sock, int backlog) sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; /* set credentials so connect can copy them */ - sk->sk_peercred.pid = task_tgid_vnr(current); - current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid); + init_peercred(sk); err = 0; out_unlock: unix_state_unlock(sk); + put_pid(old_pid); + if (old_cred) + put_cred(old_cred); out: return err; } @@ -736,7 +755,7 @@ static struct sock *unix_find_other(struct net *net, err = -ECONNREFUSED; if (!S_ISSOCK(inode->i_mode)) goto put_fail; - u = unix_find_socket_byinode(net, inode); + u = unix_find_socket_byinode(inode); if (!u) goto put_fail; @@ -1140,8 +1159,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, unix_peer(newsk) = sk; newsk->sk_state = TCP_ESTABLISHED; newsk->sk_type = sk->sk_type; - newsk->sk_peercred.pid = task_tgid_vnr(current); - current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid); + init_peercred(newsk); newu = unix_sk(newsk); newsk->sk_wq = &newu->peer_wq; otheru = unix_sk(other); @@ -1157,7 +1175,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, } /* Set credentials */ - sk->sk_peercred = other->sk_peercred; + copy_peercred(sk, other); sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; @@ -1199,10 +1217,8 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb) sock_hold(skb); unix_peer(ska) = skb; unix_peer(skb) = ska; - ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current); - current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid); - ska->sk_peercred.uid = skb->sk_peercred.uid; - ska->sk_peercred.gid = skb->sk_peercred.gid; + init_peercred(ska); + init_peercred(skb); if (ska->sk_type != SOCK_DGRAM) { ska->sk_state = TCP_ESTABLISHED; @@ -1297,18 +1313,20 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) int i; scm->fp = UNIXCB(skb).fp; - skb->destructor = sock_wfree; UNIXCB(skb).fp = NULL; for (i = scm->fp->count-1; i >= 0; i--) unix_notinflight(scm->fp->fp[i]); } -static void unix_destruct_fds(struct sk_buff *skb) +static void unix_destruct_scm(struct sk_buff *skb) { struct scm_cookie scm; memset(&scm, 0, sizeof(scm)); - unix_detach_fds(&scm, skb); + scm.pid = UNIXCB(skb).pid; + scm.cred = UNIXCB(skb).cred; + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); /* Alas, it calls VFS */ /* So fscking what? fput() had been SMP-safe since the last Summer */ @@ -1331,10 +1349,22 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) for (i = scm->fp->count-1; i >= 0; i--) unix_inflight(scm->fp->fp[i]); - skb->destructor = unix_destruct_fds; return 0; } +static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) +{ + int err = 0; + UNIXCB(skb).pid = get_pid(scm->pid); + UNIXCB(skb).cred = get_cred(scm->cred); + UNIXCB(skb).fp = NULL; + if (scm->fp && send_fds) + err = unix_attach_fds(scm, skb); + + skb->destructor = unix_destruct_scm; + return err; +} + /* * Send AF_UNIX data. 
*/ @@ -1391,12 +1421,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (skb == NULL) goto out; - memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); - if (siocb->scm->fp) { - err = unix_attach_fds(siocb->scm, skb); - if (err) - goto out_free; - } + err = unix_scm_to_skb(siocb->scm, skb, true); + if (err) + goto out_free; unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); @@ -1566,16 +1593,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, */ size = min_t(int, size, skb_tailroom(skb)); - memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); + /* Only send the fds in the first buffer */ - if (siocb->scm->fp && !fds_sent) { - err = unix_attach_fds(siocb->scm, skb); - if (err) { - kfree_skb(skb); - goto out_err; - } - fds_sent = true; + err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); + if (err) { + kfree_skb(skb); + goto out_err; } + fds_sent = true; err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err) { @@ -1692,7 +1717,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, siocb->scm = &tmp_scm; memset(&tmp_scm, 0, sizeof(tmp_scm)); } - siocb->scm->creds = *UNIXCREDS(skb); + scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); unix_set_secdata(siocb->scm, skb); if (!(flags & MSG_PEEK)) { @@ -1841,14 +1866,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, if (check_creds) { /* Never glue messages from different writers */ - if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, - sizeof(siocb->scm->creds)) != 0) { + if ((UNIXCB(skb).pid != siocb->scm->pid) || + (UNIXCB(skb).cred != siocb->scm->cred)) { skb_queue_head(&sk->sk_receive_queue, skb); break; } } else { /* Copy credentials */ - siocb->scm->creds = *UNIXCREDS(skb); + scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); check_creds = 1; } @@ -1881,7 +1906,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, break; } - kfree_skb(skb); + consume_skb(skb); if (siocb->scm->fp) break; diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 258daa80ad926d80d86abcf06aaeb177514d1791..2bf23406637ae1eb849abb8633fd4658f5cf3332 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -48,7 +48,7 @@ #include #include /* support for loadable modules */ #include /* kmalloc(), kfree() */ -#include +#include #include #include /* inline mem*, str* functions */ @@ -71,6 +71,7 @@ * WAN device IOCTL handlers */ +static DEFINE_MUTEX(wanrouter_mutex); static int wanrouter_device_setup(struct wan_device *wandev, wandev_conf_t __user *u_conf); static int wanrouter_device_stat(struct wan_device *wandev, @@ -376,7 +377,7 @@ long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (wandev->magic != ROUTER_MAGIC) return -EINVAL; - lock_kernel(); + mutex_lock(&wanrouter_mutex); switch (cmd) { case ROUTER_SETUP: err = wanrouter_device_setup(wandev, data); @@ -408,7 +409,7 @@ long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = wandev->ioctl(wandev, cmd, arg); else err = -EINVAL; } - unlock_kernel(); + mutex_unlock(&wanrouter_mutex); return err; } diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c index c44d96b3a437a6019ba254b99877cc7ee025f765..11f25c7a7a0504baa1c906f28ce452674f45f968 100644 --- a/net/wanrouter/wanproc.c +++ b/net/wanrouter/wanproc.c @@ -27,7 +27,7 @@ #include #include /* WAN router API definitions */ #include -#include +#include #include #include @@ -66,6 
+66,7 @@ * /proc/net/router */ +static DEFINE_MUTEX(config_mutex); static struct proc_dir_entry *proc_router; /* Strings */ @@ -85,7 +86,7 @@ static void *r_start(struct seq_file *m, loff_t *pos) struct wan_device *wandev; loff_t l = *pos; - lock_kernel(); + mutex_lock(&config_mutex); if (!l--) return SEQ_START_TOKEN; for (wandev = wanrouter_router_devlist; l-- && wandev; @@ -104,7 +105,7 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos) static void r_stop(struct seq_file *m, void *v) __releases(kernel_lock) { - unlock_kernel(); + mutex_unlock(&config_mutex); } static int config_show(struct seq_file *m, void *v) diff --git a/net/wireless/chan.c b/net/wireless/chan.c index b01a6f6397d7be03659547f98f06569011ffac77..d0c92dddb26bddb5b3d55a82ebf39fa8917e446c 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -35,8 +35,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, if (!ht_cap->ht_supported) return NULL; - if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || - ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) + if (channel_type != NL80211_CHAN_HT20 && + (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || + ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) return NULL; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 37d0e0ab4432c7cecac400debdaeee1e1319c94b..541e2fff5e9c5a0fab346fa55ac9f078630dcf43 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -472,24 +472,22 @@ int wiphy_register(struct wiphy *wiphy) /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); + mutex_lock(&cfg80211_mutex); + res = device_add(&rdev->wiphy.dev); if (res) - return res; + goto out_unlock; res = rfkill_register(rdev->rfkill); if (res) goto out_rm_dev; - mutex_lock(&cfg80211_mutex); - /* set up regulatory info */ wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); list_add_rcu(&rdev->list, &cfg80211_rdev_list); cfg80211_rdev_list_generation++; - mutex_unlock(&cfg80211_mutex); - /* add to debugfs */ rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy), @@ -509,11 +507,15 @@ int wiphy_register(struct wiphy *wiphy) } cfg80211_debugfs_rdev_add(rdev); + mutex_unlock(&cfg80211_mutex); return 0; - out_rm_dev: +out_rm_dev: device_del(&rdev->wiphy.dev); + +out_unlock: + mutex_unlock(&cfg80211_mutex); return res; } EXPORT_SYMBOL(wiphy_register); @@ -894,7 +896,7 @@ static int __init cfg80211_init(void) } subsys_initcall(cfg80211_init); -static void cfg80211_exit(void) +static void __exit cfg80211_exit(void) { debugfs_remove(ieee80211_debugfs_dir); nl80211_exit(); @@ -905,3 +907,52 @@ static void cfg80211_exit(void) destroy_workqueue(cfg80211_wq); } module_exit(cfg80211_exit); + +static int ___wiphy_printk(const char *level, const struct wiphy *wiphy, + struct va_format *vaf) +{ + if (!wiphy) + return printk("%s(NULL wiphy *): %pV", level, vaf); + + return printk("%s%s: %pV", level, wiphy_name(wiphy), vaf); +} + +int __wiphy_printk(const char *level, const struct wiphy *wiphy, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + int r; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + r = ___wiphy_printk(level, wiphy, &vaf); + va_end(args); + + return r; +} +EXPORT_SYMBOL(__wiphy_printk); + +#define define_wiphy_printk_level(func, kern_level) \ +int func(const struct wiphy *wiphy, const char *fmt, ...) 
\ +{ \ + struct va_format vaf; \ + va_list args; \ + int r; \ + \ + va_start(args, fmt); \ + \ + vaf.fmt = fmt; \ + vaf.va = &args; \ + \ + r = ___wiphy_printk(kern_level, wiphy, &vaf); \ + va_end(args); \ + \ + return r; \ +} \ +EXPORT_SYMBOL(func); + +define_wiphy_printk_level(wiphy_debug, KERN_DEBUG); diff --git a/net/wireless/core.h b/net/wireless/core.h index ae930acf75e9fb45bef945da23a0fdd1690e61f6..63d57ae399c3f9d73da4f2bf963867a6699af4c4 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -339,6 +339,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, + bool channel_type_valid, const u8 *buf, size_t len, u64 *cookie); /* SME */ diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk index 3cc9e69880a80ab90d1dc872c0f87c7263e0f9fe..53c143f5e770d3dacb649acbec621c63dd8b9a5a 100644 --- a/net/wireless/genregdb.awk +++ b/net/wireless/genregdb.awk @@ -21,6 +21,7 @@ BEGIN { print "" print "#include " print "#include " + print "#include \"regdb.h\"" print "" regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n" } diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index adcabba02e20c78199d3913a626fe74f209e002d..27a8ce9343c3e6d7a3bfdd6f801a7e852504a9c3 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -247,8 +247,10 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, if (!netif_running(wdev->netdev)) return 0; - if (wdev->wext.keys) + if (wdev->wext.keys) { wdev->wext.keys->def = wdev->wext.default_key; + wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; + } wdev->wext.ibss.privacy = wdev->wext.default_key != -1; diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c index b7fa31d5fd13892a93c774de5ce3a2daa02af62d..dacb3b4b1bdbf5c786e18156064ef18253b2d46b 100644 --- a/net/wireless/lib80211_crypt_ccmp.c +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -467,7 +467,6 @@ static struct lib80211_crypto_ops lib80211_crypt_ccmp = { .name = "CCMP", .init = lib80211_ccmp_init, .deinit = lib80211_ccmp_deinit, - .build_iv = lib80211_ccmp_hdr, .encrypt_mpdu = lib80211_ccmp_encrypt, .decrypt_mpdu = lib80211_ccmp_decrypt, .encrypt_msdu = NULL, diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index 8cbdb32ff31657fd312969ff319e1e79246e2238..0fe40510e2cb38a284d8ae50f6a0b42a9b458809 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -578,7 +578,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) } if (ieee80211_is_data_qos(hdr11->frame_control)) { - hdr[12] = le16_to_cpu(*ieee80211_get_qos_ctl(hdr11)) + hdr[12] = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(hdr11))) & IEEE80211_QOS_CTL_TID_MASK; } else hdr[12] = 0; /* priority */ @@ -757,7 +757,6 @@ static struct lib80211_crypto_ops lib80211_crypt_tkip = { .name = "TKIP", .init = lib80211_tkip_init, .deinit = lib80211_tkip_deinit, - .build_iv = lib80211_tkip_hdr, .encrypt_mpdu = lib80211_tkip_encrypt, .decrypt_mpdu = lib80211_tkip_decrypt, .encrypt_msdu = lib80211_michael_mic_add, diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c index 6d41e05ca33bd83d865c19117e77f9822bb05275..e2e88878ba35a845d20af5475692b05a1a14abca 100644 --- a/net/wireless/lib80211_crypt_wep.c +++ b/net/wireless/lib80211_crypt_wep.c @@ -269,7 +269,6 @@ static struct lib80211_crypto_ops lib80211_crypt_wep = { .name = "WEP", .init = lib80211_wep_init, .deinit = 
lib80211_wep_deinit, - .build_iv = lib80211_wep_build_iv, .encrypt_mpdu = lib80211_wep_encrypt, .decrypt_mpdu = lib80211_wep_decrypt, .encrypt_msdu = NULL, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 48ead6f0426dc9763cc49819f7ac6e613aa36a76..e74a1a2119d34a334702bc16106c43fefa2ffdbd 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -44,10 +44,10 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) } } - WARN_ON(!done); - - nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); - cfg80211_sme_rx_auth(dev, buf, len); + if (done) { + nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); + cfg80211_sme_rx_auth(dev, buf, len); + } wdev_unlock(wdev); } @@ -827,6 +827,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, + bool channel_type_valid, const u8 *buf, size_t len, u64 *cookie) { struct wireless_dev *wdev = dev->ieee80211_ptr; @@ -845,8 +846,9 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, if (!wdev->current_bss || memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, ETH_ALEN) != 0 || - memcmp(wdev->current_bss->pub.bssid, mgmt->da, - ETH_ALEN) != 0) + (wdev->iftype == NL80211_IFTYPE_STATION && + memcmp(wdev->current_bss->pub.bssid, mgmt->da, + ETH_ALEN) != 0)) return -ENOTCONN; } @@ -855,7 +857,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, /* Transmit the Action frame as requested by user space */ return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, - buf, len, cookie); + channel_type_valid, buf, len, cookie); } bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index db71150b8040d07d9590a8229b9070b7461d7279..37902a54e9c154fb8dbed3e1d996d8a198bdc17b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -153,6 +153,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 }, + + [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 }, }; /* policy for the attributes */ @@ -869,6 +872,34 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) goto bad_res; } + if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { + enum nl80211_tx_power_setting type; + int idx, mbm = 0; + + if (!rdev->ops->set_tx_power) { + result = -EOPNOTSUPP; + goto bad_res; + } + + idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING; + type = nla_get_u32(info->attrs[idx]); + + if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] && + (type != NL80211_TX_POWER_AUTOMATIC)) { + result = -EINVAL; + goto bad_res; + } + + if (type != NL80211_TX_POWER_AUTOMATIC) { + idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL; + mbm = nla_get_u32(info->attrs[idx]); + } + + result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm); + if (result) + goto bad_res; + } + changed = 0; if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { @@ -1107,7 +1138,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype) { if (!use_4addr) { - if (netdev && netdev->br_port) + if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT)) return -EBUSY; return 0; } @@ -2738,6 +2769,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, nla_put_failure: 
genlmsg_cancel(msg, hdr); + nlmsg_free(msg); err = -EMSGSIZE; out: /* Cleanup */ @@ -2929,6 +2961,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) nla_put_failure: genlmsg_cancel(msg, hdr); + nlmsg_free(msg); err = -EMSGSIZE; out: mutex_unlock(&cfg80211_mutex); @@ -3955,6 +3988,55 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) } } + if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { + u8 *rates = + nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + int n_rates = + nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + struct ieee80211_supported_band *sband = + wiphy->bands[ibss.channel->band]; + int i, j; + + if (n_rates == 0) { + err = -EINVAL; + goto out; + } + + for (i = 0; i < n_rates; i++) { + int rate = (rates[i] & 0x7f) * 5; + bool found = false; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + found = true; + ibss.basic_rates |= BIT(j); + break; + } + } + if (!found) { + err = -EINVAL; + goto out; + } + } + } else { + /* + * If no rates were explicitly configured, + * use the mandatory rate set for 11b or + * 11a for maximum compatibility. + */ + struct ieee80211_supported_band *sband = + wiphy->bands[ibss.channel->band]; + int j; + u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ? + IEEE80211_RATE_MANDATORY_A : + IEEE80211_RATE_MANDATORY_B; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].flags & flag) + ibss.basic_rates |= BIT(j); + } + } + err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); out: @@ -4653,7 +4735,8 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info) if (err) goto unlock_rtnl; - if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { err = -EOPNOTSUPP; goto out; } @@ -4681,6 +4764,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) struct net_device *dev; struct ieee80211_channel *chan; enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + bool channel_type_valid = false; u32 freq; int err; void *hdr; @@ -4702,7 +4786,8 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) goto out; } - if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { err = -EOPNOTSUPP; goto out; } @@ -4722,6 +4807,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) err = -EINVAL; goto out; } + channel_type_valid = true; } freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); @@ -4745,6 +4831,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) goto free_msg; } err = cfg80211_mlme_action(rdev, dev, chan, channel_type, + channel_type_valid, nla_data(info->attrs[NL80211_ATTR_FRAME]), nla_len(info->attrs[NL80211_ATTR_FRAME]), &cookie); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 8f0d97dd3109c54cd85fe01095308171d34d9f01..f180db0de66cbc2163ce3dab641b46cb647226d2 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -66,21 +66,13 @@ static struct platform_device *reg_pdev; */ const struct ieee80211_regdomain *cfg80211_regdomain; -/* - * We use this as a place for the rd structure built from the - * last parsed country IE to rest until CRDA gets back to us with - * what it thinks should apply for the same country - */ -static const struct ieee80211_regdomain 
*country_ie_regdomain; - /* * Protects static reg.c components: * - cfg80211_world_regdom * - cfg80211_regdom - * - country_ie_regdomain * - last_request */ -DEFINE_MUTEX(reg_mutex); +static DEFINE_MUTEX(reg_mutex); #define assert_reg_lock() WARN_ON(!mutex_is_locked(®_mutex)) /* Used to queue up regulatory hints */ @@ -275,25 +267,6 @@ static bool is_user_regdom_saved(void) return true; } -/** - * country_ie_integrity_changes - tells us if the country IE has changed - * @checksum: checksum of country IE of fields we are interested in - * - * If the country IE has not changed you can ignore it safely. This is - * useful to determine if two devices are seeing two different country IEs - * even on the same alpha2. Note that this will return false if no IE has - * been set on the wireless core yet. - */ -static bool country_ie_integrity_changes(u32 checksum) -{ - /* If no IE has been set then the checksum doesn't change */ - if (unlikely(!last_request->country_ie_checksum)) - return false; - if (unlikely(last_request->country_ie_checksum != checksum)) - return true; - return false; -} - static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd, const struct ieee80211_regdomain *src_regd) { @@ -505,471 +478,6 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, #undef ONE_GHZ_IN_KHZ } -/* - * This is a work around for sanity checking ieee80211_channel_to_frequency()'s - * work. ieee80211_channel_to_frequency() can for example currently provide a - * 2 GHz channel when in fact a 5 GHz channel was desired. An example would be - * an AP providing channel 8 on a country IE triplet when it sent this on the - * 5 GHz band, that channel is designed to be channel 8 on 5 GHz, not a 2 GHz - * channel. - * - * This can be removed once ieee80211_channel_to_frequency() takes in a band. - */ -static bool chan_in_band(int chan, enum ieee80211_band band) -{ - int center_freq = ieee80211_channel_to_frequency(chan); - - switch (band) { - case IEEE80211_BAND_2GHZ: - if (center_freq <= 2484) - return true; - return false; - case IEEE80211_BAND_5GHZ: - if (center_freq >= 5005) - return true; - return false; - default: - return false; - } -} - -/* - * Some APs may send a country IE triplet for each channel they - * support and while this is completely overkill and silly we still - * need to support it. We avoid making a single rule for each channel - * though and to help us with this we use this helper to find the - * actual subband end channel. These type of country IE triplet - * scenerios are handled then, all yielding two regulaotry rules from - * parsing a country IE: - * - * [1] - * [2] - * [36] - * [40] - * - * [1] - * [2-4] - * [5-12] - * [36] - * [40-44] - * - * [1-4] - * [5-7] - * [36-44] - * [48-64] - * - * [36-36] - * [40-40] - * [44-44] - * [48-48] - * [52-52] - * [56-56] - * [60-60] - * [64-64] - * [100-100] - * [104-104] - * [108-108] - * [112-112] - * [116-116] - * [120-120] - * [124-124] - * [128-128] - * [132-132] - * [136-136] - * [140-140] - * - * Returns 0 if the IE has been found to be invalid in the middle - * somewhere. 
- */ -static int max_subband_chan(enum ieee80211_band band, - int orig_cur_chan, - int orig_end_channel, - s8 orig_max_power, - u8 **country_ie, - u8 *country_ie_len) -{ - u8 *triplets_start = *country_ie; - u8 len_at_triplet = *country_ie_len; - int end_subband_chan = orig_end_channel; - - /* - * We'll deal with padding for the caller unless - * its not immediate and we don't process any channels - */ - if (*country_ie_len == 1) { - *country_ie += 1; - *country_ie_len -= 1; - return orig_end_channel; - } - - /* Move to the next triplet and then start search */ - *country_ie += 3; - *country_ie_len -= 3; - - if (!chan_in_band(orig_cur_chan, band)) - return 0; - - while (*country_ie_len >= 3) { - int end_channel = 0; - struct ieee80211_country_ie_triplet *triplet = - (struct ieee80211_country_ie_triplet *) *country_ie; - int cur_channel = 0, next_expected_chan; - - /* means last triplet is completely unrelated to this one */ - if (triplet->ext.reg_extension_id >= - IEEE80211_COUNTRY_EXTENSION_ID) { - *country_ie -= 3; - *country_ie_len += 3; - break; - } - - if (triplet->chans.first_channel == 0) { - *country_ie += 1; - *country_ie_len -= 1; - if (*country_ie_len != 0) - return 0; - break; - } - - if (triplet->chans.num_channels == 0) - return 0; - - /* Monitonically increasing channel order */ - if (triplet->chans.first_channel <= end_subband_chan) - return 0; - - if (!chan_in_band(triplet->chans.first_channel, band)) - return 0; - - /* 2 GHz */ - if (triplet->chans.first_channel <= 14) { - end_channel = triplet->chans.first_channel + - triplet->chans.num_channels - 1; - } - else { - end_channel = triplet->chans.first_channel + - (4 * (triplet->chans.num_channels - 1)); - } - - if (!chan_in_band(end_channel, band)) - return 0; - - if (orig_max_power != triplet->chans.max_power) { - *country_ie -= 3; - *country_ie_len += 3; - break; - } - - cur_channel = triplet->chans.first_channel; - - /* The key is finding the right next expected channel */ - if (band == IEEE80211_BAND_2GHZ) - next_expected_chan = end_subband_chan + 1; - else - next_expected_chan = end_subband_chan + 4; - - if (cur_channel != next_expected_chan) { - *country_ie -= 3; - *country_ie_len += 3; - break; - } - - end_subband_chan = end_channel; - - /* Move to the next one */ - *country_ie += 3; - *country_ie_len -= 3; - - /* - * Padding needs to be dealt with if we processed - * some channels. - */ - if (*country_ie_len == 1) { - *country_ie += 1; - *country_ie_len -= 1; - break; - } - - /* If seen, the IE is invalid */ - if (*country_ie_len == 2) - return 0; - } - - if (end_subband_chan == orig_end_channel) { - *country_ie = triplets_start; - *country_ie_len = len_at_triplet; - return orig_end_channel; - } - - return end_subband_chan; -} - -/* - * Converts a country IE to a regulatory domain. A regulatory domain - * structure has a lot of information which the IE doesn't yet have, - * so for the other values we use upper max values as we will intersect - * with our userspace regulatory agent to get lower bounds. 
- */ -static struct ieee80211_regdomain *country_ie_2_rd( - enum ieee80211_band band, - u8 *country_ie, - u8 country_ie_len, - u32 *checksum) -{ - struct ieee80211_regdomain *rd = NULL; - unsigned int i = 0; - char alpha2[2]; - u32 flags = 0; - u32 num_rules = 0, size_of_regd = 0; - u8 *triplets_start = NULL; - u8 len_at_triplet = 0; - /* the last channel we have registered in a subband (triplet) */ - int last_sub_max_channel = 0; - - *checksum = 0xDEADBEEF; - - /* Country IE requirements */ - BUG_ON(country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN || - country_ie_len & 0x01); - - alpha2[0] = country_ie[0]; - alpha2[1] = country_ie[1]; - - /* - * Third octet can be: - * 'I' - Indoor - * 'O' - Outdoor - * - * anything else we assume is no restrictions - */ - if (country_ie[2] == 'I') - flags = NL80211_RRF_NO_OUTDOOR; - else if (country_ie[2] == 'O') - flags = NL80211_RRF_NO_INDOOR; - - country_ie += 3; - country_ie_len -= 3; - - triplets_start = country_ie; - len_at_triplet = country_ie_len; - - *checksum ^= ((flags ^ alpha2[0] ^ alpha2[1]) << 8); - - /* - * We need to build a reg rule for each triplet, but first we must - * calculate the number of reg rules we will need. We will need one - * for each channel subband - */ - while (country_ie_len >= 3) { - int end_channel = 0; - struct ieee80211_country_ie_triplet *triplet = - (struct ieee80211_country_ie_triplet *) country_ie; - int cur_sub_max_channel = 0, cur_channel = 0; - - if (triplet->ext.reg_extension_id >= - IEEE80211_COUNTRY_EXTENSION_ID) { - country_ie += 3; - country_ie_len -= 3; - continue; - } - - /* - * APs can add padding to make length divisible - * by two, required by the spec. - */ - if (triplet->chans.first_channel == 0) { - country_ie++; - country_ie_len--; - /* This is expected to be at the very end only */ - if (country_ie_len != 0) - return NULL; - break; - } - - if (triplet->chans.num_channels == 0) - return NULL; - - if (!chan_in_band(triplet->chans.first_channel, band)) - return NULL; - - /* 2 GHz */ - if (band == IEEE80211_BAND_2GHZ) - end_channel = triplet->chans.first_channel + - triplet->chans.num_channels - 1; - else - /* - * 5 GHz -- For example in country IEs if the first - * channel given is 36 and the number of channels is 4 - * then the individual channel numbers defined for the - * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 - * and not 36, 37, 38, 39. - * - * See: http://tinyurl.com/11d-clarification - */ - end_channel = triplet->chans.first_channel + - (4 * (triplet->chans.num_channels - 1)); - - cur_channel = triplet->chans.first_channel; - - /* - * Enhancement for APs that send a triplet for every channel - * or for whatever reason sends triplets with multiple channels - * separated when in fact they should be together. - */ - end_channel = max_subband_chan(band, - cur_channel, - end_channel, - triplet->chans.max_power, - &country_ie, - &country_ie_len); - if (!end_channel) - return NULL; - - if (!chan_in_band(end_channel, band)) - return NULL; - - cur_sub_max_channel = end_channel; - - /* Basic sanity check */ - if (cur_sub_max_channel < cur_channel) - return NULL; - - /* - * Do not allow overlapping channels. 
Also channels - * passed in each subband must be monotonically - * increasing - */ - if (last_sub_max_channel) { - if (cur_channel <= last_sub_max_channel) - return NULL; - if (cur_sub_max_channel <= last_sub_max_channel) - return NULL; - } - - /* - * When dot11RegulatoryClassesRequired is supported - * we can throw ext triplets as part of this soup, - * for now we don't care when those change as we - * don't support them - */ - *checksum ^= ((cur_channel ^ cur_sub_max_channel) << 8) | - ((cur_sub_max_channel ^ cur_sub_max_channel) << 16) | - ((triplet->chans.max_power ^ cur_sub_max_channel) << 24); - - last_sub_max_channel = cur_sub_max_channel; - - num_rules++; - - if (country_ie_len >= 3) { - country_ie += 3; - country_ie_len -= 3; - } - - /* - * Note: this is not a IEEE requirement but - * simply a memory requirement - */ - if (num_rules > NL80211_MAX_SUPP_REG_RULES) - return NULL; - } - - country_ie = triplets_start; - country_ie_len = len_at_triplet; - - size_of_regd = sizeof(struct ieee80211_regdomain) + - (num_rules * sizeof(struct ieee80211_reg_rule)); - - rd = kzalloc(size_of_regd, GFP_KERNEL); - if (!rd) - return NULL; - - rd->n_reg_rules = num_rules; - rd->alpha2[0] = alpha2[0]; - rd->alpha2[1] = alpha2[1]; - - /* This time around we fill in the rd */ - while (country_ie_len >= 3) { - int end_channel = 0; - struct ieee80211_country_ie_triplet *triplet = - (struct ieee80211_country_ie_triplet *) country_ie; - struct ieee80211_reg_rule *reg_rule = NULL; - struct ieee80211_freq_range *freq_range = NULL; - struct ieee80211_power_rule *power_rule = NULL; - - /* - * Must parse if dot11RegulatoryClassesRequired is true, - * we don't support this yet - */ - if (triplet->ext.reg_extension_id >= - IEEE80211_COUNTRY_EXTENSION_ID) { - country_ie += 3; - country_ie_len -= 3; - continue; - } - - if (triplet->chans.first_channel == 0) { - country_ie++; - country_ie_len--; - break; - } - - reg_rule = &rd->reg_rules[i]; - freq_range = ®_rule->freq_range; - power_rule = ®_rule->power_rule; - - reg_rule->flags = flags; - - /* 2 GHz */ - if (band == IEEE80211_BAND_2GHZ) - end_channel = triplet->chans.first_channel + - triplet->chans.num_channels -1; - else - end_channel = triplet->chans.first_channel + - (4 * (triplet->chans.num_channels - 1)); - - end_channel = max_subband_chan(band, - triplet->chans.first_channel, - end_channel, - triplet->chans.max_power, - &country_ie, - &country_ie_len); - - /* - * The +10 is since the regulatory domain expects - * the actual band edge, not the center of freq for - * its start and end freqs, assuming 20 MHz bandwidth on - * the channels passed - */ - freq_range->start_freq_khz = - MHZ_TO_KHZ(ieee80211_channel_to_frequency( - triplet->chans.first_channel) - 10); - freq_range->end_freq_khz = - MHZ_TO_KHZ(ieee80211_channel_to_frequency( - end_channel) + 10); - - /* - * These are large arbitrary values we use to intersect later. 
- * Increment this if we ever support >= 40 MHz channels - * in IEEE 802.11 - */ - freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40); - power_rule->max_antenna_gain = DBI_TO_MBI(100); - power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power); - - i++; - - if (country_ie_len >= 3) { - country_ie += 3; - country_ie_len -= 3; - } - - BUG_ON(i > NL80211_MAX_SUPP_REG_RULES); - } - - return rd; -} - - /* * Helper for regdom_intersect(), this does the real * mathematical intersection fun @@ -1191,7 +699,6 @@ static int freq_reg_info_regd(struct wiphy *wiphy, return -EINVAL; } -EXPORT_SYMBOL(freq_reg_info); int freq_reg_info(struct wiphy *wiphy, u32 center_freq, @@ -1205,6 +712,7 @@ int freq_reg_info(struct wiphy *wiphy, reg_rule, NULL); } +EXPORT_SYMBOL(freq_reg_info); /* * Note that right now we assume the desired channel bandwidth @@ -1243,41 +751,8 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, desired_bw_khz, ®_rule); - if (r) { - /* - * This means no regulatory rule was found in the country IE - * with a frequency range on the center_freq's band, since - * IEEE-802.11 allows for a country IE to have a subset of the - * regulatory information provided in a country we ignore - * disabling the channel unless at least one reg rule was - * found on the center_freq's band. For details see this - * clarification: - * - * http://tinyurl.com/11d-clarification - */ - if (r == -ERANGE && - last_request->initiator == - NL80211_REGDOM_SET_BY_COUNTRY_IE) { - REG_DBG_PRINT("cfg80211: Leaving channel %d MHz " - "intact on %s - no rule found in band on " - "Country IE\n", - chan->center_freq, wiphy_name(wiphy)); - } else { - /* - * In this case we know the country IE has at least one reg rule - * for the band so we respect its band definitions - */ - if (last_request->initiator == - NL80211_REGDOM_SET_BY_COUNTRY_IE) - REG_DBG_PRINT("cfg80211: Disabling " - "channel %d MHz on %s due to " - "Country IE\n", - chan->center_freq, wiphy_name(wiphy)); - flags |= IEEE80211_CHAN_DISABLED; - chan->flags = flags; - } + if (r) return; - } power_rule = ®_rule->power_rule; freq_range = ®_rule->freq_range; @@ -1831,6 +1306,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) { int r = 0; struct wiphy *wiphy = NULL; + enum nl80211_reg_initiator initiator = reg_request->initiator; BUG_ON(!reg_request->alpha2); @@ -1850,7 +1326,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) /* This is required so that the orig_* parameters are saved */ if (r == -EALREADY && wiphy && wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) - wiphy_update_regulatory(wiphy, reg_request->initiator); + wiphy_update_regulatory(wiphy, initiator); out: mutex_unlock(®_mutex); mutex_unlock(&cfg80211_mutex); @@ -2008,35 +1484,6 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) } EXPORT_SYMBOL(regulatory_hint); -/* Caller must hold reg_mutex */ -static bool reg_same_country_ie_hint(struct wiphy *wiphy, - u32 country_ie_checksum) -{ - struct wiphy *request_wiphy; - - assert_reg_lock(); - - if (unlikely(last_request->initiator != - NL80211_REGDOM_SET_BY_COUNTRY_IE)) - return false; - - request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); - - if (!request_wiphy) - return false; - - if (likely(request_wiphy != wiphy)) - return !country_ie_integrity_changes(country_ie_checksum); - /* - * We should not have let these through at this point, they - * should have been picked up earlier by the first alpha2 check - * on the device - */ - if 
(WARN_ON(!country_ie_integrity_changes(country_ie_checksum))) - return true; - return false; -} - /* * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and * therefore cannot iterate over the rdev list here. @@ -2046,9 +1493,7 @@ void regulatory_hint_11d(struct wiphy *wiphy, u8 *country_ie, u8 country_ie_len) { - struct ieee80211_regdomain *rd = NULL; char alpha2[2]; - u32 checksum = 0; enum environment_cap env = ENVIRON_ANY; struct regulatory_request *request; @@ -2064,14 +1509,6 @@ void regulatory_hint_11d(struct wiphy *wiphy, if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) goto out; - /* - * Pending country IE processing, this can happen after we - * call CRDA and wait for a response if a beacon was received before - * we were able to process the last regulatory_hint_11d() call - */ - if (country_ie_regdomain) - goto out; - alpha2[0] = country_ie[0]; alpha2[1] = country_ie[1]; @@ -2090,39 +1527,14 @@ void regulatory_hint_11d(struct wiphy *wiphy, wiphy_idx_valid(last_request->wiphy_idx))) goto out; - rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum); - if (!rd) { - REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n"); - goto out; - } - - /* - * This will not happen right now but we leave it here for the - * the future when we want to add suspend/resume support and having - * the user move to another country after doing so, or having the user - * move to another AP. Right now we just trust the first AP. - * - * If we hit this before we add this support we want to be informed of - * it as it would indicate a mistake in the current design - */ - if (WARN_ON(reg_same_country_ie_hint(wiphy, checksum))) - goto free_rd_out; - request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) - goto free_rd_out; - - /* - * We keep this around for when CRDA comes back with a response so - * we can intersect with that - */ - country_ie_regdomain = rd; + goto out; request->wiphy_idx = get_wiphy_idx(wiphy); - request->alpha2[0] = rd->alpha2[0]; - request->alpha2[1] = rd->alpha2[1]; + request->alpha2[0] = alpha2[0]; + request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; - request->country_ie_checksum = checksum; request->country_ie_env = env; mutex_unlock(®_mutex); @@ -2131,8 +1543,6 @@ void regulatory_hint_11d(struct wiphy *wiphy, return; -free_rd_out: - kfree(rd); out: mutex_unlock(®_mutex); } @@ -2383,33 +1793,6 @@ static void print_regdomain_info(const struct ieee80211_regdomain *rd) print_rd_rules(rd); } -#ifdef CONFIG_CFG80211_REG_DEBUG -static void reg_country_ie_process_debug( - const struct ieee80211_regdomain *rd, - const struct ieee80211_regdomain *country_ie_regdomain, - const struct ieee80211_regdomain *intersected_rd) -{ - printk(KERN_DEBUG "cfg80211: Received country IE:\n"); - print_regdomain_info(country_ie_regdomain); - printk(KERN_DEBUG "cfg80211: CRDA thinks this should applied:\n"); - print_regdomain_info(rd); - if (intersected_rd) { - printk(KERN_DEBUG "cfg80211: We intersect both of these " - "and get:\n"); - print_regdomain_info(intersected_rd); - return; - } - printk(KERN_DEBUG "cfg80211: Intersection between both failed\n"); -} -#else -static inline void reg_country_ie_process_debug( - const struct ieee80211_regdomain *rd, - const struct ieee80211_regdomain *country_ie_regdomain, - const struct ieee80211_regdomain *intersected_rd) -{ -} -#endif - /* Takes ownership of rd only if it doesn't fail */ static int __set_regdom(const struct ieee80211_regdomain *rd) { @@ -2521,34 +1904,6 @@ static int 
__set_regdom(const struct ieee80211_regdomain *rd) return 0; } - /* - * Country IE requests are handled a bit differently, we intersect - * the country IE rd with what CRDA believes that country should have - */ - - /* - * Userspace could have sent two replies with only - * one kernel request. By the second reply we would have - * already processed and consumed the country_ie_regdomain. - */ - if (!country_ie_regdomain) - return -EALREADY; - BUG_ON(rd == country_ie_regdomain); - - /* - * Intersect what CRDA returned and our what we - * had built from the Country IE received - */ - - intersected_rd = regdom_intersect(rd, country_ie_regdomain); - - reg_country_ie_process_debug(rd, - country_ie_regdomain, - intersected_rd); - - kfree(country_ie_regdomain); - country_ie_regdomain = NULL; - if (!intersected_rd) return -EINVAL; @@ -2630,7 +1985,7 @@ void reg_device_remove(struct wiphy *wiphy) mutex_unlock(®_mutex); } -int regulatory_init(void) +int __init regulatory_init(void) { int err = 0; @@ -2676,7 +2031,7 @@ int regulatory_init(void) return 0; } -void regulatory_exit(void) +void /* __init_or_exit */ regulatory_exit(void) { struct regulatory_request *reg_request, *tmp; struct reg_beacon *reg_beacon, *btmp; @@ -2688,9 +2043,6 @@ void regulatory_exit(void) reset_regdomains(); - kfree(country_ie_regdomain); - country_ie_regdomain = NULL; - kfree(last_request); platform_device_unregister(reg_pdev); diff --git a/net/wireless/reg.h b/net/wireless/reg.h index b26224a9f3bc2ab2ea28d19a3a2ebbc2f5b2a765..c4695d07af23fd3852e8398787205d46a7b0d54c 100644 --- a/net/wireless/reg.h +++ b/net/wireless/reg.h @@ -10,7 +10,7 @@ int regulatory_hint_user(const char *alpha2); void reg_device_remove(struct wiphy *wiphy); -int regulatory_init(void); +int __init regulatory_init(void); void regulatory_exit(void); int set_regdom(const struct ieee80211_regdomain *rd); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 58401d246bda9defe3a142f1c64c0b048530fd63..5ca8c7180141d8dc9bc43d607eb26395cd0268bf 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -275,6 +275,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, { struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); struct cfg80211_internal_bss *bss, *res = NULL; + unsigned long now = jiffies; spin_lock_bh(&dev->bss_lock); @@ -283,6 +284,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, continue; if (channel && bss->pub.channel != channel) continue; + /* Don't get expired BSS structs */ + if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) && + !atomic_read(&bss->hold)) + continue; if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { res = bss; kref_get(&res->ref); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 72222f0074dbd4b24914386b85e02257d499941d..a8c2d6b877aeda894cb80328d015bc3494e000c9 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -35,7 +35,7 @@ struct cfg80211_conn { bool auto_auth, prev_bssid_valid; }; -bool cfg80211_is_all_idle(void) +static bool cfg80211_is_all_idle(void) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; diff --git a/net/wireless/util.c b/net/wireless/util.c index 3416373a9c0c80e91c21f687eb61ec7b91daa626..0c8a1e8b76903313ef0c413cc36261ec8af9399b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -770,8 +770,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, return -EOPNOTSUPP; /* if it's part of a bridge, reject changing type to station/ibss */ - if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC || - ntype 
== NL80211_IFTYPE_STATION)) + if ((dev->priv_flags & IFF_BRIDGE_PORT) && + (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION)) return -EBUSY; if (ntype != otype) { diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 96342993cf933237d76c33595497d938ed86ec3c..bb5e0a5ecfa1c2f999034bf4ceb34e69fbec855d 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -829,7 +829,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev, { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - enum tx_power_setting type; + enum nl80211_tx_power_setting type; int dbm = 0; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) @@ -852,7 +852,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev, if (data->txpower.value < 0) return -EINVAL; dbm = data->txpower.value; - type = TX_POWER_FIXED; + type = NL80211_TX_POWER_FIXED; /* TODO: do regulatory check! */ } else { /* @@ -860,10 +860,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev, * passed in from userland. */ if (data->txpower.value < 0) { - type = TX_POWER_AUTOMATIC; + type = NL80211_TX_POWER_AUTOMATIC; } else { dbm = data->txpower.value; - type = TX_POWER_LIMITED; + type = NL80211_TX_POWER_LIMITED; } } } else { @@ -872,7 +872,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev, return 0; } - return rdev->ops->set_tx_power(wdev->wiphy, type, dbm); + return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm)); } EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); @@ -1471,6 +1471,7 @@ int cfg80211_wext_siwpmksa(struct net_device *dev, return -EOPNOTSUPP; } } +EXPORT_SYMBOL_GPL(cfg80211_wext_siwpmksa); static const iw_handler cfg80211_handlers[] = { [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index a7ec5a8a2380e6277fa4a3755e01fb0aa58076fb..2b3ed7ad49338f3ec2d64caf1dd589c2aefc4264 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2492,7 +2492,8 @@ static int __net_init xfrm_statistics_init(struct net *net) int rv; if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, - sizeof(struct linux_xfrm_mib)) < 0) + sizeof(struct linux_xfrm_mib), + __alignof__(struct linux_xfrm_mib)) < 0) return -ENOMEM; rv = xfrm_proc_init(net); if (rv < 0)